/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"

#define NFSDDBG_FACILITY		NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

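/*
 * Mark a session for destruction.  This can only succeed once the
 * caller's own reference is the last one left; otherwise the session
 * is still in use and the client is asked to retry (nfserr_jukebox).
 */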
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

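/*
 * Drop a client reference, taking the per-net client_lock only when
 * this is the last reference; the client's lease is renewed unless it
 * has already expired.
 */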
static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		posix_unblock_lock(&found->nbl_lock);
	return found;
}

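/*
 * Find a queued blocked lock for this owner/filehandle pair, or
 * allocate a fresh one (with its CB_NOTIFY_LOCK callback initialized)
 * if none is currently queued.
 */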
static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		posix_unblock_lock(&nbl->nbl_lock);
		free_blocked_lock(nbl);
	}
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed.  We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

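/* A simple multiply-by-37 hash over an opaque byte string. */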
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS		8
#define FILE_HASH_SIZE		(1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

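/*
 * Drop one reference on the given access mode.  When the last user of
 * that mode goes away, the matching struct file is closed; if the
 * opposite mode is unused as well, the shared O_RDWR file can go too.
 * The swap()s move the pointers out from under fi_lock so that the
 * fput()s can happen after the lock is released.
 */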
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		atomic_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		atomic_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

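/*
 * Reuse an odstate already hashed for this client on this file if one
 * exists; otherwise hash the caller-supplied 'new' one.  Either way the
 * odstate returned holds a reference for the caller.
 */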
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	atomic_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32 bits and use
 * the low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1 - bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

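/*
 * Bump the stateid's generation before copying it out to the client,
 * skipping 0 on wraparound since a zero seqid has special meaning on
 * the wire.
 */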
void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp) {
		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
		fput(filp);
	}
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_get_existing_delegation - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if an existing delegation was not found.
 *
 *      On error: -EAGAIN if one was previously granted to this nfs4_client
 *                 for this nfs4_file.
 *
 */
static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return -EAGAIN;
		}
	}
	return 0;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	int status;
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	status = nfs4_get_existing_delegation(clp, fp);
	if (status)
		return status;
	++fp->fi_delegees;
	atomic_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

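/*
 * Detach a delegation from its file and client lists under state_lock;
 * returns false if someone else already unhashed it.  Bumping dl_time
 * keeps a concurrent lease break from requeueing the recall.
 */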
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed) {
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_deleg_lease(dp->dl_stid.sc_file);

	if (clp->cl_minorversion == 0)
		nfs4_put_stid(&dp->dl_stid);
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

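/* Map NFSv4 share access bits onto the matching open(2) access mode. */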
static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				  struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!atomic_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return unhash_ol_stateid(stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	bool unhashed;

	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhashed = unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
	return unhashed;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

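/*
 * Tear down an openowner: unhash it and every open stateid (and their
 * lock stateids) it still holds, free the collected stateids outside
 * cl_lock, then drop the owner's reference.
 */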
static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	int avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

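/*
 * Wire a freshly allocated session up to its client: generate the
 * sessionid, record the CREATE_SESSION parameters, and hash the session
 * both in the global sessionid table and on the client's own list.
 */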
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}

/*
 * XXX Should we use a slab cache?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kfree(clp);
	return NULL;
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

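/*
 * A client that still has references in use cannot be torn down;
 * return nfserr_jukebox so the operation is retried later.
 */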
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}

static void
__destroy_client(struct nfs4_client *clp)
{
	int i;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		struct nfs4_stateowner *so, *tmp;

		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
					 so_strhash) {
			/* Should be no openowners at this point */
			WARN_ON_ONCE(so->so_is_open_owner);
			remove_blocked_locks(lockowner(so));
		}
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

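/*
 * Note that kstrdup() of a NULL string returns NULL, so a principal
 * that is simply absent in the source is not an error here; only a
 * failed copy of a principal that is present maps to -ENOMEM.
 */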
1961 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1962 {
1963 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
1964 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
1965 GFP_KERNEL);
1966 if ((source->cr_principal && ! target->cr_principal) ||
1967 (source->cr_raw_principal && ! target->cr_raw_principal))
1968 return -ENOMEM;
1969
1970 target->cr_flavor = source->cr_flavor;
1971 target->cr_uid = source->cr_uid;
1972 target->cr_gid = source->cr_gid;
1973 target->cr_group_info = source->cr_group_info;
1974 get_group_info(target->cr_group_info);
1975 target->cr_gss_mech = source->cr_gss_mech;
1976 if (source->cr_gss_mech)
1977 gss_mech_get(source->cr_gss_mech);
1978 return 0;
1979 }
1980
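/*
 * Impose a total order on netobjs: shorter blobs sort first, and
 * equal-length blobs are ordered by memcmp(). Used as the comparator
 * for the per-net client name trees.
 */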
1981 static int
1982 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1983 {
1984 if (o1->len < o2->len)
1985 return -1;
1986 if (o1->len > o2->len)
1987 return 1;
1988 return memcmp(o1->data, o2->data, o1->len);
1989 }
1990
1991 static int same_name(const char *n1, const char *n2)
1992 {
1993 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1994 }
1995
1996 static int
1997 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1998 {
1999 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2000 }
2001
2002 static int
2003 same_clid(clientid_t *cl1, clientid_t *cl2)
2004 {
2005 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2006 }
2007
2008 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2009 {
2010 int i;
2011
2012 if (g1->ngroups != g2->ngroups)
2013 return false;
2014 for (i=0; i<g1->ngroups; i++)
2015 if (!gid_eq(g1->gid[i], g2->gid[i]))
2016 return false;
2017 return true;
2018 }
2019
2020 /*
2021 * RFC 3530 language requires clid_inuse be returned when the
2022 * "principal" associated with a requests differs from that previously
2023 * used. We use uid, gid's, and gss principal string as our best
2024 * approximation. We also don't want to allow non-gss use of a client
2025 * established using gss: in theory cr_principal should catch that
2026 * change, but in practice cr_principal can be null even in the gss case
2027 * since gssd doesn't always pass down a principal string.
2028 */
2029 static bool is_gss_cred(struct svc_cred *cr)
2030 {
2031 /* Is cr_flavor one of the gss "pseudoflavors"? */
2032 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2033 }
2034
2035
2036 static bool
2037 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2038 {
2039 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2040 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2041 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2042 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2043 return false;
2044 if (cr1->cr_principal == cr2->cr_principal)
2045 return true;
2046 if (!cr1->cr_principal || !cr2->cr_principal)
2047 return false;
2048 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2049 }
2050
2051 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2052 {
2053 struct svc_cred *cr = &rqstp->rq_cred;
2054 u32 service;
2055
2056 if (!cr->cr_gss_mech)
2057 return false;
2058 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2059 return service == RPC_GSS_SVC_INTEGRITY ||
2060 service == RPC_GSS_SVC_PRIVACY;
2061 }
2062
2063 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2064 {
2065 struct svc_cred *cr = &rqstp->rq_cred;
2066
2067 if (!cl->cl_mach_cred)
2068 return true;
2069 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2070 return false;
2071 if (!svc_rqst_integrity_protected(rqstp))
2072 return false;
2073 if (cl->cl_cred.cr_raw_principal)
2074 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2075 cr->cr_raw_principal);
2076 if (!cr->cr_principal)
2077 return false;
2078 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2079 }
2080
2081 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2082 {
2083 __be32 verf[2];
2084
2085 /*
2086 * This is opaque to the client, so no need to byte-swap. Use
2087 * __force to keep sparse happy
2088 */
2089 verf[0] = (__force __be32)get_seconds();
2090 verf[1] = (__force __be32)nn->clverifier_counter++;
2091 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2092 }
2093
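/*
 * A clientid is the server boot time plus a per-net counter, so clients
 * from before a reboot remain distinguishable from current ones.
 */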
2094 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2095 {
2096 clp->cl_clientid.cl_boot = nn->boot_time;
2097 clp->cl_clientid.cl_id = nn->clientid_counter++;
2098 gen_confirm(clp, nn);
2099 }
2100
2101 static struct nfs4_stid *
2102 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2103 {
2104 struct nfs4_stid *ret;
2105
2106 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2107 if (!ret || !ret->sc_type)
2108 return NULL;
2109 return ret;
2110 }
2111
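/*
 * Look up a stateid and, if its type is in @typemask, return it with an
 * extra reference; otherwise return NULL.
 */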
2112 static struct nfs4_stid *
2113 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2114 {
2115 struct nfs4_stid *s;
2116
2117 spin_lock(&cl->cl_lock);
2118 s = find_stateid_locked(cl, t);
2119 if (s != NULL) {
2120 if (typemask & s->sc_type)
2121 atomic_inc(&s->sc_count);
2122 else
2123 s = NULL;
2124 }
2125 spin_unlock(&cl->cl_lock);
2126 return s;
2127 }
2128
2129 static struct nfs4_client *create_client(struct xdr_netobj name,
2130 struct svc_rqst *rqstp, nfs4_verifier *verf)
2131 {
2132 struct nfs4_client *clp;
2133 struct sockaddr *sa = svc_addr(rqstp);
2134 int ret;
2135 struct net *net = SVC_NET(rqstp);
2136
2137 clp = alloc_client(name);
2138 if (clp == NULL)
2139 return NULL;
2140
2141 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2142 if (ret) {
2143 free_client(clp);
2144 return NULL;
2145 }
2146 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2147 clp->cl_time = get_seconds();
2148 clear_bit(0, &clp->cl_cb_slot_busy);
2149 copy_verf(clp, verf);
2150 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
2151 clp->cl_cb_session = NULL;
2152 clp->net = net;
2153 return clp;
2154 }
2155
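/* Standard rbtree insertion, keyed by compare_blob() on the client name. */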
2156 static void
2157 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2158 {
2159 struct rb_node **new = &(root->rb_node), *parent = NULL;
2160 struct nfs4_client *clp;
2161
2162 while (*new) {
2163 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2164 parent = *new;
2165
2166 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2167 new = &((*new)->rb_left);
2168 else
2169 new = &((*new)->rb_right);
2170 }
2171
2172 rb_link_node(&new_clp->cl_namenode, parent, new);
2173 rb_insert_color(&new_clp->cl_namenode, root);
2174 }
2175
2176 static struct nfs4_client *
2177 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2178 {
2179 int cmp;
2180 struct rb_node *node = root->rb_node;
2181 struct nfs4_client *clp;
2182
2183 while (node) {
2184 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2185 cmp = compare_blob(&clp->cl_name, name);
2186 if (cmp > 0)
2187 node = node->rb_left;
2188 else if (cmp < 0)
2189 node = node->rb_right;
2190 else
2191 return clp;
2192 }
2193 return NULL;
2194 }
2195
2196 static void
2197 add_to_unconfirmed(struct nfs4_client *clp)
2198 {
2199 unsigned int idhashval;
2200 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2201
2202 lockdep_assert_held(&nn->client_lock);
2203
2204 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2205 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2206 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2207 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2208 renew_client_locked(clp);
2209 }
2210
2211 static void
2212 move_to_confirmed(struct nfs4_client *clp)
2213 {
2214 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2215 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2216
2217 lockdep_assert_held(&nn->client_lock);
2218
2219 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2220 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2221 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2222 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2223 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2224 renew_client_locked(clp);
2225 }
2226
2227 static struct nfs4_client *
2228 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2229 {
2230 struct nfs4_client *clp;
2231 unsigned int idhashval = clientid_hashval(clid->cl_id);
2232
2233 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2234 if (same_clid(&clp->cl_clientid, clid)) {
2235 if ((bool)clp->cl_minorversion != sessions)
2236 return NULL;
2237 renew_client_locked(clp);
2238 return clp;
2239 }
2240 }
2241 return NULL;
2242 }
2243
2244 static struct nfs4_client *
2245 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2246 {
2247 struct list_head *tbl = nn->conf_id_hashtbl;
2248
2249 lockdep_assert_held(&nn->client_lock);
2250 return find_client_in_id_table(tbl, clid, sessions);
2251 }
2252
2253 static struct nfs4_client *
2254 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2255 {
2256 struct list_head *tbl = nn->unconf_id_hashtbl;
2257
2258 lockdep_assert_held(&nn->client_lock);
2259 return find_client_in_id_table(tbl, clid, sessions);
2260 }
2261
2262 static bool clp_used_exchangeid(struct nfs4_client *clp)
2263 {
2264 return clp->cl_exchange_flags != 0;
2265 }
2266
2267 static struct nfs4_client *
2268 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2269 {
2270 lockdep_assert_held(&nn->client_lock);
2271 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2272 }
2273
2274 static struct nfs4_client *
2275 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2276 {
2277 lockdep_assert_held(&nn->client_lock);
2278 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2279 }
2280
2281 static void
2282 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2283 {
2284 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2285 struct sockaddr *sa = svc_addr(rqstp);
2286 u32 scopeid = rpc_get_scope_id(sa);
2287 unsigned short expected_family;
2288
2289 /* Currently, we only support tcp and tcp6 for the callback channel */
2290 if (se->se_callback_netid_len == 3 &&
2291 !memcmp(se->se_callback_netid_val, "tcp", 3))
2292 expected_family = AF_INET;
2293 else if (se->se_callback_netid_len == 4 &&
2294 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2295 expected_family = AF_INET6;
2296 else
2297 goto out_err;
2298
2299 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2300 se->se_callback_addr_len,
2301 (struct sockaddr *)&conn->cb_addr,
2302 sizeof(conn->cb_addr));
2303
2304 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2305 goto out_err;
2306
2307 if (conn->cb_addr.ss_family == AF_INET6)
2308 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2309
2310 conn->cb_prog = se->se_callback_prog;
2311 conn->cb_ident = se->se_callback_ident;
2312 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2313 return;
2314 out_err:
2315 conn->cb_addr.ss_family = AF_UNSPEC;
2316 conn->cb_addrlen = 0;
2317 dprintk("NFSD: this client (clientid %08x/%08x) "
2318 "will not receive delegations\n",
2319 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2320
2321 return;
2322 }
2323
2324 /*
2325 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2326 */
2327 static void
2328 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2329 {
2330 struct xdr_buf *buf = resp->xdr.buf;
2331 struct nfsd4_slot *slot = resp->cstate.slot;
2332 unsigned int base;
2333
2334 dprintk("--> %s slot %p\n", __func__, slot);
2335
2336 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2337 slot->sl_opcnt = resp->opcnt;
2338 slot->sl_status = resp->cstate.status;
2339 free_svc_cred(&slot->sl_cred);
2340 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2341
2342 if (!nfsd4_cache_this(resp)) {
2343 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2344 return;
2345 }
2346 slot->sl_flags |= NFSD4_SLOT_CACHED;
2347
2348 base = resp->cstate.data_offset;
2349 slot->sl_datalen = buf->len - base;
2350 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2351 WARN(1, "%s: sessions DRC could not cache compound\n",
2352 __func__);
2353 return;
2354 }
2355
2356 /*
2357 * Encode the replay sequence operation from the slot values.
2358 * If cachethis is FALSE, encode the uncached rep error on the next
2359 * operation, which sets resp->p and increments resp->opcnt for
2360 * nfs4svc_encode_compoundres.
2361 *
2362 */
2363 static __be32
2364 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2365 struct nfsd4_compoundres *resp)
2366 {
2367 struct nfsd4_op *op;
2368 struct nfsd4_slot *slot = resp->cstate.slot;
2369
2370 /* Encode the replayed sequence operation */
2371 op = &args->ops[resp->opcnt - 1];
2372 nfsd4_encode_operation(resp, op);
2373
2374 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2375 return op->status;
2376 if (args->opcnt == 1) {
2377 /*
2378 * The original operation wasn't a solo sequence--we
2379 * always cache those--so this retry must not match the
2380 * original:
2381 */
2382 op->status = nfserr_seq_false_retry;
2383 } else {
2384 op = &args->ops[resp->opcnt++];
2385 op->status = nfserr_retry_uncached_rep;
2386 nfsd4_encode_operation(resp, op);
2387 }
2388 return op->status;
2389 }
2390
2391 /*
2392 * The sequence operation is not cached because we can use the slot and
2393 * session values.
2394 */
2395 static __be32
2396 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2397 struct nfsd4_sequence *seq)
2398 {
2399 struct nfsd4_slot *slot = resp->cstate.slot;
2400 struct xdr_stream *xdr = &resp->xdr;
2401 __be32 *p;
2402 __be32 status;
2403
2404 dprintk("--> %s slot %p\n", __func__, slot);
2405
2406 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2407 if (status)
2408 return status;
2409
2410 p = xdr_reserve_space(xdr, slot->sl_datalen);
2411 if (!p) {
2412 WARN_ON_ONCE(1);
2413 return nfserr_serverfault;
2414 }
2415 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2416 xdr_commit_encode(xdr);
2417
2418 resp->opcnt = slot->sl_opcnt;
2419 return slot->sl_status;
2420 }
2421
2422 /*
2423 * Set the exchange_id flags returned by the server.
2424 */
2425 static void
2426 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2427 {
2428 #ifdef CONFIG_NFSD_PNFS
2429 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2430 #else
2431 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2432 #endif
2433
2434 /* Referrals are supported, Migration is not. */
2435 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2436
2437 /* set the wire flags to return to client. */
2438 clid->flags = new->cl_exchange_flags;
2439 }
2440
2441 static bool client_has_openowners(struct nfs4_client *clp)
2442 {
2443 struct nfs4_openowner *oo;
2444
2445 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2446 if (!list_empty(&oo->oo_owner.so_stateids))
2447 return true;
2448 }
2449 return false;
2450 }
2451
2452 static bool client_has_state(struct nfs4_client *clp)
2453 {
2454 return client_has_openowners(clp)
2455 #ifdef CONFIG_NFSD_PNFS
2456 || !list_empty(&clp->cl_lo_states)
2457 #endif
2458 || !list_empty(&clp->cl_delegations)
2459 || !list_empty(&clp->cl_sessions);
2460 }
2461
2462 __be32
2463 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2464 union nfsd4_op_u *u)
2465 {
2466 struct nfsd4_exchange_id *exid = &u->exchange_id;
2467 struct nfs4_client *conf, *new;
2468 struct nfs4_client *unconf = NULL;
2469 __be32 status;
2470 char addr_str[INET6_ADDRSTRLEN];
2471 nfs4_verifier verf = exid->verifier;
2472 struct sockaddr *sa = svc_addr(rqstp);
2473 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2474 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2475
2476 rpc_ntop(sa, addr_str, sizeof(addr_str));
2477 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2478 "ip_addr=%s flags %x, spa_how %d\n",
2479 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
2480 addr_str, exid->flags, exid->spa_how);
2481
2482 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2483 return nfserr_inval;
2484
2485 new = create_client(exid->clname, rqstp, &verf);
2486 if (new == NULL)
2487 return nfserr_jukebox;
2488
2489 switch (exid->spa_how) {
2490 case SP4_MACH_CRED:
2491 exid->spo_must_enforce[0] = 0;
2492 exid->spo_must_enforce[1] = (
2493 1 << (OP_BIND_CONN_TO_SESSION - 32) |
2494 1 << (OP_EXCHANGE_ID - 32) |
2495 1 << (OP_CREATE_SESSION - 32) |
2496 1 << (OP_DESTROY_SESSION - 32) |
2497 1 << (OP_DESTROY_CLIENTID - 32));
2498
2499 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2500 1 << (OP_OPEN_DOWNGRADE) |
2501 1 << (OP_LOCKU) |
2502 1 << (OP_DELEGRETURN));
2503
2504 exid->spo_must_allow[1] &= (
2505 1 << (OP_TEST_STATEID - 32) |
2506 1 << (OP_FREE_STATEID - 32));
2507 if (!svc_rqst_integrity_protected(rqstp)) {
2508 status = nfserr_inval;
2509 goto out_nolock;
2510 }
2511 /*
2512 * Sometimes userspace doesn't give us a principal.
2513 * Which is a bug, really. Anyway, we can't enforce
2514 * MACH_CRED in that case, better to give up now:
2515 */
2516 if (!new->cl_cred.cr_principal &&
2517 !new->cl_cred.cr_raw_principal) {
2518 status = nfserr_serverfault;
2519 goto out_nolock;
2520 }
2521 new->cl_mach_cred = true;
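		/* fall through */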
2522 case SP4_NONE:
2523 break;
2524 default: /* checked by xdr code */
2525 WARN_ON_ONCE(1);
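		/* fall through */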
2526 case SP4_SSV:
2527 status = nfserr_encr_alg_unsupp;
2528 goto out_nolock;
2529 }
2530
2531 /* Cases below refer to rfc 5661 section 18.35.4: */
2532 spin_lock(&nn->client_lock);
2533 conf = find_confirmed_client_by_name(&exid->clname, nn);
2534 if (conf) {
2535 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2536 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2537
2538 if (update) {
2539 if (!clp_used_exchangeid(conf)) { /* buggy client */
2540 status = nfserr_inval;
2541 goto out;
2542 }
2543 if (!nfsd4_mach_creds_match(conf, rqstp)) {
2544 status = nfserr_wrong_cred;
2545 goto out;
2546 }
2547 if (!creds_match) { /* case 9 */
2548 status = nfserr_perm;
2549 goto out;
2550 }
2551 if (!verfs_match) { /* case 8 */
2552 status = nfserr_not_same;
2553 goto out;
2554 }
2555 /* case 6 */
2556 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2557 goto out_copy;
2558 }
2559 if (!creds_match) { /* case 3 */
2560 if (client_has_state(conf)) {
2561 status = nfserr_clid_inuse;
2562 goto out;
2563 }
2564 goto out_new;
2565 }
2566 if (verfs_match) { /* case 2 */
2567 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2568 goto out_copy;
2569 }
2570 /* case 5, client reboot */
2571 conf = NULL;
2572 goto out_new;
2573 }
2574
2575 if (update) { /* case 7 */
2576 status = nfserr_noent;
2577 goto out;
2578 }
2579
2580 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
2581 if (unconf) /* case 4, possible retry or client restart */
2582 unhash_client_locked(unconf);
2583
2584 /* case 1 (normal case) */
2585 out_new:
2586 if (conf) {
2587 status = mark_client_expired_locked(conf);
2588 if (status)
2589 goto out;
2590 }
2591 new->cl_minorversion = cstate->minorversion;
2592 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
2593 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
2594
2595 gen_clid(new, nn);
2596 add_to_unconfirmed(new);
2597 swap(new, conf);
2598 out_copy:
2599 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2600 exid->clientid.cl_id = conf->cl_clientid.cl_id;
2601
2602 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2603 nfsd4_set_ex_flags(conf, exid);
2604
2605 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2606 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2607 status = nfs_ok;
2608
2609 out:
2610 spin_unlock(&nn->client_lock);
2611 out_nolock:
2612 if (new)
2613 expire_client(new);
2614 if (unconf)
2615 expire_client(unconf);
2616 return status;
2617 }
2618
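/*
 * Compare the seqid the client sent against the slot's last-seen seqid:
 * slot_seqid + 1 is the next new request, an equal seqid is a replay
 * (or, if the slot is still busy, a retransmission of a request we are
 * still processing), and anything else is misordered.
 */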
2619 static __be32
2620 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2621 {
2622 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2623 slot_seqid);
2624
2625 /* The slot is in use, and no response has been sent. */
2626 if (slot_inuse) {
2627 if (seqid == slot_seqid)
2628 return nfserr_jukebox;
2629 else
2630 return nfserr_seq_misordered;
2631 }
2632 /* Note unsigned 32-bit arithmetic handles wraparound: */
2633 if (likely(seqid == slot_seqid + 1))
2634 return nfs_ok;
2635 if (seqid == slot_seqid)
2636 return nfserr_replay_cache;
2637 return nfserr_seq_misordered;
2638 }
2639
2640 /*
2641 * Cache the create session result in the client's single-slot DRC
2642 * by saving the xdr structure. sl_seqid has already been set.
2643 * Do this for both solo and embedded create session operations.
2644 */
2645 static void
2646 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2647 struct nfsd4_clid_slot *slot, __be32 nfserr)
2648 {
2649 slot->sl_status = nfserr;
2650 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2651 }
2652
2653 static __be32
2654 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2655 struct nfsd4_clid_slot *slot)
2656 {
2657 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2658 return slot->sl_status;
2659 }
2660
2661 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
2662 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2663 1 + /* MIN tag is a zero-length string: just the length word */ \
2664 3 + /* version, opcount, opcode */ \
2665 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2666 /* seqid, slotID, slotID, cache */ \
2667 4 ) * sizeof(__be32))
2668
2669 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2670 2 + /* verifier: AUTH_NULL, length 0 */\
2671 1 + /* status */ \
2672 1 + /* MIN tag is a zero-length string: just the length word */ \
2673 3 + /* opcount, opcode, opstatus*/ \
2674 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2675 /* seqid, slotID, slotID, slotID, status */ \
2676 5 ) * sizeof(__be32))
2677
2678 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2679 {
2680 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2681
2682 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2683 return nfserr_toosmall;
2684 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2685 return nfserr_toosmall;
2686 ca->headerpadsz = 0;
2687 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2688 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2689 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2690 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2691 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2692 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2693 /*
2694 * Note that decreasing the slot size below the client's request may
2695 * make it difficult for the client to function correctly, whereas
2696 * decreasing the number of slots will (just?) affect
2697 * performance. When short on memory we therefore prefer to
2698 * decrease the number of slots rather than their size. Clients that
2699 * request larger slots than they need will get poor results:
2700 */
2701 ca->maxreqs = nfsd4_get_drc_mem(ca);
2702 if (!ca->maxreqs)
2703 return nfserr_jukebox;
2704
2705 return nfs_ok;
2706 }
2707
2708 /*
2709 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2710 * These are based on similar macros in linux/sunrpc/msg_prot.h .
2711 */
2712 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
2713 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2714
2715 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2716 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2717
2718 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
2719 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
2720 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
2721 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2722 sizeof(__be32))
2723
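/*
 * Backchannel replies are never cached, and every callback compound
 * needs room for at least CB_SEQUENCE plus one more operation.
 */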
2724 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2725 {
2726 ca->headerpadsz = 0;
2727
2728 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2729 return nfserr_toosmall;
2730 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2731 return nfserr_toosmall;
2732 ca->maxresp_cached = 0;
2733 if (ca->maxops < 2)
2734 return nfserr_toosmall;
2735
2736 return nfs_ok;
2737 }
2738
2739 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2740 {
2741 switch (cbs->flavor) {
2742 case RPC_AUTH_NULL:
2743 case RPC_AUTH_UNIX:
2744 return nfs_ok;
2745 default:
2746 /*
2747 * GSS case: the spec doesn't allow us to return this
2748 * error. But it also doesn't allow us not to support
2749 * GSS.
2750 * I'd rather this fail hard than return some error the
2751 * client might think it can already handle:
2752 */
2753 return nfserr_encr_alg_unsupp;
2754 }
2755 }
2756
2757 __be32
2758 nfsd4_create_session(struct svc_rqst *rqstp,
2759 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
2760 {
2761 struct nfsd4_create_session *cr_ses = &u->create_session;
2762 struct sockaddr *sa = svc_addr(rqstp);
2763 struct nfs4_client *conf, *unconf;
2764 struct nfs4_client *old = NULL;
2765 struct nfsd4_session *new;
2766 struct nfsd4_conn *conn;
2767 struct nfsd4_clid_slot *cs_slot = NULL;
2768 __be32 status = 0;
2769 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2770
2771 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2772 return nfserr_inval;
2773 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2774 if (status)
2775 return status;
2776 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2777 if (status)
2778 return status;
2779 status = check_backchannel_attrs(&cr_ses->back_channel);
2780 if (status)
2781 goto out_release_drc_mem;
2782 status = nfserr_jukebox;
2783 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2784 if (!new)
2785 goto out_release_drc_mem;
2786 conn = alloc_conn_from_crses(rqstp, cr_ses);
2787 if (!conn)
2788 goto out_free_session;
2789
2790 spin_lock(&nn->client_lock);
2791 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2792 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2793 WARN_ON_ONCE(conf && unconf);
2794
2795 if (conf) {
2796 status = nfserr_wrong_cred;
2797 if (!nfsd4_mach_creds_match(conf, rqstp))
2798 goto out_free_conn;
2799 cs_slot = &conf->cl_cs_slot;
2800 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2801 if (status) {
2802 if (status == nfserr_replay_cache)
2803 status = nfsd4_replay_create_session(cr_ses, cs_slot);
2804 goto out_free_conn;
2805 }
2806 } else if (unconf) {
2807 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2808 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2809 status = nfserr_clid_inuse;
2810 goto out_free_conn;
2811 }
2812 status = nfserr_wrong_cred;
2813 if (!nfsd4_mach_creds_match(unconf, rqstp))
2814 goto out_free_conn;
2815 cs_slot = &unconf->cl_cs_slot;
2816 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2817 if (status) {
2818 /* an unconfirmed replay returns misordered */
2819 status = nfserr_seq_misordered;
2820 goto out_free_conn;
2821 }
2822 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2823 if (old) {
2824 status = mark_client_expired_locked(old);
2825 if (status) {
2826 old = NULL;
2827 goto out_free_conn;
2828 }
2829 }
2830 move_to_confirmed(unconf);
2831 conf = unconf;
2832 } else {
2833 status = nfserr_stale_clientid;
2834 goto out_free_conn;
2835 }
2836 status = nfs_ok;
2837 /* Persistent sessions are not supported */
2838 cr_ses->flags &= ~SESSION4_PERSIST;
2839 /* Upshifting from TCP to RDMA is not supported */
2840 cr_ses->flags &= ~SESSION4_RDMA;
2841
2842 init_session(rqstp, new, conf, cr_ses);
2843 nfsd4_get_session_locked(new);
2844
2845 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2846 NFS4_MAX_SESSIONID_LEN);
2847 cs_slot->sl_seqid++;
2848 cr_ses->seqid = cs_slot->sl_seqid;
2849
2850 /* cache solo and embedded create sessions under the client_lock */
2851 nfsd4_cache_create_session(cr_ses, cs_slot, status);
2852 spin_unlock(&nn->client_lock);
2853 /* init connection and backchannel */
2854 nfsd4_init_conn(rqstp, conn, new);
2855 nfsd4_put_session(new);
2856 if (old)
2857 expire_client(old);
2858 return status;
2859 out_free_conn:
2860 spin_unlock(&nn->client_lock);
2861 free_conn(conn);
2862 if (old)
2863 expire_client(old);
2864 out_free_session:
2865 __free_session(new);
2866 out_release_drc_mem:
2867 nfsd4_put_drc_mem(&cr_ses->fore_channel);
2868 return status;
2869 }
2870
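/*
 * FORE and BACK are honored as requested; the *_OR_BOTH variants are
 * collapsed to BOTH; anything else is rejected as invalid.
 */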
2871 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2872 {
2873 switch (*dir) {
2874 case NFS4_CDFC4_FORE:
2875 case NFS4_CDFC4_BACK:
2876 return nfs_ok;
2877 case NFS4_CDFC4_FORE_OR_BOTH:
2878 case NFS4_CDFC4_BACK_OR_BOTH:
2879 *dir = NFS4_CDFC4_BOTH;
2880 return nfs_ok;
2881 }
2882 return nfserr_inval;
2883 }
2884
2885 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
2886 struct nfsd4_compound_state *cstate,
2887 union nfsd4_op_u *u)
2888 {
2889 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
2890 struct nfsd4_session *session = cstate->session;
2891 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2892 __be32 status;
2893
2894 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2895 if (status)
2896 return status;
2897 spin_lock(&nn->client_lock);
2898 session->se_cb_prog = bc->bc_cb_program;
2899 session->se_cb_sec = bc->bc_cb_sec;
2900 spin_unlock(&nn->client_lock);
2901
2902 nfsd4_probe_callback(session->se_client);
2903
2904 return nfs_ok;
2905 }
2906
2907 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2908 struct nfsd4_compound_state *cstate,
2909 union nfsd4_op_u *u)
2910 {
2911 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
2912 __be32 status;
2913 struct nfsd4_conn *conn;
2914 struct nfsd4_session *session;
2915 struct net *net = SVC_NET(rqstp);
2916 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2917
2918 if (!nfsd4_last_compound_op(rqstp))
2919 return nfserr_not_only_op;
2920 spin_lock(&nn->client_lock);
2921 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2922 spin_unlock(&nn->client_lock);
2923 if (!session)
2924 goto out_no_session;
2925 status = nfserr_wrong_cred;
2926 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
2927 goto out;
2928 status = nfsd4_map_bcts_dir(&bcts->dir);
2929 if (status)
2930 goto out;
2931 conn = alloc_conn(rqstp, bcts->dir);
2932 status = nfserr_jukebox;
2933 if (!conn)
2934 goto out;
2935 nfsd4_init_conn(rqstp, conn, session);
2936 status = nfs_ok;
2937 out:
2938 nfsd4_put_session(session);
2939 out_no_session:
2940 return status;
2941 }
2942
2943 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2944 {
2945 if (!session)
2946 return false;
2947 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2948 }
2949
2950 __be32
2951 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
2952 union nfsd4_op_u *u)
2953 {
2954 struct nfsd4_destroy_session *sessionid = &u->destroy_session;
2955 struct nfsd4_session *ses;
2956 __be32 status;
2957 int ref_held_by_me = 0;
2958 struct net *net = SVC_NET(r);
2959 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2960
2961 status = nfserr_not_only_op;
2962 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2963 if (!nfsd4_last_compound_op(r))
2964 goto out;
2965 ref_held_by_me++;
2966 }
2967 dump_sessionid(__func__, &sessionid->sessionid);
2968 spin_lock(&nn->client_lock);
2969 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2970 if (!ses)
2971 goto out_client_lock;
2972 status = nfserr_wrong_cred;
2973 if (!nfsd4_mach_creds_match(ses->se_client, r))
2974 goto out_put_session;
2975 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2976 if (status)
2977 goto out_put_session;
2978 unhash_session(ses);
2979 spin_unlock(&nn->client_lock);
2980
2981 nfsd4_probe_callback_sync(ses->se_client);
2982
2983 spin_lock(&nn->client_lock);
2984 status = nfs_ok;
2985 out_put_session:
2986 nfsd4_put_session_locked(ses);
2987 out_client_lock:
2988 spin_unlock(&nn->client_lock);
2989 out:
2990 return status;
2991 }
2992
2993 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2994 {
2995 struct nfsd4_conn *c;
2996
2997 list_for_each_entry(c, &s->se_conns, cn_persession) {
2998 if (c->cn_xprt == xpt) {
2999 return c;
3000 }
3001 }
3002 return NULL;
3003 }
3004
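/*
 * If this connection is already bound to the session, free @new and
 * succeed. Otherwise bind it implicitly, unless the client requires
 * machine credentials, in which case implicit binding is refused with
 * nfserr_conn_not_bound_to_session.
 */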
3005 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3006 {
3007 struct nfs4_client *clp = ses->se_client;
3008 struct nfsd4_conn *c;
3009 __be32 status = nfs_ok;
3010 int ret;
3011
3012 spin_lock(&clp->cl_lock);
3013 c = __nfsd4_find_conn(new->cn_xprt, ses);
3014 if (c)
3015 goto out_free;
3016 status = nfserr_conn_not_bound_to_session;
3017 if (clp->cl_mach_cred)
3018 goto out_free;
3019 __nfsd4_hash_conn(new, ses);
3020 spin_unlock(&clp->cl_lock);
3021 ret = nfsd4_register_conn(new);
3022 if (ret)
3023 /* oops; xprt is already down: */
3024 nfsd4_conn_lost(&new->cn_xpt_user);
3025 return nfs_ok;
3026 out_free:
3027 spin_unlock(&clp->cl_lock);
3028 free_conn(new);
3029 return status;
3030 }
3031
3032 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3033 {
3034 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3035
3036 return args->opcnt > session->se_fchannel.maxops;
3037 }
3038
3039 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3040 struct nfsd4_session *session)
3041 {
3042 struct xdr_buf *xb = &rqstp->rq_arg;
3043
3044 return xb->len > session->se_fchannel.maxreq_sz;
3045 }
3046
3047 static bool replay_matches_cache(struct svc_rqst *rqstp,
3048 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3049 {
3050 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3051
3052 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3053 (bool)seq->cachethis)
3054 return false;
3055 /*
3056 * If there's an error then the reply can have fewer ops than
3057 * the call. But if we cached a reply with *more* ops than the
3058 * call you're sending us now, then this new call is clearly not
3059 * really a replay of the old one:
3060 */
3061 if (slot->sl_opcnt < argp->opcnt)
3062 return false;
3063 /* This is the only check explicitly called for by the spec: */
3064 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3065 return false;
3066 /*
3067 * There may be more comparisons we could actually do, but the
3068 * spec doesn't require us to catch every case where the calls
3069 * don't match (that would require caching the call as well as
3070 * the reply), so we don't bother.
3071 */
3072 return true;
3073 }
3074
3075 __be32
3076 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3077 union nfsd4_op_u *u)
3078 {
3079 struct nfsd4_sequence *seq = &u->sequence;
3080 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3081 struct xdr_stream *xdr = &resp->xdr;
3082 struct nfsd4_session *session;
3083 struct nfs4_client *clp;
3084 struct nfsd4_slot *slot;
3085 struct nfsd4_conn *conn;
3086 __be32 status;
3087 int buflen;
3088 struct net *net = SVC_NET(rqstp);
3089 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3090
3091 if (resp->opcnt != 1)
3092 return nfserr_sequence_pos;
3093
3094 /*
3095 * Will be either used or freed by nfsd4_sequence_check_conn
3096 * below.
3097 */
3098 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3099 if (!conn)
3100 return nfserr_jukebox;
3101
3102 spin_lock(&nn->client_lock);
3103 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3104 if (!session)
3105 goto out_no_session;
3106 clp = session->se_client;
3107
3108 status = nfserr_too_many_ops;
3109 if (nfsd4_session_too_many_ops(rqstp, session))
3110 goto out_put_session;
3111
3112 status = nfserr_req_too_big;
3113 if (nfsd4_request_too_big(rqstp, session))
3114 goto out_put_session;
3115
3116 status = nfserr_badslot;
3117 if (seq->slotid >= session->se_fchannel.maxreqs)
3118 goto out_put_session;
3119
3120 slot = session->se_slots[seq->slotid];
3121 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3122
3123 /* We do not negotiate the number of slots yet, so set the
3124 * maxslots to the session maxreqs, which is used to encode
3125 * sr_highest_slotid and the sr_target_slotid to maxslots */
3126 seq->maxslots = session->se_fchannel.maxreqs;
3127
3128 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3129 slot->sl_flags & NFSD4_SLOT_INUSE);
3130 if (status == nfserr_replay_cache) {
3131 status = nfserr_seq_misordered;
3132 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3133 goto out_put_session;
3134 status = nfserr_seq_false_retry;
3135 if (!replay_matches_cache(rqstp, seq, slot))
3136 goto out_put_session;
3137 cstate->slot = slot;
3138 cstate->session = session;
3139 cstate->clp = clp;
3140 /* Return the cached reply status and set cstate->status
3141 * for nfsd4_proc_compound processing */
3142 status = nfsd4_replay_cache_entry(resp, seq);
3143 cstate->status = nfserr_replay_cache;
3144 goto out;
3145 }
3146 if (status)
3147 goto out_put_session;
3148
3149 status = nfsd4_sequence_check_conn(conn, session);
3150 conn = NULL;
3151 if (status)
3152 goto out_put_session;
3153
3154 buflen = (seq->cachethis) ?
3155 session->se_fchannel.maxresp_cached :
3156 session->se_fchannel.maxresp_sz;
3157 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3158 nfserr_rep_too_big;
3159 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3160 goto out_put_session;
3161 svc_reserve(rqstp, buflen);
3162
3163 status = nfs_ok;
3164 /* Success! bump slot seqid */
3165 slot->sl_seqid = seq->seqid;
3166 slot->sl_flags |= NFSD4_SLOT_INUSE;
3167 if (seq->cachethis)
3168 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3169 else
3170 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3171
3172 cstate->slot = slot;
3173 cstate->session = session;
3174 cstate->clp = clp;
3175
3176 out:
3177 switch (clp->cl_cb_state) {
3178 case NFSD4_CB_DOWN:
3179 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3180 break;
3181 case NFSD4_CB_FAULT:
3182 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3183 break;
3184 default:
3185 seq->status_flags = 0;
3186 }
3187 if (!list_empty(&clp->cl_revoked))
3188 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3189 out_no_session:
3190 if (conn)
3191 free_conn(conn);
3192 spin_unlock(&nn->client_lock);
3193 return status;
3194 out_put_session:
3195 nfsd4_put_session_locked(session);
3196 goto out_no_session;
3197 }
3198
3199 void
3200 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3201 {
3202 struct nfsd4_compound_state *cs = &resp->cstate;
3203
3204 if (nfsd4_has_session(cs)) {
3205 if (cs->status != nfserr_replay_cache) {
3206 nfsd4_store_cache_entry(resp);
3207 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3208 }
3209 /* Drop session reference that was taken in nfsd4_sequence() */
3210 nfsd4_put_session(cs->session);
3211 } else if (cs->clp)
3212 put_client_renew(cs->clp);
3213 }
3214
3215 __be32
3216 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3217 struct nfsd4_compound_state *cstate,
3218 union nfsd4_op_u *u)
3219 {
3220 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3221 struct nfs4_client *conf, *unconf;
3222 struct nfs4_client *clp = NULL;
3223 __be32 status = 0;
3224 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3225
3226 spin_lock(&nn->client_lock);
3227 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3228 conf = find_confirmed_client(&dc->clientid, true, nn);
3229 WARN_ON_ONCE(conf && unconf);
3230
3231 if (conf) {
3232 if (client_has_state(conf)) {
3233 status = nfserr_clientid_busy;
3234 goto out;
3235 }
3236 status = mark_client_expired_locked(conf);
3237 if (status)
3238 goto out;
3239 clp = conf;
3240 } else if (unconf)
3241 clp = unconf;
3242 else {
3243 status = nfserr_stale_clientid;
3244 goto out;
3245 }
3246 if (!nfsd4_mach_creds_match(clp, rqstp)) {
3247 clp = NULL;
3248 status = nfserr_wrong_cred;
3249 goto out;
3250 }
3251 unhash_client_locked(clp);
3252 out:
3253 spin_unlock(&nn->client_lock);
3254 if (clp)
3255 expire_client(clp);
3256 return status;
3257 }
3258
3259 __be32
3260 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3261 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3262 {
3263 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3264 __be32 status = 0;
3265
3266 if (rc->rca_one_fs) {
3267 if (!cstate->current_fh.fh_dentry)
3268 return nfserr_nofilehandle;
3269 /*
3270 * We don't take advantage of the rca_one_fs case.
3271 * That's OK, it's optional, we can safely ignore it.
3272 */
3273 return nfs_ok;
3274 }
3275
3276 status = nfserr_complete_already;
3277 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3278 &cstate->session->se_client->cl_flags))
3279 goto out;
3280
3281 status = nfserr_stale_clientid;
3282 if (is_client_expired(cstate->session->se_client))
3283 /*
3284 * The following error isn't really legal.
3285 * But we only get here if the client has just explicitly
3286 * destroyed this clientid. Surely it no longer cares what
3287 * error it gets back on an operation for the dead
3288 * client.
3289 */
3290 goto out;
3291
3292 status = nfs_ok;
3293 nfsd4_client_record_create(cstate->session->se_client);
3294 out:
3295 return status;
3296 }
3297
3298 __be32
3299 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3300 union nfsd4_op_u *u)
3301 {
3302 struct nfsd4_setclientid *setclid = &u->setclientid;
3303 struct xdr_netobj clname = setclid->se_name;
3304 nfs4_verifier clverifier = setclid->se_verf;
3305 struct nfs4_client *conf, *new;
3306 struct nfs4_client *unconf = NULL;
3307 __be32 status;
3308 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3309
3310 new = create_client(clname, rqstp, &clverifier);
3311 if (new == NULL)
3312 return nfserr_jukebox;
3313 /* Cases below refer to rfc 3530 section 14.2.33: */
3314 spin_lock(&nn->client_lock);
3315 conf = find_confirmed_client_by_name(&clname, nn);
3316 if (conf && client_has_state(conf)) {
3317 /* case 0: */
3318 status = nfserr_clid_inuse;
3319 if (clp_used_exchangeid(conf))
3320 goto out;
3321 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3322 char addr_str[INET6_ADDRSTRLEN];
3323 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3324 sizeof(addr_str));
3325 dprintk("NFSD: setclientid: string in use by client "
3326 "at %s\n", addr_str);
3327 goto out;
3328 }
3329 }
3330 unconf = find_unconfirmed_client_by_name(&clname, nn);
3331 if (unconf)
3332 unhash_client_locked(unconf);
3333 if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3334 /* case 1: probable callback update */
3335 copy_clid(new, conf);
3336 gen_confirm(new, nn);
3337 } else /* case 4 (new client) or cases 2, 3 (client reboot): */
3338 gen_clid(new, nn);
3339 new->cl_minorversion = 0;
3340 gen_callback(new, setclid, rqstp);
3341 add_to_unconfirmed(new);
3342 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3343 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3344 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3345 new = NULL;
3346 status = nfs_ok;
3347 out:
3348 spin_unlock(&nn->client_lock);
3349 if (new)
3350 free_client(new);
3351 if (unconf)
3352 expire_client(unconf);
3353 return status;
3354 }
3355
3356
3357 __be32
3358 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3359 struct nfsd4_compound_state *cstate,
3360 union nfsd4_op_u *u)
3361 {
3362 struct nfsd4_setclientid_confirm *setclientid_confirm =
3363 &u->setclientid_confirm;
3364 struct nfs4_client *conf, *unconf;
3365 struct nfs4_client *old = NULL;
3366 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3367 clientid_t * clid = &setclientid_confirm->sc_clientid;
3368 __be32 status;
3369 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3370
3371 if (STALE_CLIENTID(clid, nn))
3372 return nfserr_stale_clientid;
3373
3374 spin_lock(&nn->client_lock);
3375 conf = find_confirmed_client(clid, false, nn);
3376 unconf = find_unconfirmed_client(clid, false, nn);
3377 /*
3378 * We try hard to give out unique clientids, so if we get an
3379 * attempt to confirm the same clientid with a different cred,
3380 * the client may be buggy; this should never happen.
3381 *
3382 * Nevertheless, RFC 7530 recommends INUSE for this case:
3383 */
3384 status = nfserr_clid_inuse;
3385 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3386 goto out;
3387 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3388 goto out;
3389 /* cases below refer to rfc 3530 section 14.2.34: */
3390 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3391 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3392 /* case 2: probable retransmit */
3393 status = nfs_ok;
3394 } else /* case 4: client hasn't noticed we rebooted yet? */
3395 status = nfserr_stale_clientid;
3396 goto out;
3397 }
3398 status = nfs_ok;
3399 if (conf) { /* case 1: callback update */
3400 old = unconf;
3401 unhash_client_locked(old);
3402 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3403 } else { /* case 3: normal case; new or rebooted client */
3404 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3405 if (old) {
3406 status = nfserr_clid_inuse;
3407 if (client_has_state(old)
3408 && !same_creds(&unconf->cl_cred,
3409 &old->cl_cred))
3410 goto out;
3411 status = mark_client_expired_locked(old);
3412 if (status) {
3413 old = NULL;
3414 goto out;
3415 }
3416 }
3417 move_to_confirmed(unconf);
3418 conf = unconf;
3419 }
3420 get_client_locked(conf);
3421 spin_unlock(&nn->client_lock);
3422 nfsd4_probe_callback(conf);
3423 spin_lock(&nn->client_lock);
3424 put_client_renew_locked(conf);
3425 out:
3426 spin_unlock(&nn->client_lock);
3427 if (old)
3428 expire_client(old);
3429 return status;
3430 }
3431
3432 static struct nfs4_file *nfsd4_alloc_file(void)
3433 {
3434 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3435 }
3436
3437 /* OPEN Share state helper functions */
3438 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3439 struct nfs4_file *fp)
3440 {
3441 lockdep_assert_held(&state_lock);
3442
3443 atomic_set(&fp->fi_ref, 1);
3444 spin_lock_init(&fp->fi_lock);
3445 INIT_LIST_HEAD(&fp->fi_stateids);
3446 INIT_LIST_HEAD(&fp->fi_delegations);
3447 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3448 fh_copy_shallow(&fp->fi_fhandle, fh);
3449 fp->fi_deleg_file = NULL;
3450 fp->fi_had_conflict = false;
3451 fp->fi_share_deny = 0;
3452 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3453 memset(fp->fi_access, 0, sizeof(fp->fi_access));
3454 #ifdef CONFIG_NFSD_PNFS
3455 INIT_LIST_HEAD(&fp->fi_lo_states);
3456 atomic_set(&fp->fi_lo_recalls, 0);
3457 #endif
3458 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3459 }
3460
3461 void
3462 nfsd4_free_slabs(void)
3463 {
3464 kmem_cache_destroy(odstate_slab);
3465 kmem_cache_destroy(openowner_slab);
3466 kmem_cache_destroy(lockowner_slab);
3467 kmem_cache_destroy(file_slab);
3468 kmem_cache_destroy(stateid_slab);
3469 kmem_cache_destroy(deleg_slab);
3470 }
3471
3472 int
3473 nfsd4_init_slabs(void)
3474 {
3475 openowner_slab = kmem_cache_create("nfsd4_openowners",
3476 sizeof(struct nfs4_openowner), 0, 0, NULL);
3477 if (openowner_slab == NULL)
3478 goto out;
3479 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3480 sizeof(struct nfs4_lockowner), 0, 0, NULL);
3481 if (lockowner_slab == NULL)
3482 goto out_free_openowner_slab;
3483 file_slab = kmem_cache_create("nfsd4_files",
3484 sizeof(struct nfs4_file), 0, 0, NULL);
3485 if (file_slab == NULL)
3486 goto out_free_lockowner_slab;
3487 stateid_slab = kmem_cache_create("nfsd4_stateids",
3488 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3489 if (stateid_slab == NULL)
3490 goto out_free_file_slab;
3491 deleg_slab = kmem_cache_create("nfsd4_delegations",
3492 sizeof(struct nfs4_delegation), 0, 0, NULL);
3493 if (deleg_slab == NULL)
3494 goto out_free_stateid_slab;
3495 odstate_slab = kmem_cache_create("nfsd4_odstate",
3496 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3497 if (odstate_slab == NULL)
3498 goto out_free_deleg_slab;
3499 return 0;
3500
3501 out_free_deleg_slab:
3502 kmem_cache_destroy(deleg_slab);
3503 out_free_stateid_slab:
3504 kmem_cache_destroy(stateid_slab);
3505 out_free_file_slab:
3506 kmem_cache_destroy(file_slab);
3507 out_free_lockowner_slab:
3508 kmem_cache_destroy(lockowner_slab);
3509 out_free_openowner_slab:
3510 kmem_cache_destroy(openowner_slab);
3511 out:
3512 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3513 return -ENOMEM;
3514 }
3515
3516 static void init_nfs4_replay(struct nfs4_replay *rp)
3517 {
3518 rp->rp_status = nfserr_serverfault;
3519 rp->rp_buflen = 0;
3520 rp->rp_buf = rp->rp_ibuf;
3521 mutex_init(&rp->rp_mutex);
3522 }
3523
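/*
 * NFSv4.0 replay protection is per-owner: take the owner's replay mutex
 * (and a reference on the owner) for the duration of the compound.
 * Sessions handle replays via the slot table, so this is skipped for
 * v4.1+.
 */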
3524 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3525 struct nfs4_stateowner *so)
3526 {
3527 if (!nfsd4_has_session(cstate)) {
3528 mutex_lock(&so->so_replay.rp_mutex);
3529 cstate->replay_owner = nfs4_get_stateowner(so);
3530 }
3531 }
3532
3533 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3534 {
3535 struct nfs4_stateowner *so = cstate->replay_owner;
3536
3537 if (so != NULL) {
3538 cstate->replay_owner = NULL;
3539 mutex_unlock(&so->so_replay.rp_mutex);
3540 nfs4_put_stateowner(so);
3541 }
3542 }
3543
3544 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3545 {
3546 struct nfs4_stateowner *sop;
3547
3548 sop = kmem_cache_alloc(slab, GFP_KERNEL);
3549 if (!sop)
3550 return NULL;
3551
3552 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3553 if (!sop->so_owner.data) {
3554 kmem_cache_free(slab, sop);
3555 return NULL;
3556 }
3557 sop->so_owner.len = owner->len;
3558
3559 INIT_LIST_HEAD(&sop->so_stateids);
3560 sop->so_client = clp;
3561 init_nfs4_replay(&sop->so_replay);
3562 atomic_set(&sop->so_count, 1);
3563 return sop;
3564 }
3565
3566 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3567 {
3568 lockdep_assert_held(&clp->cl_lock);
3569
3570 list_add(&oo->oo_owner.so_strhash,
3571 &clp->cl_ownerstr_hashtbl[strhashval]);
3572 list_add(&oo->oo_perclient, &clp->cl_openowners);
3573 }
3574
3575 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3576 {
3577 unhash_openowner_locked(openowner(so));
3578 }
3579
3580 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3581 {
3582 struct nfs4_openowner *oo = openowner(so);
3583
3584 kmem_cache_free(openowner_slab, oo);
3585 }
3586
3587 static const struct nfs4_stateowner_operations openowner_ops = {
3588 .so_unhash = nfs4_unhash_openowner,
3589 .so_free = nfs4_free_openowner,
3590 };
3591
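/*
 * Find an OPEN stateid on @fp belonging to this open's owner, skipping
 * lock owners. Returns the stateid with an extra reference, or NULL.
 */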
3592 static struct nfs4_ol_stateid *
3593 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3594 {
3595 struct nfs4_ol_stateid *local, *ret = NULL;
3596 struct nfs4_openowner *oo = open->op_openowner;
3597
3598 lockdep_assert_held(&fp->fi_lock);
3599
3600 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3601 /* ignore lock owners */
3602 if (local->st_stateowner->so_is_open_owner == 0)
3603 continue;
3604 if (local->st_stateowner != &oo->oo_owner)
3605 continue;
3606 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
3607 ret = local;
3608 atomic_inc(&ret->st_stid.sc_count);
3609 break;
3610 }
3611 }
3612 return ret;
3613 }
3614
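/*
 * Map closed and revoked stateid types onto the errors the client
 * should see; any other type is acceptable here.
 */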
3615 static __be32
3616 nfsd4_verify_open_stid(struct nfs4_stid *s)
3617 {
3618 __be32 ret = nfs_ok;
3619
3620 switch (s->sc_type) {
3621 default:
3622 break;
3623 case NFS4_CLOSED_STID:
3624 case NFS4_CLOSED_DELEG_STID:
3625 ret = nfserr_bad_stateid;
3626 break;
3627 case NFS4_REVOKED_DELEG_STID:
3628 ret = nfserr_deleg_revoked;
3629 }
3630 return ret;
3631 }
3632
3633 /* Lock the stateid st_mutex, and deal with races with CLOSE */
3634 static __be32
3635 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
3636 {
3637 __be32 ret;
3638
3639 mutex_lock(&stp->st_mutex);
3640 ret = nfsd4_verify_open_stid(&stp->st_stid);
3641 if (ret != nfs_ok)
3642 mutex_unlock(&stp->st_mutex);
3643 return ret;
3644 }
3645
3646 static struct nfs4_ol_stateid *
3647 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3648 {
3649 struct nfs4_ol_stateid *stp;
3650 for (;;) {
3651 spin_lock(&fp->fi_lock);
3652 stp = nfsd4_find_existing_open(fp, open);
3653 spin_unlock(&fp->fi_lock);
3654 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
3655 break;
3656 nfs4_put_stid(&stp->st_stid);
3657 }
3658 return stp;
3659 }
3660
3661 static struct nfs4_openowner *
3662 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3663 struct nfsd4_compound_state *cstate)
3664 {
3665 struct nfs4_client *clp = cstate->clp;
3666 struct nfs4_openowner *oo, *ret;
3667
3668 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3669 if (!oo)
3670 return NULL;
3671 oo->oo_owner.so_ops = &openowner_ops;
3672 oo->oo_owner.so_is_open_owner = 1;
3673 oo->oo_owner.so_seqid = open->op_seqid;
3674 oo->oo_flags = 0;
3675 if (nfsd4_has_session(cstate))
3676 oo->oo_flags |= NFS4_OO_CONFIRMED;
3677 oo->oo_time = 0;
3678 oo->oo_last_closed_stid = NULL;
3679 INIT_LIST_HEAD(&oo->oo_close_lru);
3680 spin_lock(&clp->cl_lock);
3681 ret = find_openstateowner_str_locked(strhashval, open, clp);
3682 if (ret == NULL) {
3683 hash_openowner(oo, clp, strhashval);
3684 ret = oo;
3685 } else
3686 nfs4_free_stateowner(&oo->oo_owner);
3687
3688 spin_unlock(&clp->cl_lock);
3689 return ret;
3690 }
3691
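/*
 * Initialize and hash open->op_stp, unless a racing OPEN has already
 * installed a stateid for this owner and file, in which case the
 * existing stateid is returned locked instead.
 */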
3692 static struct nfs4_ol_stateid *
3693 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3694 {
3695
3696 struct nfs4_openowner *oo = open->op_openowner;
3697 struct nfs4_ol_stateid *retstp = NULL;
3698 struct nfs4_ol_stateid *stp;
3699
3700 stp = open->op_stp;
3701 /* Take st_mutex before the spinlocks, since mutex_lock() can sleep */
3702 mutex_init(&stp->st_mutex);
3703 mutex_lock(&stp->st_mutex);
3704
3705 retry:
3706 spin_lock(&oo->oo_owner.so_client->cl_lock);
3707 spin_lock(&fp->fi_lock);
3708
3709 retstp = nfsd4_find_existing_open(fp, open);
3710 if (retstp)
3711 goto out_unlock;
3712
3713 open->op_stp = NULL;
3714 atomic_inc(&stp->st_stid.sc_count);
3715 stp->st_stid.sc_type = NFS4_OPEN_STID;
3716 INIT_LIST_HEAD(&stp->st_locks);
3717 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3718 get_nfs4_file(fp);
3719 stp->st_stid.sc_file = fp;
3720 stp->st_access_bmap = 0;
3721 stp->st_deny_bmap = 0;
3722 stp->st_openstp = NULL;
3723 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3724 list_add(&stp->st_perfile, &fp->fi_stateids);
3725
3726 out_unlock:
3727 spin_unlock(&fp->fi_lock);
3728 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3729 if (retstp) {
3730 /* Handle races with CLOSE */
3731 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
3732 nfs4_put_stid(&retstp->st_stid);
3733 goto retry;
3734 }
3735 /* Unlock the unused stateid's mutex to keep mutex tracking happy */
3736 mutex_unlock(&stp->st_mutex);
3737 stp = retstp;
3738 }
3739 return stp;
3740 }
3741
3742 /*
3743 * In the 4.0 case we need to keep the owners around a little while to handle
3744 * CLOSE replay. We do, however, still need to release any file access
3745 * held by them before returning.
3746 */
3747 static void
3748 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3749 {
3750 struct nfs4_ol_stateid *last;
3751 struct nfs4_openowner *oo = openowner(s->st_stateowner);
3752 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3753 nfsd_net_id);
3754
3755 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3756
3757 /*
3758 * We know that we hold one reference via nfsd4_close, and another
3759 * "persistent" reference for the client. If the refcount is higher
3760 * than 2, then there are still calls in progress that are using this
3761 * stateid. We can't put the sc_file reference until they are finished.
3762 * Wait for the refcount to drop to 2. Since it has been unhashed,
3763 * there should be no danger of the refcount going back up again at
3764 * this point.
3765 */
3766 wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
3767
3768 release_all_access(s);
3769 if (s->st_stid.sc_file) {
3770 put_nfs4_file(s->st_stid.sc_file);
3771 s->st_stid.sc_file = NULL;
3772 }
3773
3774 spin_lock(&nn->client_lock);
3775 last = oo->oo_last_closed_stid;
3776 oo->oo_last_closed_stid = s;
3777 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3778 oo->oo_time = get_seconds();
3779 spin_unlock(&nn->client_lock);
3780 if (last)
3781 nfs4_put_stid(&last->st_stid);
3782 }
3783
3784 /* search file_hashtbl[] for file */
3785 static struct nfs4_file *
3786 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3787 {
3788 struct nfs4_file *fp;
3789
3790 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3791 if (fh_match(&fp->fi_fhandle, fh)) {
3792 if (atomic_inc_not_zero(&fp->fi_ref))
3793 return fp;
3794 }
3795 }
3796 return NULL;
3797 }
3798
3799 struct nfs4_file *
3800 find_file(struct knfsd_fh *fh)
3801 {
3802 struct nfs4_file *fp;
3803 unsigned int hashval = file_hashval(fh);
3804
3805 rcu_read_lock();
3806 fp = find_file_locked(fh, hashval);
3807 rcu_read_unlock();
3808 return fp;
3809 }
3810
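/*
 * Optimistic RCU lookup first; on a miss, repeat the lookup under
 * state_lock so that a racing insertion can't produce a duplicate
 * before @new is hashed.
 */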
3811 static struct nfs4_file *
3812 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3813 {
3814 struct nfs4_file *fp;
3815 unsigned int hashval = file_hashval(fh);
3816
3817 rcu_read_lock();
3818 fp = find_file_locked(fh, hashval);
3819 rcu_read_unlock();
3820 if (fp)
3821 return fp;
3822
3823 spin_lock(&state_lock);
3824 fp = find_file_locked(fh, hashval);
3825 if (likely(fp == NULL)) {
3826 nfsd4_init_file(fh, hashval, new);
3827 fp = new;
3828 }
3829 spin_unlock(&state_lock);
3830
3831 return fp;
3832 }
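/*
 * Note the optimistic double-lookup pattern above: a lockless RCU
 * search first, and only on a miss do we take state_lock and search
 * again before inserting, so two racing callers can't both hash the
 * same file.  The loser simply gets the winner's nfs4_file (with a
 * reference taken via atomic_inc_not_zero()), and its unused
 * preallocation is freed later by nfsd4_cleanup_open_state().
 */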
3833
3834 /*
3835 * Called to check deny when READ with all zero stateid or
3836 * WRITE with all zero or all one stateid
3837 */
3838 static __be32
3839 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3840 {
3841 struct nfs4_file *fp;
3842 __be32 ret = nfs_ok;
3843
3844 fp = find_file(&current_fh->fh_handle);
3845 if (!fp)
3846 return ret;
3847 /* Check for conflicting share reservations */
3848 spin_lock(&fp->fi_lock);
3849 if (fp->fi_share_deny & deny_type)
3850 ret = nfserr_locked;
3851 spin_unlock(&fp->fi_lock);
3852 put_nfs4_file(fp);
3853 return ret;
3854 }
3855
3856 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3857 {
3858 struct nfs4_delegation *dp = cb_to_delegation(cb);
3859 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3860 nfsd_net_id);
3861
3862 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3863
3864 /*
3865 * We can't do this in nfsd_break_deleg_cb because it is
3866 * already holding inode->i_lock.
3867 *
3868 * If the dl_time != 0, then we know that it has already been
3869 * queued for a lease break. Don't queue it again.
3870 */
3871 spin_lock(&state_lock);
3872 if (dp->dl_time == 0) {
3873 dp->dl_time = get_seconds();
3874 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3875 }
3876 spin_unlock(&state_lock);
3877 }
3878
3879 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3880 struct rpc_task *task)
3881 {
3882 struct nfs4_delegation *dp = cb_to_delegation(cb);
3883
3884 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3885 return 1;
3886
3887 switch (task->tk_status) {
3888 case 0:
3889 return 1;
3890 case -EBADHANDLE:
3891 case -NFS4ERR_BAD_STATEID:
3892 /*
3893 * Race: client probably got cb_recall before open reply
3894 * granting delegation.
3895 */
3896 if (dp->dl_retries--) {
3897 rpc_delay(task, 2 * HZ);
3898 return 0;
3899 }
3900 /*FALLTHRU*/
3901 default:
3902 return -1;
3903 }
3904 }
3905
3906 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3907 {
3908 struct nfs4_delegation *dp = cb_to_delegation(cb);
3909
3910 nfs4_put_stid(&dp->dl_stid);
3911 }
3912
3913 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3914 .prepare = nfsd4_cb_recall_prepare,
3915 .done = nfsd4_cb_recall_done,
3916 .release = nfsd4_cb_recall_release,
3917 };
3918
3919 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3920 {
3921 /*
3922 * We're assuming the state code never drops its reference
3923 * without first removing the lease. Since we're in this lease
3924 * callback (and since the lease code is serialized by the kernel
3925 * lock) we know the server hasn't removed the lease yet, so it's
3926 * safe to take a reference.
3927 */
3928 atomic_inc(&dp->dl_stid.sc_count);
3929 nfsd4_run_cb(&dp->dl_recall);
3930 }
3931
3932 /* Called from break_lease() with i_lock held. */
3933 static bool
3934 nfsd_break_deleg_cb(struct file_lock *fl)
3935 {
3936 bool ret = false;
3937 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3938 struct nfs4_delegation *dp;
3939
3940 if (!fp) {
3941 WARN(1, "(%p)->fl_owner NULL\n", fl);
3942 return ret;
3943 }
3944 if (fp->fi_had_conflict) {
3945 WARN(1, "duplicate break on %p\n", fp);
3946 return ret;
3947 }
3948 /*
3949 * We don't want the locks code to time out the lease for us;
3950 * we'll remove it ourselves if a delegation isn't returned
3951 * in time:
3952 */
3953 fl->fl_break_time = 0;
3954
3955 spin_lock(&fp->fi_lock);
3956 fp->fi_had_conflict = true;
3957 /*
3958 * If there are no delegations on the list, then return true
3959 * so that the lease code will go ahead and delete it.
3960 */
3961 if (list_empty(&fp->fi_delegations))
3962 ret = true;
3963 else
3964 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3965 nfsd_break_one_deleg(dp);
3966 spin_unlock(&fp->fi_lock);
3967 return ret;
3968 }
3969
3970 static int
3971 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3972 struct list_head *dispose)
3973 {
3974 if (arg & F_UNLCK)
3975 return lease_modify(onlist, arg, dispose);
3976 else
3977 return -EAGAIN;
3978 }
3979
3980 static const struct lock_manager_operations nfsd_lease_mng_ops = {
3981 .lm_break = nfsd_break_deleg_cb,
3982 .lm_change = nfsd_change_deleg_cb,
3983 };
3984
3985 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3986 {
3987 if (nfsd4_has_session(cstate))
3988 return nfs_ok;
3989 if (seqid == so->so_seqid - 1)
3990 return nfserr_replay_me;
3991 if (seqid == so->so_seqid)
3992 return nfs_ok;
3993 return nfserr_bad_seqid;
3994 }
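/*
 * Example of the v4.0 seqid rules above: if so_seqid is 5, a request
 * carrying seqid 5 is new, seqid 4 is a retransmission (answered from
 * the replay cache via nfserr_replay_me), and anything else gets
 * nfserr_bad_seqid.  Sessions (v4.1+) make the check unnecessary.
 */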
3995
3996 static __be32 lookup_clientid(clientid_t *clid,
3997 struct nfsd4_compound_state *cstate,
3998 struct nfsd_net *nn)
3999 {
4000 struct nfs4_client *found;
4001
4002 if (cstate->clp) {
4003 found = cstate->clp;
4004 if (!same_clid(&found->cl_clientid, clid))
4005 return nfserr_stale_clientid;
4006 return nfs_ok;
4007 }
4008
4009 if (STALE_CLIENTID(clid, nn))
4010 return nfserr_stale_clientid;
4011
4012 /*
4013 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4014 * cached already then we know this is for v4.0 and "sessions"
4015 * will be false.
4016 */
4017 WARN_ON_ONCE(cstate->session);
4018 spin_lock(&nn->client_lock);
4019 found = find_confirmed_client(clid, false, nn);
4020 if (!found) {
4021 spin_unlock(&nn->client_lock);
4022 return nfserr_expired;
4023 }
4024 atomic_inc(&found->cl_refcount);
4025 spin_unlock(&nn->client_lock);
4026
4027 /* Cache the nfs4_client in cstate! */
4028 cstate->clp = found;
4029 return nfs_ok;
4030 }
4031
4032 __be32
4033 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4034 struct nfsd4_open *open, struct nfsd_net *nn)
4035 {
4036 clientid_t *clientid = &open->op_clientid;
4037 struct nfs4_client *clp = NULL;
4038 unsigned int strhashval;
4039 struct nfs4_openowner *oo = NULL;
4040 __be32 status;
4041
4042 if (STALE_CLIENTID(&open->op_clientid, nn))
4043 return nfserr_stale_clientid;
4044 /*
4045 * In case we need it later, after we've already created the
4046 * file and don't want to risk a further failure:
4047 */
4048 open->op_file = nfsd4_alloc_file();
4049 if (open->op_file == NULL)
4050 return nfserr_jukebox;
4051
4052 status = lookup_clientid(clientid, cstate, nn);
4053 if (status)
4054 return status;
4055 clp = cstate->clp;
4056
4057 strhashval = ownerstr_hashval(&open->op_owner);
4058 oo = find_openstateowner_str(strhashval, open, clp);
4059 open->op_openowner = oo;
4060 if (!oo) {
4061 goto new_owner;
4062 }
4063 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4064 /* Replace unconfirmed owners without checking for replay. */
4065 release_openowner(oo);
4066 open->op_openowner = NULL;
4067 goto new_owner;
4068 }
4069 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4070 if (status)
4071 return status;
4072 goto alloc_stateid;
4073 new_owner:
4074 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4075 if (oo == NULL)
4076 return nfserr_jukebox;
4077 open->op_openowner = oo;
4078 alloc_stateid:
4079 open->op_stp = nfs4_alloc_open_stateid(clp);
4080 if (!open->op_stp)
4081 return nfserr_jukebox;
4082
4083 if (nfsd4_has_session(cstate) &&
4084 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4085 open->op_odstate = alloc_clnt_odstate(clp);
4086 if (!open->op_odstate)
4087 return nfserr_jukebox;
4088 }
4089
4090 return nfs_ok;
4091 }
4092
4093 static inline __be32
4094 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4095 {
4096 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4097 return nfserr_openmode;
4098 else
4099 return nfs_ok;
4100 }
4101
4102 static int share_access_to_flags(u32 share_access)
4103 {
4104 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4105 }
4106
4107 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4108 {
4109 struct nfs4_stid *ret;
4110
4111 ret = find_stateid_by_type(cl, s,
4112 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4113 if (!ret)
4114 return NULL;
4115 return delegstateid(ret);
4116 }
4117
4118 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4119 {
4120 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4121 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4122 }
4123
4124 static __be32
4125 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4126 struct nfs4_delegation **dp)
4127 {
4128 int flags;
4129 __be32 status = nfserr_bad_stateid;
4130 struct nfs4_delegation *deleg;
4131
4132 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4133 if (deleg == NULL)
4134 goto out;
4135 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4136 nfs4_put_stid(&deleg->dl_stid);
4137 if (cl->cl_minorversion)
4138 status = nfserr_deleg_revoked;
4139 goto out;
4140 }
4141 flags = share_access_to_flags(open->op_share_access);
4142 status = nfs4_check_delegmode(deleg, flags);
4143 if (status) {
4144 nfs4_put_stid(&deleg->dl_stid);
4145 goto out;
4146 }
4147 *dp = deleg;
4148 out:
4149 if (!nfsd4_is_deleg_cur(open))
4150 return nfs_ok;
4151 if (status)
4152 return status;
4153 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4154 return nfs_ok;
4155 }
4156
4157 static inline int nfs4_access_to_access(u32 nfs4_access)
4158 {
4159 int flags = 0;
4160
4161 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4162 flags |= NFSD_MAY_READ;
4163 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4164 flags |= NFSD_MAY_WRITE;
4165 return flags;
4166 }
4167
4168 static inline __be32
4169 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4170 struct nfsd4_open *open)
4171 {
4172 struct iattr iattr = {
4173 .ia_valid = ATTR_SIZE,
4174 .ia_size = 0,
4175 };
4176 if (!open->op_truncate)
4177 return 0;
4178 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4179 return nfserr_inval;
4180 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4181 }
4182
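/*
 * Check share reservations, take a reference on the file's access
 * mode, and make sure fp->fi_fds[] holds a struct file open with the
 * needed access.  fi_lock is dropped around the blocking nfsd_open()
 * call, so the fd cache is re-checked afterwards: a file installed by
 * a racing thread wins, and ours is fput() below.
 */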
4183 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4184 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4185 struct nfsd4_open *open)
4186 {
4187 struct file *filp = NULL;
4188 __be32 status;
4189 int oflag = nfs4_access_to_omode(open->op_share_access);
4190 int access = nfs4_access_to_access(open->op_share_access);
4191 unsigned char old_access_bmap, old_deny_bmap;
4192
4193 spin_lock(&fp->fi_lock);
4194
4195 /*
4196 * Are we trying to set a deny mode that would conflict with
4197 * current access?
4198 */
4199 status = nfs4_file_check_deny(fp, open->op_share_deny);
4200 if (status != nfs_ok) {
4201 spin_unlock(&fp->fi_lock);
4202 goto out;
4203 }
4204
4205 /* set access to the file */
4206 status = nfs4_file_get_access(fp, open->op_share_access);
4207 if (status != nfs_ok) {
4208 spin_unlock(&fp->fi_lock);
4209 goto out;
4210 }
4211
4212 /* Set access bits in stateid */
4213 old_access_bmap = stp->st_access_bmap;
4214 set_access(open->op_share_access, stp);
4215
4216 /* Set new deny mask */
4217 old_deny_bmap = stp->st_deny_bmap;
4218 set_deny(open->op_share_deny, stp);
4219 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4220
4221 if (!fp->fi_fds[oflag]) {
4222 spin_unlock(&fp->fi_lock);
4223 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
4224 if (status)
4225 goto out_put_access;
4226 spin_lock(&fp->fi_lock);
4227 if (!fp->fi_fds[oflag]) {
4228 fp->fi_fds[oflag] = filp;
4229 filp = NULL;
4230 }
4231 }
4232 spin_unlock(&fp->fi_lock);
4233 if (filp)
4234 fput(filp);
4235
4236 status = nfsd4_truncate(rqstp, cur_fh, open);
4237 if (status)
4238 goto out_put_access;
4239 out:
4240 return status;
4241 out_put_access:
4242 stp->st_access_bmap = old_access_bmap;
4243 nfs4_file_put_access(fp, open->op_share_access);
4244 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4245 goto out;
4246 }
4247
4248 static __be32
4249 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4250 {
4251 __be32 status;
4252 unsigned char old_deny_bmap = stp->st_deny_bmap;
4253
4254 if (!test_access(open->op_share_access, stp))
4255 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4256
4257 /* test and set deny mode */
4258 spin_lock(&fp->fi_lock);
4259 status = nfs4_file_check_deny(fp, open->op_share_deny);
4260 if (status == nfs_ok) {
4261 set_deny(open->op_share_deny, stp);
4262 fp->fi_share_deny |=
4263 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4264 }
4265 spin_unlock(&fp->fi_lock);
4266
4267 if (status != nfs_ok)
4268 return status;
4269
4270 status = nfsd4_truncate(rqstp, cur_fh, open);
4271 if (status != nfs_ok)
4272 reset_union_bmap_deny(old_deny_bmap, stp);
4273 return status;
4274 }
4275
4276 /* Should we give out recallable state? */
4277 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4278 {
4279 if (clp->cl_cb_state == NFSD4_CB_UP)
4280 return true;
4281 /*
4282 * In the sessions case, since we don't have to establish a
4283 * separate connection for callbacks, we assume it's OK
4284 * until we hear otherwise:
4285 */
4286 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4287 }
4288
4289 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
4290 {
4291 struct file_lock *fl;
4292
4293 fl = locks_alloc_lock();
4294 if (!fl)
4295 return NULL;
4296 fl->fl_lmops = &nfsd_lease_mng_ops;
4297 fl->fl_flags = FL_DELEG;
4298 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4299 fl->fl_end = OFFSET_MAX;
4300 fl->fl_owner = (fl_owner_t)fp;
4301 fl->fl_pid = current->tgid;
4302 return fl;
4303 }
4304
4305 /**
4306 * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4307 * @dp: a pointer to the nfs4_delegation we're adding.
4308 *
4309 * Return:
4310 * %0 on success.
4311 *
4312 * On error: -EAGAIN if there was an existing delegation;
4313 * another nonzero error code in other cases.
4314 *
4315 */
4316
4317 static int nfs4_setlease(struct nfs4_delegation *dp)
4318 {
4319 struct nfs4_file *fp = dp->dl_stid.sc_file;
4320 struct file_lock *fl;
4321 struct file *filp;
4322 int status = 0;
4323
4324 fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
4325 if (!fl)
4326 return -ENOMEM;
4327 filp = find_readable_file(fp);
4328 if (!filp) {
4329 /* We should always have a readable file here */
4330 WARN_ON_ONCE(1);
4331 locks_free_lock(fl);
4332 return -EBADF;
4333 }
4334 fl->fl_file = filp;
4335 status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
4336 if (fl)
4337 locks_free_lock(fl);
4338 if (status)
4339 goto out_fput;
4340 spin_lock(&state_lock);
4341 spin_lock(&fp->fi_lock);
4342 /* Did the lease get broken before we took the lock? */
4343 status = -EAGAIN;
4344 if (fp->fi_had_conflict)
4345 goto out_unlock;
4346 /* Race breaker */
4347 if (fp->fi_deleg_file) {
4348 status = hash_delegation_locked(dp, fp);
4349 goto out_unlock;
4350 }
4351 fp->fi_deleg_file = filp;
4352 fp->fi_delegees = 0;
4353 status = hash_delegation_locked(dp, fp);
4354 spin_unlock(&fp->fi_lock);
4355 spin_unlock(&state_lock);
4356 if (status) {
4357 /* Should never happen, this is a new fi_deleg_file */
4358 WARN_ON_ONCE(1);
4359 goto out_fput;
4360 }
4361 return 0;
4362 out_unlock:
4363 spin_unlock(&fp->fi_lock);
4364 spin_unlock(&state_lock);
4365 out_fput:
4366 fput(filp);
4367 return status;
4368 }
4369
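/*
 * Set up and hash a delegation.  Only the first delegation on a file
 * has to go to the vfs via nfs4_setlease(); once fp->fi_deleg_file is
 * set, later delegations are hashed directly, after re-checking
 * fi_had_conflict under the locks in case a lease break raced in.
 */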
4370 static struct nfs4_delegation *
4371 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4372 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4373 {
4374 int status;
4375 struct nfs4_delegation *dp;
4376
4377 if (fp->fi_had_conflict)
4378 return ERR_PTR(-EAGAIN);
4379
4380 spin_lock(&state_lock);
4381 spin_lock(&fp->fi_lock);
4382 status = nfs4_get_existing_delegation(clp, fp);
4383 spin_unlock(&fp->fi_lock);
4384 spin_unlock(&state_lock);
4385
4386 if (status)
4387 return ERR_PTR(status);
4388
4389 dp = alloc_init_deleg(clp, fh, odstate);
4390 if (!dp)
4391 return ERR_PTR(-ENOMEM);
4392
4393 get_nfs4_file(fp);
4394 spin_lock(&state_lock);
4395 spin_lock(&fp->fi_lock);
4396 dp->dl_stid.sc_file = fp;
4397 if (!fp->fi_deleg_file) {
4398 spin_unlock(&fp->fi_lock);
4399 spin_unlock(&state_lock);
4400 status = nfs4_setlease(dp);
4401 goto out;
4402 }
4403 if (fp->fi_had_conflict) {
4404 status = -EAGAIN;
4405 goto out_unlock;
4406 }
4407 status = hash_delegation_locked(dp, fp);
4408 out_unlock:
4409 spin_unlock(&fp->fi_lock);
4410 spin_unlock(&state_lock);
4411 out:
4412 if (status) {
4413 put_clnt_odstate(dp->dl_clnt_odstate);
4414 nfs4_put_stid(&dp->dl_stid);
4415 return ERR_PTR(status);
4416 }
4417 return dp;
4418 }
4419
4420 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4421 {
4422 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4423 if (status == -EAGAIN)
4424 open->op_why_no_deleg = WND4_CONTENTION;
4425 else {
4426 open->op_why_no_deleg = WND4_RESOURCE;
4427 switch (open->op_deleg_want) {
4428 case NFS4_SHARE_WANT_READ_DELEG:
4429 case NFS4_SHARE_WANT_WRITE_DELEG:
4430 case NFS4_SHARE_WANT_ANY_DELEG:
4431 break;
4432 case NFS4_SHARE_WANT_CANCEL:
4433 open->op_why_no_deleg = WND4_CANCELLED;
4434 break;
4435 case NFS4_SHARE_WANT_NO_DELEG:
4436 WARN_ON_ONCE(1);
4437 }
4438 }
4439 }
4440
4441 /*
4442 * Attempt to hand out a delegation.
4443 *
4444 * Note we don't support write delegations, and won't until the vfs has
4445 * proper support for them.
4446 */
4447 static void
4448 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4449 struct nfs4_ol_stateid *stp)
4450 {
4451 struct nfs4_delegation *dp;
4452 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4453 struct nfs4_client *clp = stp->st_stid.sc_client;
4454 int cb_up;
4455 int status = 0;
4456
4457 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4458 open->op_recall = 0;
4459 switch (open->op_claim_type) {
4460 case NFS4_OPEN_CLAIM_PREVIOUS:
4461 if (!cb_up)
4462 open->op_recall = 1;
4463 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4464 goto out_no_deleg;
4465 break;
4466 case NFS4_OPEN_CLAIM_NULL:
4467 case NFS4_OPEN_CLAIM_FH:
4468 /*
4469 * Let's not give out any delegations till everyone's
4470 * had the chance to reclaim theirs, *and* until
4471 * NLM locks have all been reclaimed:
4472 */
4473 if (locks_in_grace(clp->net))
4474 goto out_no_deleg;
4475 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4476 goto out_no_deleg;
4477 /*
4478 * Also, if the file was opened for write or
4479 * create, there's a good chance the client's
4480 * about to write to it, resulting in an
4481 * immediate recall (since we don't support
4482 * write delegations):
4483 */
4484 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4485 goto out_no_deleg;
4486 if (open->op_create == NFS4_OPEN_CREATE)
4487 goto out_no_deleg;
4488 break;
4489 default:
4490 goto out_no_deleg;
4491 }
4492 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4493 if (IS_ERR(dp))
4494 goto out_no_deleg;
4495
4496 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4497
4498 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4499 STATEID_VAL(&dp->dl_stid.sc_stateid));
4500 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4501 nfs4_put_stid(&dp->dl_stid);
4502 return;
4503 out_no_deleg:
4504 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4505 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4506 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4507 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4508 open->op_recall = 1;
4509 }
4510
4511 /* 4.1 client asking for a delegation? */
4512 if (open->op_deleg_want)
4513 nfsd4_open_deleg_none_ext(open, status);
4514 return;
4515 }
4516
4517 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4518 struct nfs4_delegation *dp)
4519 {
4520 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4521 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4522 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4523 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4524 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4525 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4526 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4527 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4528 }
4529 /* Otherwise the client must be confused, wanting a delegation
4530 * it already has; in that case we don't return
4531 * NFS4_OPEN_DELEGATE_NONE_EXT or a reason.
4532 */
4533 }
4534
4535 __be32
4536 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4537 {
4538 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4539 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4540 struct nfs4_file *fp = NULL;
4541 struct nfs4_ol_stateid *stp = NULL;
4542 struct nfs4_delegation *dp = NULL;
4543 __be32 status;
4544 bool new_stp = false;
4545
4546 /*
4547 * Look up the file; if found, look up the stateid and check the open
4548 * request, and check for delegations in the process of being recalled.
4549 * If not found, create the nfs4_file struct.
4550 */
4551 fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4552 if (fp != open->op_file) {
4553 status = nfs4_check_deleg(cl, open, &dp);
4554 if (status)
4555 goto out;
4556 stp = nfsd4_find_and_lock_existing_open(fp, open);
4557 } else {
4558 open->op_file = NULL;
4559 status = nfserr_bad_stateid;
4560 if (nfsd4_is_deleg_cur(open))
4561 goto out;
4562 }
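	/*
	 * fp == open->op_file means our preallocated file was hashed, so
	 * this is the first open of the file: there can be no existing
	 * open stateid or delegation to look up.  op_file is cleared
	 * above so nfsd4_cleanup_open_state() won't free the now-hashed
	 * structure.
	 */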
4563
4564 if (!stp) {
4565 stp = init_open_stateid(fp, open);
4566 if (!open->op_stp)
4567 new_stp = true;
4568 }
4569
4570 /*
4571 * OPEN the file, or upgrade an existing OPEN.
4572 * If truncate fails, the OPEN fails.
4573 *
4574 * stp is already locked.
4575 */
4576 if (!new_stp) {
4577 /* Stateid was found, this is an OPEN upgrade */
4578 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4579 if (status) {
4580 mutex_unlock(&stp->st_mutex);
4581 goto out;
4582 }
4583 } else {
4584 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4585 if (status) {
4586 stp->st_stid.sc_type = NFS4_CLOSED_STID;
4587 release_open_stateid(stp);
4588 mutex_unlock(&stp->st_mutex);
4589 goto out;
4590 }
4591
4592 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4593 open->op_odstate);
4594 if (stp->st_clnt_odstate == open->op_odstate)
4595 open->op_odstate = NULL;
4596 }
4597
4598 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4599 mutex_unlock(&stp->st_mutex);
4600
4601 if (nfsd4_has_session(&resp->cstate)) {
4602 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4603 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4604 open->op_why_no_deleg = WND4_NOT_WANTED;
4605 goto nodeleg;
4606 }
4607 }
4608
4609 /*
4610 * Attempt to hand out a delegation. No error return, because the
4611 * OPEN succeeds even if we fail.
4612 */
4613 nfs4_open_delegation(current_fh, open, stp);
4614 nodeleg:
4615 status = nfs_ok;
4616
4617 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4618 STATEID_VAL(&stp->st_stid.sc_stateid));
4619 out:
4620 /* 4.1 client trying to upgrade/downgrade delegation? */
4621 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4622 open->op_deleg_want)
4623 nfsd4_deleg_xgrade_none_ext(open, dp);
4624
4625 if (fp)
4626 put_nfs4_file(fp);
4627 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4628 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4629 /*
4630 * To finish the open response, we just need to set the rflags.
4631 */
4632 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4633 if (nfsd4_has_session(&resp->cstate))
4634 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
4635 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
4636 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4637
4638 if (dp)
4639 nfs4_put_stid(&dp->dl_stid);
4640 if (stp)
4641 nfs4_put_stid(&stp->st_stid);
4642
4643 return status;
4644 }
4645
4646 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4647 struct nfsd4_open *open)
4648 {
4649 if (open->op_openowner) {
4650 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4651
4652 nfsd4_cstate_assign_replay(cstate, so);
4653 nfs4_put_stateowner(so);
4654 }
4655 if (open->op_file)
4656 kmem_cache_free(file_slab, open->op_file);
4657 if (open->op_stp)
4658 nfs4_put_stid(&open->op_stp->st_stid);
4659 if (open->op_odstate)
4660 kmem_cache_free(odstate_slab, open->op_odstate);
4661 }
4662
4663 __be32
4664 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4665 union nfsd4_op_u *u)
4666 {
4667 clientid_t *clid = &u->renew;
4668 struct nfs4_client *clp;
4669 __be32 status;
4670 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4671
4672 dprintk("process_renew(%08x/%08x): starting\n",
4673 clid->cl_boot, clid->cl_id);
4674 status = lookup_clientid(clid, cstate, nn);
4675 if (status)
4676 goto out;
4677 clp = cstate->clp;
4678 status = nfserr_cb_path_down;
4679 if (!list_empty(&clp->cl_delegations)
4680 && clp->cl_cb_state != NFSD4_CB_UP)
4681 goto out;
4682 status = nfs_ok;
4683 out:
4684 return status;
4685 }
4686
4687 void
4688 nfsd4_end_grace(struct nfsd_net *nn)
4689 {
4690 /* do nothing if grace period already ended */
4691 if (nn->grace_ended)
4692 return;
4693
4694 dprintk("NFSD: end of grace period\n");
4695 nn->grace_ended = true;
4696 /*
4697 * If the server goes down again right now, an NFSv4
4698 * client will still be allowed to reclaim after it comes back up,
4699 * even if it hasn't yet had a chance to reclaim state this time.
4700 */
4702 nfsd4_record_grace_done(nn);
4703 /*
4704 * At this point, NFSv4 clients can still reclaim. But if the
4705 * server crashes, any that have not yet reclaimed will be out
4706 * of luck on the next boot.
4707 *
4708 * (NFSv4.1+ clients are considered to have reclaimed once they
4709 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
4710 * have reclaimed after their first OPEN.)
4711 */
4712 locks_end_grace(&nn->nfsd4_manager);
4713 /*
4714 * At this point, and once lockd and/or any other containers
4715 * exit their grace period, further reclaims will fail and
4716 * regular locking can resume.
4717 */
4718 }
4719
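/*
 * The laundromat walks the expirable state: stale clients, delegations
 * on the recall list, v4.0 openowners parked on the close LRU, and
 * blocked-lock notifications, reaping anything older than one lease
 * period.  The return value is the number of seconds until the next
 * item would expire, which laundromat_main() below uses to re-queue
 * itself.
 */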
4720 static time_t
4721 nfs4_laundromat(struct nfsd_net *nn)
4722 {
4723 struct nfs4_client *clp;
4724 struct nfs4_openowner *oo;
4725 struct nfs4_delegation *dp;
4726 struct nfs4_ol_stateid *stp;
4727 struct nfsd4_blocked_lock *nbl;
4728 struct list_head *pos, *next, reaplist;
4729 time_t cutoff = get_seconds() - nn->nfsd4_lease;
4730 time_t t, new_timeo = nn->nfsd4_lease;
4731
4732 dprintk("NFSD: laundromat service - starting\n");
4733 nfsd4_end_grace(nn);
4734 INIT_LIST_HEAD(&reaplist);
4735 spin_lock(&nn->client_lock);
4736 list_for_each_safe(pos, next, &nn->client_lru) {
4737 clp = list_entry(pos, struct nfs4_client, cl_lru);
4738 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4739 t = clp->cl_time - cutoff;
4740 new_timeo = min(new_timeo, t);
4741 break;
4742 }
4743 if (mark_client_expired_locked(clp)) {
4744 dprintk("NFSD: client in use (clientid %08x)\n",
4745 clp->cl_clientid.cl_id);
4746 continue;
4747 }
4748 list_add(&clp->cl_lru, &reaplist);
4749 }
4750 spin_unlock(&nn->client_lock);
4751 list_for_each_safe(pos, next, &reaplist) {
4752 clp = list_entry(pos, struct nfs4_client, cl_lru);
4753 dprintk("NFSD: purging unused client (clientid %08x)\n",
4754 clp->cl_clientid.cl_id);
4755 list_del_init(&clp->cl_lru);
4756 expire_client(clp);
4757 }
4758 spin_lock(&state_lock);
4759 list_for_each_safe(pos, next, &nn->del_recall_lru) {
4760 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4761 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4762 t = dp->dl_time - cutoff;
4763 new_timeo = min(new_timeo, t);
4764 break;
4765 }
4766 WARN_ON(!unhash_delegation_locked(dp));
4767 list_add(&dp->dl_recall_lru, &reaplist);
4768 }
4769 spin_unlock(&state_lock);
4770 while (!list_empty(&reaplist)) {
4771 dp = list_first_entry(&reaplist, struct nfs4_delegation,
4772 dl_recall_lru);
4773 list_del_init(&dp->dl_recall_lru);
4774 revoke_delegation(dp);
4775 }
4776
4777 spin_lock(&nn->client_lock);
4778 while (!list_empty(&nn->close_lru)) {
4779 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4780 oo_close_lru);
4781 if (time_after((unsigned long)oo->oo_time,
4782 (unsigned long)cutoff)) {
4783 t = oo->oo_time - cutoff;
4784 new_timeo = min(new_timeo, t);
4785 break;
4786 }
4787 list_del_init(&oo->oo_close_lru);
4788 stp = oo->oo_last_closed_stid;
4789 oo->oo_last_closed_stid = NULL;
4790 spin_unlock(&nn->client_lock);
4791 nfs4_put_stid(&stp->st_stid);
4792 spin_lock(&nn->client_lock);
4793 }
4794 spin_unlock(&nn->client_lock);
4795
4796 /*
4797 * It's possible for a client to try to acquire an already held lock
4798 * that is being held for a long time, and then lose interest in it.
4799 * So, we clean out any un-revisited request after a lease period
4800 * under the assumption that the client is no longer interested.
4801 *
4802 * RFC5661, sec. 9.6 states that the client must not rely on getting
4803 * notifications and must continue to poll for locks, even when the
4804 * server supports them. Thus this shouldn't lead to clients blocking
4805 * indefinitely once the lock does become free.
4806 */
4807 BUG_ON(!list_empty(&reaplist));
4808 spin_lock(&nn->blocked_locks_lock);
4809 while (!list_empty(&nn->blocked_locks_lru)) {
4810 nbl = list_first_entry(&nn->blocked_locks_lru,
4811 struct nfsd4_blocked_lock, nbl_lru);
4812 if (time_after((unsigned long)nbl->nbl_time,
4813 (unsigned long)cutoff)) {
4814 t = nbl->nbl_time - cutoff;
4815 new_timeo = min(new_timeo, t);
4816 break;
4817 }
4818 list_move(&nbl->nbl_lru, &reaplist);
4819 list_del_init(&nbl->nbl_list);
4820 }
4821 spin_unlock(&nn->blocked_locks_lock);
4822
4823 while (!list_empty(&reaplist)) {
4824 nbl = list_first_entry(&reaplist,
4825 struct nfsd4_blocked_lock, nbl_lru);
4826 list_del_init(&nbl->nbl_lru);
4827 posix_unblock_lock(&nbl->nbl_lock);
4828 free_blocked_lock(nbl);
4829 }
4830
4831 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4832 return new_timeo;
4833 }
4834
4835 static struct workqueue_struct *laundry_wq;
4836 static void laundromat_main(struct work_struct *);
4837
4838 static void
4839 laundromat_main(struct work_struct *laundry)
4840 {
4841 time_t t;
4842 struct delayed_work *dwork = to_delayed_work(laundry);
4843 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4844 laundromat_work);
4845
4846 t = nfs4_laundromat(nn);
4847 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4848 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4849 }
4850
4851 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4852 {
4853 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4854 return nfserr_bad_stateid;
4855 return nfs_ok;
4856 }
4857
4858 static inline int
4859 access_permit_read(struct nfs4_ol_stateid *stp)
4860 {
4861 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4862 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4863 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4864 }
4865
4866 static inline int
4867 access_permit_write(struct nfs4_ol_stateid *stp)
4868 {
4869 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4870 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4871 }
4872
4873 static
4874 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4875 {
4876 __be32 status = nfserr_openmode;
4877
4878 /* For lock stateid's, we test the parent open, not the lock: */
4879 if (stp->st_openstp)
4880 stp = stp->st_openstp;
4881 if ((flags & WR_STATE) && !access_permit_write(stp))
4882 goto out;
4883 if ((flags & RD_STATE) && !access_permit_read(stp))
4884 goto out;
4885 status = nfs_ok;
4886 out:
4887 return status;
4888 }
4889
4890 static inline __be32
4891 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4892 {
4893 if (ONE_STATEID(stateid) && (flags & RD_STATE))
4894 return nfs_ok;
4895 else if (opens_in_grace(net)) {
4896 /* Answer in remaining cases depends on existence of
4897 * conflicting state; so we must wait out the grace period. */
4898 return nfserr_grace;
4899 } else if (flags & WR_STATE)
4900 return nfs4_share_conflict(current_fh,
4901 NFS4_SHARE_DENY_WRITE);
4902 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4903 return nfs4_share_conflict(current_fh,
4904 NFS4_SHARE_DENY_READ);
4905 }
4906
4907 /*
4908 * Allow READ/WRITE during grace period on recovered state only for files
4909 * that are not able to provide mandatory locking.
4910 */
4911 static inline int
4912 grace_disallows_io(struct net *net, struct inode *inode)
4913 {
4914 return opens_in_grace(net) && mandatory_lock(inode);
4915 }
4916
4917 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4918 {
4919 /*
4920 * When sessions are used the stateid generation number is ignored
4921 * when it is zero.
4922 */
4923 if (has_session && in->si_generation == 0)
4924 return nfs_ok;
4925
4926 if (in->si_generation == ref->si_generation)
4927 return nfs_ok;
4928
4929 /* If the client sends us a stateid from the future, it's buggy: */
4930 if (nfsd4_stateid_generation_after(in, ref))
4931 return nfserr_bad_stateid;
4932 /*
4933 * However, we could see a stateid from the past, even from a
4934 * non-buggy client. For example, if the client sends a lock
4935 * while some IO is outstanding, the lock may bump si_generation
4936 * while the IO is still in flight. The client could avoid that
4937 * situation by waiting for responses on all the IO requests,
4938 * but better performance may result from retrying IO that
4939 * receives an old_stateid error if requests are rarely
4940 * reordered in flight:
4941 */
4942 return nfserr_old_stateid;
4943 }
4944
4945 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4946 {
4947 if (ols->st_stateowner->so_is_open_owner &&
4948 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4949 return nfserr_bad_stateid;
4950 return nfs_ok;
4951 }
4952
4953 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4954 {
4955 struct nfs4_stid *s;
4956 __be32 status = nfserr_bad_stateid;
4957
4958 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4959 CLOSE_STATEID(stateid))
4960 return status;
4961 /* Client debugging aid. */
4962 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4963 char addr_str[INET6_ADDRSTRLEN];
4964 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4965 sizeof(addr_str));
4966 pr_warn_ratelimited("NFSD: client %s testing state ID "
4967 "with incorrect client ID\n", addr_str);
4968 return status;
4969 }
4970 spin_lock(&cl->cl_lock);
4971 s = find_stateid_locked(cl, stateid);
4972 if (!s)
4973 goto out_unlock;
4974 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
4975 if (status)
4976 goto out_unlock;
4977 switch (s->sc_type) {
4978 case NFS4_DELEG_STID:
4979 status = nfs_ok;
4980 break;
4981 case NFS4_REVOKED_DELEG_STID:
4982 status = nfserr_deleg_revoked;
4983 break;
4984 case NFS4_OPEN_STID:
4985 case NFS4_LOCK_STID:
4986 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
4987 break;
4988 default:
4989 printk("unknown stateid type %x\n", s->sc_type);
4990 /* Fallthrough */
4991 case NFS4_CLOSED_STID:
4992 case NFS4_CLOSED_DELEG_STID:
4993 status = nfserr_bad_stateid;
4994 }
4995 out_unlock:
4996 spin_unlock(&cl->cl_lock);
4997 return status;
4998 }
4999
5000 __be32
5001 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5002 stateid_t *stateid, unsigned char typemask,
5003 struct nfs4_stid **s, struct nfsd_net *nn)
5004 {
5005 __be32 status;
5006 bool return_revoked = false;
5007
5008 /*
5009 * Only return revoked delegations if explicitly asked;
5010 * otherwise we report revoked or bad_stateid status.
5011 */
5012 if (typemask & NFS4_REVOKED_DELEG_STID)
5013 return_revoked = true;
5014 else if (typemask & NFS4_DELEG_STID)
5015 typemask |= NFS4_REVOKED_DELEG_STID;
5016
5017 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5018 CLOSE_STATEID(stateid))
5019 return nfserr_bad_stateid;
5020 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5021 if (status == nfserr_stale_clientid) {
5022 if (cstate->session)
5023 return nfserr_bad_stateid;
5024 return nfserr_stale_stateid;
5025 }
5026 if (status)
5027 return status;
5028 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5029 if (!*s)
5030 return nfserr_bad_stateid;
5031 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5032 nfs4_put_stid(*s);
5033 if (cstate->minorversion)
5034 return nfserr_deleg_revoked;
5035 return nfserr_bad_stateid;
5036 }
5037 return nfs_ok;
5038 }
5039
5040 static struct file *
5041 nfs4_find_file(struct nfs4_stid *s, int flags)
5042 {
5043 if (!s)
5044 return NULL;
5045
5046 switch (s->sc_type) {
5047 case NFS4_DELEG_STID:
5048 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5049 return NULL;
5050 return get_file(s->sc_file->fi_deleg_file);
5051 case NFS4_OPEN_STID:
5052 case NFS4_LOCK_STID:
5053 if (flags & RD_STATE)
5054 return find_readable_file(s->sc_file);
5055 else
5056 return find_writeable_file(s->sc_file);
5057 break;
5058 }
5059
5060 return NULL;
5061 }
5062
5063 static __be32
5064 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
5065 {
5066 __be32 status;
5067
5068 status = nfsd4_check_openowner_confirmed(ols);
5069 if (status)
5070 return status;
5071 return nfs4_check_openmode(ols, flags);
5072 }
5073
5074 static __be32
5075 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5076 struct file **filpp, bool *tmp_file, int flags)
5077 {
5078 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5079 struct file *file;
5080 __be32 status;
5081
5082 file = nfs4_find_file(s, flags);
5083 if (file) {
5084 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5085 acc | NFSD_MAY_OWNER_OVERRIDE);
5086 if (status) {
5087 fput(file);
5088 return status;
5089 }
5090
5091 *filpp = file;
5092 } else {
5093 status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
5094 if (status)
5095 return status;
5096
5097 if (tmp_file)
5098 *tmp_file = true;
5099 }
5100
5101 return 0;
5102 }
5103
5104 /*
5105 * Checks for stateid operations: validate the stateid and map it to a file
5106 */
5107 __be32
5108 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5109 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5110 stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
5111 {
5112 struct inode *ino = d_inode(fhp->fh_dentry);
5113 struct net *net = SVC_NET(rqstp);
5114 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5115 struct nfs4_stid *s = NULL;
5116 __be32 status;
5117
5118 if (filpp)
5119 *filpp = NULL;
5120 if (tmp_file)
5121 *tmp_file = false;
5122
5123 if (grace_disallows_io(net, ino))
5124 return nfserr_grace;
5125
5126 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5127 status = check_special_stateids(net, fhp, stateid, flags);
5128 goto done;
5129 }
5130
5131 status = nfsd4_lookup_stateid(cstate, stateid,
5132 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5133 &s, nn);
5134 if (status)
5135 return status;
5136 status = check_stateid_generation(stateid, &s->sc_stateid,
5137 nfsd4_has_session(cstate));
5138 if (status)
5139 goto out;
5140
5141 switch (s->sc_type) {
5142 case NFS4_DELEG_STID:
5143 status = nfs4_check_delegmode(delegstateid(s), flags);
5144 break;
5145 case NFS4_OPEN_STID:
5146 case NFS4_LOCK_STID:
5147 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
5148 break;
5149 default:
5150 status = nfserr_bad_stateid;
5151 break;
5152 }
5153 if (status)
5154 goto out;
5155 status = nfs4_check_fh(fhp, s);
5156
5157 done:
5158 if (!status && filpp)
5159 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
5160 out:
5161 if (s)
5162 nfs4_put_stid(s);
5163 return status;
5164 }
5165
5166 /*
5167 * Test if the stateid is valid
5168 */
5169 __be32
5170 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5171 union nfsd4_op_u *u)
5172 {
5173 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5174 struct nfsd4_test_stateid_id *stateid;
5175 struct nfs4_client *cl = cstate->session->se_client;
5176
5177 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5178 stateid->ts_id_status =
5179 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5180
5181 return nfs_ok;
5182 }
5183
5184 static __be32
5185 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5186 {
5187 struct nfs4_ol_stateid *stp = openlockstateid(s);
5188 __be32 ret;
5189
5190 mutex_lock(&stp->st_mutex);
5191
5192 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5193 if (ret)
5194 goto out;
5195
5196 ret = nfserr_locks_held;
5197 if (check_for_locks(stp->st_stid.sc_file,
5198 lockowner(stp->st_stateowner)))
5199 goto out;
5200
5201 release_lock_stateid(stp);
5202 ret = nfs_ok;
5203
5204 out:
5205 mutex_unlock(&stp->st_mutex);
5206 nfs4_put_stid(s);
5207 return ret;
5208 }
5209
5210 __be32
5211 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5212 union nfsd4_op_u *u)
5213 {
5214 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5215 stateid_t *stateid = &free_stateid->fr_stateid;
5216 struct nfs4_stid *s;
5217 struct nfs4_delegation *dp;
5218 struct nfs4_client *cl = cstate->session->se_client;
5219 __be32 ret = nfserr_bad_stateid;
5220
5221 spin_lock(&cl->cl_lock);
5222 s = find_stateid_locked(cl, stateid);
5223 if (!s)
5224 goto out_unlock;
5225 switch (s->sc_type) {
5226 case NFS4_DELEG_STID:
5227 ret = nfserr_locks_held;
5228 break;
5229 case NFS4_OPEN_STID:
5230 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5231 if (ret)
5232 break;
5233 ret = nfserr_locks_held;
5234 break;
5235 case NFS4_LOCK_STID:
5236 atomic_inc(&s->sc_count);
5237 spin_unlock(&cl->cl_lock);
5238 ret = nfsd4_free_lock_stateid(stateid, s);
5239 goto out;
5240 case NFS4_REVOKED_DELEG_STID:
5241 dp = delegstateid(s);
5242 list_del_init(&dp->dl_recall_lru);
5243 spin_unlock(&cl->cl_lock);
5244 nfs4_put_stid(s);
5245 ret = nfs_ok;
5246 goto out;
5247 /* Default falls through and returns nfserr_bad_stateid */
5248 }
5249 out_unlock:
5250 spin_unlock(&cl->cl_lock);
5251 out:
5252 return ret;
5253 }
5254
5255 static inline int
5256 setlkflg(int type)
5257 {
5258 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5259 RD_STATE : WR_STATE;
5260 }
5261
5262 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5263 {
5264 struct svc_fh *current_fh = &cstate->current_fh;
5265 struct nfs4_stateowner *sop = stp->st_stateowner;
5266 __be32 status;
5267
5268 status = nfsd4_check_seqid(cstate, sop, seqid);
5269 if (status)
5270 return status;
5271 status = nfsd4_lock_ol_stateid(stp);
5272 if (status != nfs_ok)
5273 return status;
5274 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5275 if (status == nfs_ok)
5276 status = nfs4_check_fh(current_fh, &stp->st_stid);
5277 if (status != nfs_ok)
5278 mutex_unlock(&stp->st_mutex);
5279 return status;
5280 }
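/*
 * Note that on success the stateid's st_mutex is left held (taken in
 * nfsd4_lock_ol_stateid() above); it's the caller's job to unlock it
 * once it has finished mutating the stateid.
 */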
5281
5282 /*
5283 * Checks for sequence id mutating operations.
5284 */
5285 static __be32
5286 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5287 stateid_t *stateid, char typemask,
5288 struct nfs4_ol_stateid **stpp,
5289 struct nfsd_net *nn)
5290 {
5291 __be32 status;
5292 struct nfs4_stid *s;
5293 struct nfs4_ol_stateid *stp = NULL;
5294
5295 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5296 seqid, STATEID_VAL(stateid));
5297
5298 *stpp = NULL;
5299 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5300 if (status)
5301 return status;
5302 stp = openlockstateid(s);
5303 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5304
5305 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5306 if (!status)
5307 *stpp = stp;
5308 else
5309 nfs4_put_stid(&stp->st_stid);
5310 return status;
5311 }
5312
5313 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5314 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5315 {
5316 __be32 status;
5317 struct nfs4_openowner *oo;
5318 struct nfs4_ol_stateid *stp;
5319
5320 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5321 NFS4_OPEN_STID, &stp, nn);
5322 if (status)
5323 return status;
5324 oo = openowner(stp->st_stateowner);
5325 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5326 mutex_unlock(&stp->st_mutex);
5327 nfs4_put_stid(&stp->st_stid);
5328 return nfserr_bad_stateid;
5329 }
5330 *stpp = stp;
5331 return nfs_ok;
5332 }
5333
5334 __be32
5335 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5336 union nfsd4_op_u *u)
5337 {
5338 struct nfsd4_open_confirm *oc = &u->open_confirm;
5339 __be32 status;
5340 struct nfs4_openowner *oo;
5341 struct nfs4_ol_stateid *stp;
5342 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5343
5344 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5345 cstate->current_fh.fh_dentry);
5346
5347 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5348 if (status)
5349 return status;
5350
5351 status = nfs4_preprocess_seqid_op(cstate,
5352 oc->oc_seqid, &oc->oc_req_stateid,
5353 NFS4_OPEN_STID, &stp, nn);
5354 if (status)
5355 goto out;
5356 oo = openowner(stp->st_stateowner);
5357 status = nfserr_bad_stateid;
5358 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5359 mutex_unlock(&stp->st_mutex);
5360 goto put_stateid;
5361 }
5362 oo->oo_flags |= NFS4_OO_CONFIRMED;
5363 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5364 mutex_unlock(&stp->st_mutex);
5365 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5366 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5367
5368 nfsd4_client_record_create(oo->oo_owner.so_client);
5369 status = nfs_ok;
5370 put_stateid:
5371 nfs4_put_stid(&stp->st_stid);
5372 out:
5373 nfsd4_bump_seqid(cstate, status);
5374 return status;
5375 }
5376
5377 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5378 {
5379 if (!test_access(access, stp))
5380 return;
5381 nfs4_file_put_access(stp->st_stid.sc_file, access);
5382 clear_access(access, stp);
5383 }
5384
5385 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5386 {
5387 switch (to_access) {
5388 case NFS4_SHARE_ACCESS_READ:
5389 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5390 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5391 break;
5392 case NFS4_SHARE_ACCESS_WRITE:
5393 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5394 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5395 break;
5396 case NFS4_SHARE_ACCESS_BOTH:
5397 break;
5398 default:
5399 WARN_ON_ONCE(1);
5400 }
5401 }
5402
5403 __be32
5404 nfsd4_open_downgrade(struct svc_rqst *rqstp,
5405 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5406 {
5407 struct nfsd4_open_downgrade *od = &u->open_downgrade;
5408 __be32 status;
5409 struct nfs4_ol_stateid *stp;
5410 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5411
5412 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5413 cstate->current_fh.fh_dentry);
5414
5415 /* We don't yet support WANT bits: */
5416 if (od->od_deleg_want)
5417 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5418 od->od_deleg_want);
5419
5420 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5421 &od->od_stateid, &stp, nn);
5422 if (status)
5423 goto out;
5424 status = nfserr_inval;
5425 if (!test_access(od->od_share_access, stp)) {
5426 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5427 stp->st_access_bmap, od->od_share_access);
5428 goto put_stateid;
5429 }
5430 if (!test_deny(od->od_share_deny, stp)) {
5431 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5432 stp->st_deny_bmap, od->od_share_deny);
5433 goto put_stateid;
5434 }
5435 nfs4_stateid_downgrade(stp, od->od_share_access);
5436 reset_union_bmap_deny(od->od_share_deny, stp);
5437 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5438 status = nfs_ok;
5439 put_stateid:
5440 mutex_unlock(&stp->st_mutex);
5441 nfs4_put_stid(&stp->st_stid);
5442 out:
5443 nfsd4_bump_seqid(cstate, status);
5444 return status;
5445 }
5446
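/*
 * Unhash a closed open stateid.  NFSv4.1+ clients have sessions to
 * handle CLOSE replay, so the stateid can be put immediately; for v4.0
 * the unhashed stateid is instead parked on the close LRU (see
 * move_to_close_lru()) so that a replayed CLOSE can still be answered.
 */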
5447 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5448 {
5449 struct nfs4_client *clp = s->st_stid.sc_client;
5450 bool unhashed;
5451 LIST_HEAD(reaplist);
5452
5453 spin_lock(&clp->cl_lock);
5454 unhashed = unhash_open_stateid(s, &reaplist);
5455
5456 if (clp->cl_minorversion) {
5457 if (unhashed)
5458 put_ol_stateid_locked(s, &reaplist);
5459 spin_unlock(&clp->cl_lock);
5460 free_ol_stateid_reaplist(&reaplist);
5461 } else {
5462 spin_unlock(&clp->cl_lock);
5463 free_ol_stateid_reaplist(&reaplist);
5464 if (unhashed)
5465 move_to_close_lru(s, clp->net);
5466 }
5467 }
5468
5469 /*
5470 * nfs4_unlock_state() called after encode
5471 */
5472 __be32
5473 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5474 union nfsd4_op_u *u)
5475 {
5476 struct nfsd4_close *close = &u->close;
5477 __be32 status;
5478 struct nfs4_ol_stateid *stp;
5479 struct net *net = SVC_NET(rqstp);
5480 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5481
5482 dprintk("NFSD: nfsd4_close on file %pd\n",
5483 cstate->current_fh.fh_dentry);
5484
5485 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5486 &close->cl_stateid,
5487 NFS4_OPEN_STID|NFS4_CLOSED_STID,
5488 &stp, nn);
5489 nfsd4_bump_seqid(cstate, status);
5490 if (status)
5491 goto out;
5492
5493 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5494 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5495
5496 nfsd4_close_open_stateid(stp);
5497 mutex_unlock(&stp->st_mutex);
5498
5499 /* See RFC5661 section 18.2.4 */
5500 if (stp->st_stid.sc_client->cl_minorversion)
5501 memcpy(&close->cl_stateid, &close_stateid,
5502 sizeof(close->cl_stateid));
5503
5504 /* put reference from nfs4_preprocess_seqid_op */
5505 nfs4_put_stid(&stp->st_stid);
5506 out:
5507 return status;
5508 }
5509
5510 __be32
5511 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5512 union nfsd4_op_u *u)
5513 {
5514 struct nfsd4_delegreturn *dr = &u->delegreturn;
5515 struct nfs4_delegation *dp;
5516 stateid_t *stateid = &dr->dr_stateid;
5517 struct nfs4_stid *s;
5518 __be32 status;
5519 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5520
5521 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5522 return status;
5523
5524 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
5525 if (status)
5526 goto out;
5527 dp = delegstateid(s);
5528 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
5529 if (status)
5530 goto put_stateid;
5531
5532 destroy_delegation(dp);
5533 put_stateid:
5534 nfs4_put_stid(&dp->dl_stid);
5535 out:
5536 return status;
5537 }
5538
5539 static inline u64
5540 end_offset(u64 start, u64 len)
5541 {
5542 u64 end;
5543
5544 end = start + len;
5545 return end >= start ? end : NFS4_MAX_UINT64;
5546 }
5547
5548 /* last octet in a range */
5549 static inline u64
5550 last_byte_offset(u64 start, u64 len)
5551 {
5552 u64 end;
5553
5554 WARN_ON_ONCE(!len);
5555 end = start + len;
5556 return end > start ? end - 1 : NFS4_MAX_UINT64;
5557 }
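/*
 * Example: start = 5, len = 3 covers bytes 5..7, so end_offset()
 * returns 8 and last_byte_offset() returns 7.  If start + len wraps
 * around 2^64, both treat the range as extending to NFS4_MAX_UINT64,
 * i.e. to EOF.
 */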
5558
5559 /*
5560 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5561 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5562 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
5563 * locking, this prevents us from being completely protocol-compliant. The
5564 * real solution to this problem is to start using unsigned file offsets in
5565 * the VFS, but this is a very deep change!
5566 */
5567 static inline void
5568 nfs4_transform_lock_offset(struct file_lock *lock)
5569 {
5570 if (lock->fl_start < 0)
5571 lock->fl_start = OFFSET_MAX;
5572 if (lock->fl_end < 0)
5573 lock->fl_end = OFFSET_MAX;
5574 }
5575
5576 static fl_owner_t
5577 nfsd4_fl_get_owner(fl_owner_t owner)
5578 {
5579 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5580
5581 nfs4_get_stateowner(&lo->lo_owner);
5582 return owner;
5583 }
5584
5585 static void
5586 nfsd4_fl_put_owner(fl_owner_t owner)
5587 {
5588 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5589
5590 if (lo)
5591 nfs4_put_stateowner(&lo->lo_owner);
5592 }
5593
5594 static void
5595 nfsd4_lm_notify(struct file_lock *fl)
5596 {
5597 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
5598 struct net *net = lo->lo_owner.so_client->net;
5599 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5600 struct nfsd4_blocked_lock *nbl = container_of(fl,
5601 struct nfsd4_blocked_lock, nbl_lock);
5602 bool queue = false;
5603
5604 /* An empty list means that something else is going to be using it */
5605 spin_lock(&nn->blocked_locks_lock);
5606 if (!list_empty(&nbl->nbl_list)) {
5607 list_del_init(&nbl->nbl_list);
5608 list_del_init(&nbl->nbl_lru);
5609 queue = true;
5610 }
5611 spin_unlock(&nn->blocked_locks_lock);
5612
5613 if (queue)
5614 nfsd4_run_cb(&nbl->nbl_cb);
5615 }
5616
5617 static const struct lock_manager_operations nfsd_posix_mng_ops = {
5618 .lm_notify = nfsd4_lm_notify,
5619 .lm_get_owner = nfsd4_fl_get_owner,
5620 .lm_put_owner = nfsd4_fl_put_owner,
5621 };
5622
5623 static inline void
5624 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5625 {
5626 struct nfs4_lockowner *lo;
5627
5628 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
5629 lo = (struct nfs4_lockowner *) fl->fl_owner;
5630 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5631 lo->lo_owner.so_owner.len, GFP_KERNEL);
5632 if (!deny->ld_owner.data)
5633 /* We just don't care that much */
5634 goto nevermind;
5635 deny->ld_owner.len = lo->lo_owner.so_owner.len;
5636 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
5637 } else {
5638 nevermind:
5639 deny->ld_owner.len = 0;
5640 deny->ld_owner.data = NULL;
5641 deny->ld_clientid.cl_boot = 0;
5642 deny->ld_clientid.cl_id = 0;
5643 }
5644 deny->ld_start = fl->fl_start;
5645 deny->ld_length = NFS4_MAX_UINT64;
5646 if (fl->fl_end != NFS4_MAX_UINT64)
5647 deny->ld_length = fl->fl_end - fl->fl_start + 1;
5648 deny->ld_type = NFS4_READ_LT;
5649 if (fl->fl_type != F_RDLCK)
5650 deny->ld_type = NFS4_WRITE_LT;
5651 }
5652
5653 static struct nfs4_lockowner *
5654 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
5655 {
5656 unsigned int strhashval = ownerstr_hashval(owner);
5657 struct nfs4_stateowner *so;
5658
5659 lockdep_assert_held(&clp->cl_lock);
5660
5661 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5662 so_strhash) {
5663 if (so->so_is_open_owner)
5664 continue;
5665 if (same_owner_str(so, owner))
5666 return lockowner(nfs4_get_stateowner(so));
5667 }
5668 return NULL;
5669 }
5670
5671 static struct nfs4_lockowner *
5672 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5673 {
5674 struct nfs4_lockowner *lo;
5675
5676 spin_lock(&clp->cl_lock);
5677 lo = find_lockowner_str_locked(clp, owner);
5678 spin_unlock(&clp->cl_lock);
5679 return lo;
5680 }
5681
5682 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5683 {
5684 unhash_lockowner_locked(lockowner(sop));
5685 }
5686
5687 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5688 {
5689 struct nfs4_lockowner *lo = lockowner(sop);
5690
5691 kmem_cache_free(lockowner_slab, lo);
5692 }
5693
5694 static const struct nfs4_stateowner_operations lockowner_ops = {
5695 .so_unhash = nfs4_unhash_lockowner,
5696 .so_free = nfs4_free_lockowner,
5697 };
5698
5699 /*
5700 * Alloc a lock owner structure.
5701 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
5702 * occurred.
5703 *
5704 * strhashval = ownerstr_hashval
5705 */
5706 static struct nfs4_lockowner *
5707 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5708 struct nfs4_ol_stateid *open_stp,
5709 struct nfsd4_lock *lock)
5710 {
5711 struct nfs4_lockowner *lo, *ret;
5712
5713 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5714 if (!lo)
5715 return NULL;
5716 INIT_LIST_HEAD(&lo->lo_blocked);
5717 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5718 lo->lo_owner.so_is_open_owner = 0;
5719 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5720 lo->lo_owner.so_ops = &lockowner_ops;
5721 spin_lock(&clp->cl_lock);
5722 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5723 if (ret == NULL) {
5724 list_add(&lo->lo_owner.so_strhash,
5725 &clp->cl_ownerstr_hashtbl[strhashval]);
5726 ret = lo;
5727 } else
5728 nfs4_free_stateowner(&lo->lo_owner);
5729
5730 spin_unlock(&clp->cl_lock);
5731 return ret;
5732 }
5733
5734 static void
5735 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5736 struct nfs4_file *fp, struct inode *inode,
5737 struct nfs4_ol_stateid *open_stp)
5738 {
5739 struct nfs4_client *clp = lo->lo_owner.so_client;
5740
5741 lockdep_assert_held(&clp->cl_lock);
5742
5743 atomic_inc(&stp->st_stid.sc_count);
5744 stp->st_stid.sc_type = NFS4_LOCK_STID;
5745 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5746 get_nfs4_file(fp);
5747 stp->st_stid.sc_file = fp;
5748 stp->st_access_bmap = 0;
5749 stp->st_deny_bmap = open_stp->st_deny_bmap;
5750 stp->st_openstp = open_stp;
5751 mutex_init(&stp->st_mutex);
5752 list_add(&stp->st_locks, &open_stp->st_locks);
5753 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5754 spin_lock(&fp->fi_lock);
5755 list_add(&stp->st_perfile, &fp->fi_stateids);
5756 spin_unlock(&fp->fi_lock);
5757 }
5758
5759 static struct nfs4_ol_stateid *
5760 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5761 {
5762 struct nfs4_ol_stateid *lst;
5763 struct nfs4_client *clp = lo->lo_owner.so_client;
5764
5765 lockdep_assert_held(&clp->cl_lock);
5766
5767 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5768 if (lst->st_stid.sc_file == fp) {
5769 atomic_inc(&lst->st_stid.sc_count);
5770 return lst;
5771 }
5772 }
5773 return NULL;
5774 }
5775
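/*
 * Find an existing lock stateid for this lockowner/file pair, or create
 * one. The stateid can't be allocated while holding cl_lock, so we drop
 * the lock around the allocation and then recheck for a racing insertion
 * before initializing the new stateid.
 */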
5776 static struct nfs4_ol_stateid *
5777 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5778 struct inode *inode, struct nfs4_ol_stateid *ost,
5779 bool *new)
5780 {
5781 struct nfs4_stid *ns = NULL;
5782 struct nfs4_ol_stateid *lst;
5783 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5784 struct nfs4_client *clp = oo->oo_owner.so_client;
5785
5786 spin_lock(&clp->cl_lock);
5787 lst = find_lock_stateid(lo, fi);
5788 if (lst == NULL) {
5789 spin_unlock(&clp->cl_lock);
5790 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
5791 if (ns == NULL)
5792 return NULL;
5793
5794 spin_lock(&clp->cl_lock);
5795 lst = find_lock_stateid(lo, fi);
5796 if (likely(!lst)) {
5797 lst = openlockstateid(ns);
5798 init_lock_stateid(lst, lo, fi, inode, ost);
5799 ns = NULL;
5800 *new = true;
5801 }
5802 }
5803 spin_unlock(&clp->cl_lock);
5804 if (ns)
5805 nfs4_put_stid(ns);
5806 return lst;
5807 }
5808
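/*
 * Returns nonzero if the requested lock range is invalid: a length of
 * zero is never valid, and any length other than the special "lock to
 * EOF" value NFS4_MAX_UINT64 must keep offset + length within the 64-bit
 * range. "length > ~offset" is that overflow test, since ~offset is the
 * number of bytes left between offset and the end of the range; e.g.
 * offset 10 with length NFS4_MAX_UINT64 - 5 would wrap and is rejected.
 */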
5809 static int
5810 check_lock_length(u64 offset, u64 length)
5811 {
5812 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5813 (length > ~offset)));
5814 }
5815
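/*
 * Make sure this lock stateid holds a reference on the given access mode
 * of its open file. Caller must hold fp->fi_lock.
 */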
5816 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5817 {
5818 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5819
5820 lockdep_assert_held(&fp->fi_lock);
5821
5822 if (test_access(access, lock_stp))
5823 return;
5824 __nfs4_file_get_access(fp, access);
5825 set_access(access, lock_stp);
5826 }
5827
5828 static __be32
5829 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5830 struct nfs4_ol_stateid *ost,
5831 struct nfsd4_lock *lock,
5832 struct nfs4_ol_stateid **plst, bool *new)
5833 {
5834 __be32 status;
5835 struct nfs4_file *fi = ost->st_stid.sc_file;
5836 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5837 struct nfs4_client *cl = oo->oo_owner.so_client;
5838 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
5839 struct nfs4_lockowner *lo;
5840 struct nfs4_ol_stateid *lst;
5841 unsigned int strhashval;
5842 bool hashed;
5843
5844 lo = find_lockowner_str(cl, &lock->lk_new_owner);
5845 if (!lo) {
5846 strhashval = ownerstr_hashval(&lock->lk_new_owner);
5847 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5848 if (lo == NULL)
5849 return nfserr_jukebox;
5850 } else {
5851 /* with an existing lockowner, seqids must be the same */
5852 status = nfserr_bad_seqid;
5853 if (!cstate->minorversion &&
5854 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5855 goto out;
5856 }
5857
5858 retry:
5859 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5860 if (lst == NULL) {
5861 status = nfserr_jukebox;
5862 goto out;
5863 }
5864
5865 mutex_lock(&lst->st_mutex);
5866
5867 /* See if it's still hashed to avoid race with FREE_STATEID */
5868 spin_lock(&cl->cl_lock);
5869 hashed = !list_empty(&lst->st_perfile);
5870 spin_unlock(&cl->cl_lock);
5871
5872 if (!hashed) {
5873 mutex_unlock(&lst->st_mutex);
5874 nfs4_put_stid(&lst->st_stid);
5875 goto retry;
5876 }
5877 status = nfs_ok;
5878 *plst = lst;
5879 out:
5880 nfs4_put_stateowner(&lo->lo_owner);
5881 return status;
5882 }
5883
5884 /*
5885 * LOCK operation
5886 */
5887 __be32
5888 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5889 union nfsd4_op_u *u)
5890 {
5891 struct nfsd4_lock *lock = &u->lock;
5892 struct nfs4_openowner *open_sop = NULL;
5893 struct nfs4_lockowner *lock_sop = NULL;
5894 struct nfs4_ol_stateid *lock_stp = NULL;
5895 struct nfs4_ol_stateid *open_stp = NULL;
5896 struct nfs4_file *fp;
5897 struct file *filp = NULL;
5898 struct nfsd4_blocked_lock *nbl = NULL;
5899 struct file_lock *file_lock = NULL;
5900 struct file_lock *conflock = NULL;
5901 __be32 status = 0;
5902 int lkflg;
5903 int err;
5904 bool new = false;
5905 unsigned char fl_type;
5906 unsigned int fl_flags = FL_POSIX;
5907 struct net *net = SVC_NET(rqstp);
5908 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5909
	dprintk("NFSD: nfsd4_lock: start=%lld length=%lld\n",
5911 (long long) lock->lk_offset,
5912 (long long) lock->lk_length);
5913
5914 if (check_lock_length(lock->lk_offset, lock->lk_length))
5915 return nfserr_inval;
5916
	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, NFSD_MAY_LOCK);
	if (status) {
5919 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5920 return status;
5921 }
5922
5923 if (lock->lk_is_new) {
5924 if (nfsd4_has_session(cstate))
5925 /* See rfc 5661 18.10.3: given clientid is ignored: */
5926 memcpy(&lock->lk_new_clientid,
5927 &cstate->session->se_client->cl_clientid,
5928 sizeof(clientid_t));
5929
5930 status = nfserr_stale_clientid;
5931 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5932 goto out;
5933
5934 /* validate and update open stateid and open seqid */
5935 status = nfs4_preprocess_confirmed_seqid_op(cstate,
5936 lock->lk_new_open_seqid,
5937 &lock->lk_new_open_stateid,
5938 &open_stp, nn);
5939 if (status)
5940 goto out;
5941 mutex_unlock(&open_stp->st_mutex);
5942 open_sop = openowner(open_stp->st_stateowner);
5943 status = nfserr_bad_stateid;
5944 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5945 &lock->lk_new_clientid))
5946 goto out;
5947 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5948 &lock_stp, &new);
5949 } else {
5950 status = nfs4_preprocess_seqid_op(cstate,
5951 lock->lk_old_lock_seqid,
5952 &lock->lk_old_lock_stateid,
5953 NFS4_LOCK_STID, &lock_stp, nn);
5954 }
5955 if (status)
5956 goto out;
5957 lock_sop = lockowner(lock_stp->st_stateowner);
5958
5959 lkflg = setlkflg(lock->lk_type);
5960 status = nfs4_check_openmode(lock_stp, lkflg);
5961 if (status)
5962 goto out;
5963
5964 status = nfserr_grace;
5965 if (locks_in_grace(net) && !lock->lk_reclaim)
5966 goto out;
5967 status = nfserr_no_grace;
5968 if (!locks_in_grace(net) && lock->lk_reclaim)
5969 goto out;
5970
5971 fp = lock_stp->st_stid.sc_file;
5972 switch (lock->lk_type) {
5973 case NFS4_READW_LT:
5974 if (nfsd4_has_session(cstate))
5975 fl_flags |= FL_SLEEP;
5976 /* Fallthrough */
5977 case NFS4_READ_LT:
5978 spin_lock(&fp->fi_lock);
5979 filp = find_readable_file_locked(fp);
5980 if (filp)
5981 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5982 spin_unlock(&fp->fi_lock);
5983 fl_type = F_RDLCK;
5984 break;
5985 case NFS4_WRITEW_LT:
5986 if (nfsd4_has_session(cstate))
5987 fl_flags |= FL_SLEEP;
5988 /* Fallthrough */
5989 case NFS4_WRITE_LT:
5990 spin_lock(&fp->fi_lock);
5991 filp = find_writeable_file_locked(fp);
5992 if (filp)
5993 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5994 spin_unlock(&fp->fi_lock);
5995 fl_type = F_WRLCK;
5996 break;
5997 default:
5998 status = nfserr_inval;
5999 goto out;
6000 }
6001
6002 if (!filp) {
6003 status = nfserr_openmode;
6004 goto out;
6005 }
6006
6007 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6008 if (!nbl) {
6009 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6010 status = nfserr_jukebox;
6011 goto out;
6012 }
6013
6014 file_lock = &nbl->nbl_lock;
6015 file_lock->fl_type = fl_type;
6016 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6017 file_lock->fl_pid = current->tgid;
6018 file_lock->fl_file = filp;
6019 file_lock->fl_flags = fl_flags;
6020 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6021 file_lock->fl_start = lock->lk_offset;
6022 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6023 nfs4_transform_lock_offset(file_lock);
6024
6025 conflock = locks_alloc_lock();
6026 if (!conflock) {
6027 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6028 status = nfserr_jukebox;
6029 goto out;
6030 }
6031
6032 if (fl_flags & FL_SLEEP) {
6033 nbl->nbl_time = jiffies;
6034 spin_lock(&nn->blocked_locks_lock);
6035 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6036 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6037 spin_unlock(&nn->blocked_locks_lock);
6038 }
6039
6040 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
6041 switch (err) {
6042 case 0: /* success! */
6043 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6044 status = 0;
6045 break;
6046 case FILE_LOCK_DEFERRED:
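		/* the block is queued; it may yet be granted, so don't free it */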
6047 nbl = NULL;
6048 /* Fallthrough */
6049 case -EAGAIN: /* conflock holds conflicting lock */
6050 status = nfserr_denied;
6051 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6052 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6053 break;
6054 case -EDEADLK:
6055 status = nfserr_deadlock;
6056 break;
6057 default:
6058 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6059 status = nfserrno(err);
6060 break;
6061 }
6062 out:
6063 if (nbl) {
6064 /* dequeue it if we queued it before */
6065 if (fl_flags & FL_SLEEP) {
6066 spin_lock(&nn->blocked_locks_lock);
6067 list_del_init(&nbl->nbl_list);
6068 list_del_init(&nbl->nbl_lru);
6069 spin_unlock(&nn->blocked_locks_lock);
6070 }
6071 free_blocked_lock(nbl);
6072 }
6073 if (filp)
6074 fput(filp);
6075 if (lock_stp) {
6076 /* Bump seqid manually if the 4.0 replay owner is openowner */
6077 if (cstate->replay_owner &&
6078 cstate->replay_owner != &lock_sop->lo_owner &&
6079 seqid_mutating_err(ntohl(status)))
6080 lock_sop->lo_owner.so_seqid++;
6081
6082 mutex_unlock(&lock_stp->st_mutex);
6083
6084 /*
6085 * If this is a new, never-before-used stateid, and we are
6086 * returning an error, then just go ahead and release it.
6087 */
6088 if (status && new)
6089 release_lock_stateid(lock_stp);
6090
6091 nfs4_put_stid(&lock_stp->st_stid);
6092 }
6093 if (open_stp)
6094 nfs4_put_stid(&open_stp->st_stid);
6095 nfsd4_bump_seqid(cstate, status);
6096 if (conflock)
6097 locks_free_lock(conflock);
6098 return status;
6099 }
6100
6101 /*
6102 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6103 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock. (Arguably test_lock should be done with an inode
 * operation instead.)
6106 */
6107 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6108 {
6109 struct file *file;
6110 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
6111 if (!err) {
6112 err = nfserrno(vfs_test_lock(file, lock));
6113 fput(file);
6114 }
6115 return err;
6116 }
6117
6118 /*
6119 * LOCKT operation
6120 */
6121 __be32
6122 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6123 union nfsd4_op_u *u)
6124 {
6125 struct nfsd4_lockt *lockt = &u->lockt;
6126 struct file_lock *file_lock = NULL;
6127 struct nfs4_lockowner *lo = NULL;
6128 __be32 status;
6129 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6130
6131 if (locks_in_grace(SVC_NET(rqstp)))
6132 return nfserr_grace;
6133
6134 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6135 return nfserr_inval;
6136
6137 if (!nfsd4_has_session(cstate)) {
6138 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6139 if (status)
6140 goto out;
6141 }
6142
	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		goto out;
6145
6146 file_lock = locks_alloc_lock();
6147 if (!file_lock) {
6148 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6149 status = nfserr_jukebox;
6150 goto out;
6151 }
6152
6153 switch (lockt->lt_type) {
6154 case NFS4_READ_LT:
6155 case NFS4_READW_LT:
6156 file_lock->fl_type = F_RDLCK;
6157 break;
6158 case NFS4_WRITE_LT:
6159 case NFS4_WRITEW_LT:
6160 file_lock->fl_type = F_WRLCK;
6161 break;
6162 default:
6163 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6164 status = nfserr_inval;
6165 goto out;
6166 }
6167
6168 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6169 if (lo)
6170 file_lock->fl_owner = (fl_owner_t)lo;
6171 file_lock->fl_pid = current->tgid;
6172 file_lock->fl_flags = FL_POSIX;
6173
6174 file_lock->fl_start = lockt->lt_offset;
6175 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6176
6177 nfs4_transform_lock_offset(file_lock);
6178
6179 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6180 if (status)
6181 goto out;
6182
6183 if (file_lock->fl_type != F_UNLCK) {
6184 status = nfserr_denied;
6185 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6186 }
6187 out:
6188 if (lo)
6189 nfs4_put_stateowner(&lo->lo_owner);
6190 if (file_lock)
6191 locks_free_lock(file_lock);
6192 return status;
6193 }
6194
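/*
 * LOCKU operation
 */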
6195 __be32
6196 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6197 union nfsd4_op_u *u)
6198 {
6199 struct nfsd4_locku *locku = &u->locku;
6200 struct nfs4_ol_stateid *stp;
6201 struct file *filp = NULL;
6202 struct file_lock *file_lock = NULL;
6203 __be32 status;
6204 int err;
6205 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6206
	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
6208 (long long) locku->lu_offset,
6209 (long long) locku->lu_length);
6210
6211 if (check_lock_length(locku->lu_offset, locku->lu_length))
6212 return nfserr_inval;
6213
6214 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6215 &locku->lu_stateid, NFS4_LOCK_STID,
6216 &stp, nn);
6217 if (status)
6218 goto out;
6219 filp = find_any_file(stp->st_stid.sc_file);
6220 if (!filp) {
6221 status = nfserr_lock_range;
6222 goto put_stateid;
6223 }
6224 file_lock = locks_alloc_lock();
6225 if (!file_lock) {
6226 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6227 status = nfserr_jukebox;
6228 goto fput;
6229 }
6230
6231 file_lock->fl_type = F_UNLCK;
6232 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6233 file_lock->fl_pid = current->tgid;
6234 file_lock->fl_file = filp;
6235 file_lock->fl_flags = FL_POSIX;
6236 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6237 file_lock->fl_start = locku->lu_offset;
6238
6239 file_lock->fl_end = last_byte_offset(locku->lu_offset,
6240 locku->lu_length);
6241 nfs4_transform_lock_offset(file_lock);
6242
6243 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
6244 if (err) {
6245 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6246 goto out_nfserr;
6247 }
6248 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6249 fput:
6250 fput(filp);
6251 put_stateid:
6252 mutex_unlock(&stp->st_mutex);
6253 nfs4_put_stid(&stp->st_stid);
6254 out:
6255 nfsd4_bump_seqid(cstate, status);
6256 if (file_lock)
6257 locks_free_lock(file_lock);
6258 return status;
6259
6260 out_nfserr:
6261 status = nfserrno(err);
6262 goto fput;
6263 }
6264
6265 /*
6266 * returns
6267 * true: locks held by lockowner
6268 * false: no locks held by lockowner
6269 */
6270 static bool
6271 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
6272 {
6273 struct file_lock *fl;
6274 int status = false;
6275 struct file *filp = find_any_file(fp);
6276 struct inode *inode;
6277 struct file_lock_context *flctx;
6278
6279 if (!filp) {
6280 /* Any valid lock stateid should have some sort of access */
6281 WARN_ON_ONCE(1);
6282 return status;
6283 }
6284
6285 inode = file_inode(filp);
6286 flctx = inode->i_flctx;
6287
6288 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6289 spin_lock(&flctx->flc_lock);
6290 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6291 if (fl->fl_owner == (fl_owner_t)lowner) {
6292 status = true;
6293 break;
6294 }
6295 }
6296 spin_unlock(&flctx->flc_lock);
6297 }
6298 fput(filp);
6299 return status;
6300 }
6301
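/*
 * RELEASE_LOCKOWNER operation (NFSv4.0 only): fails with
 * nfserr_locks_held if the lockowner still holds any locks; otherwise
 * the owner is unhashed and all of its lock stateids are released.
 */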
6302 __be32
6303 nfsd4_release_lockowner(struct svc_rqst *rqstp,
6304 struct nfsd4_compound_state *cstate,
6305 union nfsd4_op_u *u)
6306 {
6307 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6308 clientid_t *clid = &rlockowner->rl_clientid;
6309 struct nfs4_stateowner *sop;
6310 struct nfs4_lockowner *lo = NULL;
6311 struct nfs4_ol_stateid *stp;
6312 struct xdr_netobj *owner = &rlockowner->rl_owner;
6313 unsigned int hashval = ownerstr_hashval(owner);
6314 __be32 status;
6315 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6316 struct nfs4_client *clp;
	LIST_HEAD(reaplist);
6318
6319 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6320 clid->cl_boot, clid->cl_id);
6321
6322 status = lookup_clientid(clid, cstate, nn);
6323 if (status)
6324 return status;
6325
6326 clp = cstate->clp;
6327 /* Find the matching lock stateowner */
6328 spin_lock(&clp->cl_lock);
6329 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6330 so_strhash) {
6331
6332 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6333 continue;
6334
6335 /* see if there are still any locks associated with it */
6336 lo = lockowner(sop);
6337 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6338 if (check_for_locks(stp->st_stid.sc_file, lo)) {
6339 status = nfserr_locks_held;
6340 spin_unlock(&clp->cl_lock);
6341 return status;
6342 }
6343 }
6344
6345 nfs4_get_stateowner(sop);
6346 break;
6347 }
6348 if (!lo) {
6349 spin_unlock(&clp->cl_lock);
6350 return status;
6351 }
6352
6353 unhash_lockowner_locked(lo);
6354 while (!list_empty(&lo->lo_owner.so_stateids)) {
6355 stp = list_first_entry(&lo->lo_owner.so_stateids,
6356 struct nfs4_ol_stateid,
6357 st_perstateowner);
6358 WARN_ON(!unhash_lock_stateid(stp));
6359 put_ol_stateid_locked(stp, &reaplist);
6360 }
6361 spin_unlock(&clp->cl_lock);
6362 free_ol_stateid_reaplist(&reaplist);
6363 remove_blocked_locks(lo);
6364 nfs4_put_stateowner(&lo->lo_owner);
6365
6366 return status;
6367 }
6368
6369 static inline struct nfs4_client_reclaim *
6370 alloc_reclaim(void)
6371 {
6372 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6373 }
6374
6375 bool
6376 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6377 {
6378 struct nfs4_client_reclaim *crp;
6379
6380 crp = nfsd4_find_reclaim_client(name, nn);
6381 return (crp && crp->cr_clp);
6382 }
6383
/*
 * On failure no reclaim record is stored, so all reclaim bets are off:
 * the client will get nfserr_no_grace when it later tries to reclaim.
 */
6387 struct nfs4_client_reclaim *
6388 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6389 {
6390 unsigned int strhashval;
6391 struct nfs4_client_reclaim *crp;
6392
6393 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6394 crp = alloc_reclaim();
6395 if (crp) {
6396 strhashval = clientstr_hashval(name);
6397 INIT_LIST_HEAD(&crp->cr_strhash);
6398 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6399 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6400 crp->cr_clp = NULL;
6401 nn->reclaim_str_hashtbl_size++;
6402 }
6403 return crp;
6404 }
6405
6406 void
6407 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6408 {
6409 list_del(&crp->cr_strhash);
6410 kfree(crp);
6411 nn->reclaim_str_hashtbl_size--;
6412 }
6413
6414 void
6415 nfs4_release_reclaim(struct nfsd_net *nn)
6416 {
6417 struct nfs4_client_reclaim *crp = NULL;
6418 int i;
6419
6420 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6421 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6422 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6423 struct nfs4_client_reclaim, cr_strhash);
6424 nfs4_remove_reclaim_record(crp, nn);
6425 }
6426 }
6427 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6428 }
6429
/*
 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
6432 struct nfs4_client_reclaim *
6433 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6434 {
6435 unsigned int strhashval;
6436 struct nfs4_client_reclaim *crp = NULL;
6437
6438 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
6439
6440 strhashval = clientstr_hashval(recdir);
6441 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6442 if (same_name(crp->cr_recdir, recdir)) {
6443 return crp;
6444 }
6445 }
6446 return NULL;
6447 }
6448
6449 /*
6450 * Called from OPEN. Look for clientid in reclaim list.
6451 */
6452 __be32
6453 nfs4_check_open_reclaim(clientid_t *clid,
6454 struct nfsd4_compound_state *cstate,
6455 struct nfsd_net *nn)
6456 {
6457 __be32 status;
6458
6459 /* find clientid in conf_id_hashtbl */
6460 status = lookup_clientid(clid, cstate, nn);
6461 if (status)
6462 return nfserr_reclaim_bad;
6463
6464 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6465 return nfserr_no_grace;
6466
6467 if (nfsd4_client_record_check(cstate->clp))
6468 return nfserr_reclaim_bad;
6469
6470 return nfs_ok;
6471 }
6472
6473 #ifdef CONFIG_NFSD_FAULT_INJECTION
6474 static inline void
6475 put_client(struct nfs4_client *clp)
6476 {
6477 atomic_dec(&clp->cl_refcount);
6478 }
6479
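/* Caller must hold nn->client_lock. */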
6480 static struct nfs4_client *
6481 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6482 {
6483 struct nfs4_client *clp;
6484 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6485 nfsd_net_id);
6486
6487 if (!nfsd_netns_ready(nn))
6488 return NULL;
6489
6490 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6491 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6492 return clp;
6493 }
6494 return NULL;
6495 }
6496
6497 u64
6498 nfsd_inject_print_clients(void)
6499 {
6500 struct nfs4_client *clp;
6501 u64 count = 0;
6502 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6503 nfsd_net_id);
6504 char buf[INET6_ADDRSTRLEN];
6505
6506 if (!nfsd_netns_ready(nn))
6507 return 0;
6508
6509 spin_lock(&nn->client_lock);
6510 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6511 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6512 pr_info("NFS Client: %s\n", buf);
6513 ++count;
6514 }
6515 spin_unlock(&nn->client_lock);
6516
6517 return count;
6518 }
6519
6520 u64
6521 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
6522 {
6523 u64 count = 0;
6524 struct nfs4_client *clp;
6525 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6526 nfsd_net_id);
6527
6528 if (!nfsd_netns_ready(nn))
6529 return count;
6530
6531 spin_lock(&nn->client_lock);
6532 clp = nfsd_find_client(addr, addr_size);
6533 if (clp) {
6534 if (mark_client_expired_locked(clp) == nfs_ok)
6535 ++count;
6536 else
6537 clp = NULL;
6538 }
6539 spin_unlock(&nn->client_lock);
6540
6541 if (clp)
6542 expire_client(clp);
6543
6544 return count;
6545 }
6546
6547 u64
6548 nfsd_inject_forget_clients(u64 max)
6549 {
6550 u64 count = 0;
6551 struct nfs4_client *clp, *next;
6552 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6553 nfsd_net_id);
6554 LIST_HEAD(reaplist);
6555
6556 if (!nfsd_netns_ready(nn))
6557 return count;
6558
6559 spin_lock(&nn->client_lock);
6560 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6561 if (mark_client_expired_locked(clp) == nfs_ok) {
6562 list_add(&clp->cl_lru, &reaplist);
6563 if (max != 0 && ++count >= max)
6564 break;
6565 }
6566 }
6567 spin_unlock(&nn->client_lock);
6568
6569 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6570 expire_client(clp);
6571
6572 return count;
6573 }
6574
6575 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6576 const char *type)
6577 {
6578 char buf[INET6_ADDRSTRLEN];
6579 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6580 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6581 }
6582
6583 static void
6584 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6585 struct list_head *collect)
6586 {
6587 struct nfs4_client *clp = lst->st_stid.sc_client;
6588 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6589 nfsd_net_id);
6590
6591 if (!collect)
6592 return;
6593
6594 lockdep_assert_held(&nn->client_lock);
6595 atomic_inc(&clp->cl_refcount);
6596 list_add(&lst->st_locks, collect);
6597 }
6598
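/*
 * Walk all lock stateids held by clp. If "func" is given it is applied
 * to each stateid, and stateids for which it returns true are added to
 * "collect" along with a client reference. Returns the number of lock
 * stateids visited, stopping once "max" is reached (0 means no limit).
 */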
6599 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
6600 struct list_head *collect,
6601 bool (*func)(struct nfs4_ol_stateid *))
6602 {
6603 struct nfs4_openowner *oop;
6604 struct nfs4_ol_stateid *stp, *st_next;
6605 struct nfs4_ol_stateid *lst, *lst_next;
6606 u64 count = 0;
6607
6608 spin_lock(&clp->cl_lock);
6609 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
6610 list_for_each_entry_safe(stp, st_next,
6611 &oop->oo_owner.so_stateids, st_perstateowner) {
6612 list_for_each_entry_safe(lst, lst_next,
6613 &stp->st_locks, st_locks) {
6614 if (func) {
6615 if (func(lst))
6616 nfsd_inject_add_lock_to_list(lst,
6617 collect);
6618 }
6619 ++count;
6620 /*
6621 * Despite the fact that these functions deal
6622 * with 64-bit integers for "count", we must
6623 * ensure that it doesn't blow up the
6624 * clp->cl_refcount. Throw a warning if we
6625 * start to approach INT_MAX here.
6626 */
6627 WARN_ON_ONCE(count == (INT_MAX / 2));
6628 if (count == max)
6629 goto out;
6630 }
6631 }
6632 }
6633 out:
6634 spin_unlock(&clp->cl_lock);
6635
6636 return count;
6637 }
6638
6639 static u64
6640 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6641 u64 max)
6642 {
6643 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
6644 }
6645
6646 static u64
6647 nfsd_print_client_locks(struct nfs4_client *clp)
6648 {
6649 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6650 nfsd_print_count(clp, count, "locked files");
6651 return count;
6652 }
6653
6654 u64
6655 nfsd_inject_print_locks(void)
6656 {
6657 struct nfs4_client *clp;
6658 u64 count = 0;
6659 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6660 nfsd_net_id);
6661
6662 if (!nfsd_netns_ready(nn))
6663 return 0;
6664
6665 spin_lock(&nn->client_lock);
6666 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6667 count += nfsd_print_client_locks(clp);
6668 spin_unlock(&nn->client_lock);
6669
6670 return count;
6671 }
6672
6673 static void
6674 nfsd_reap_locks(struct list_head *reaplist)
6675 {
6676 struct nfs4_client *clp;
6677 struct nfs4_ol_stateid *stp, *next;
6678
6679 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6680 list_del_init(&stp->st_locks);
6681 clp = stp->st_stid.sc_client;
6682 nfs4_put_stid(&stp->st_stid);
6683 put_client(clp);
6684 }
6685 }
6686
6687 u64
6688 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6689 {
	u64 count = 0;
6691 struct nfs4_client *clp;
6692 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6693 nfsd_net_id);
6694 LIST_HEAD(reaplist);
6695
6696 if (!nfsd_netns_ready(nn))
6697 return count;
6698
6699 spin_lock(&nn->client_lock);
6700 clp = nfsd_find_client(addr, addr_size);
6701 if (clp)
6702 count = nfsd_collect_client_locks(clp, &reaplist, 0);
6703 spin_unlock(&nn->client_lock);
6704 nfsd_reap_locks(&reaplist);
6705 return count;
6706 }
6707
6708 u64
6709 nfsd_inject_forget_locks(u64 max)
6710 {
6711 u64 count = 0;
6712 struct nfs4_client *clp;
6713 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6714 nfsd_net_id);
6715 LIST_HEAD(reaplist);
6716
6717 if (!nfsd_netns_ready(nn))
6718 return count;
6719
6720 spin_lock(&nn->client_lock);
6721 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6722 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6723 if (max != 0 && count >= max)
6724 break;
6725 }
6726 spin_unlock(&nn->client_lock);
6727 nfsd_reap_locks(&reaplist);
6728 return count;
6729 }
6730
6731 static u64
6732 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6733 struct list_head *collect,
6734 void (*func)(struct nfs4_openowner *))
6735 {
6736 struct nfs4_openowner *oop, *next;
6737 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6738 nfsd_net_id);
6739 u64 count = 0;
6740
6741 lockdep_assert_held(&nn->client_lock);
6742
6743 spin_lock(&clp->cl_lock);
6744 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6745 if (func) {
6746 func(oop);
6747 if (collect) {
6748 atomic_inc(&clp->cl_refcount);
6749 list_add(&oop->oo_perclient, collect);
6750 }
6751 }
6752 ++count;
6753 /*
6754 * Despite the fact that these functions deal with
6755 * 64-bit integers for "count", we must ensure that
6756 * it doesn't blow up the clp->cl_refcount. Throw a
6757 * warning if we start to approach INT_MAX here.
6758 */
6759 WARN_ON_ONCE(count == (INT_MAX / 2));
6760 if (count == max)
6761 break;
6762 }
6763 spin_unlock(&clp->cl_lock);
6764
6765 return count;
6766 }
6767
6768 static u64
6769 nfsd_print_client_openowners(struct nfs4_client *clp)
6770 {
6771 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6772
6773 nfsd_print_count(clp, count, "openowners");
6774 return count;
6775 }
6776
6777 static u64
6778 nfsd_collect_client_openowners(struct nfs4_client *clp,
6779 struct list_head *collect, u64 max)
6780 {
6781 return nfsd_foreach_client_openowner(clp, max, collect,
6782 unhash_openowner_locked);
6783 }
6784
6785 u64
6786 nfsd_inject_print_openowners(void)
6787 {
6788 struct nfs4_client *clp;
6789 u64 count = 0;
6790 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6791 nfsd_net_id);
6792
6793 if (!nfsd_netns_ready(nn))
6794 return 0;
6795
6796 spin_lock(&nn->client_lock);
6797 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6798 count += nfsd_print_client_openowners(clp);
6799 spin_unlock(&nn->client_lock);
6800
6801 return count;
6802 }
6803
6804 static void
6805 nfsd_reap_openowners(struct list_head *reaplist)
6806 {
6807 struct nfs4_client *clp;
6808 struct nfs4_openowner *oop, *next;
6809
6810 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6811 list_del_init(&oop->oo_perclient);
6812 clp = oop->oo_owner.so_client;
6813 release_openowner(oop);
6814 put_client(clp);
6815 }
6816 }
6817
6818 u64
6819 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6820 size_t addr_size)
6821 {
	u64 count = 0;
6823 struct nfs4_client *clp;
6824 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6825 nfsd_net_id);
6826 LIST_HEAD(reaplist);
6827
6828 if (!nfsd_netns_ready(nn))
6829 return count;
6830
6831 spin_lock(&nn->client_lock);
6832 clp = nfsd_find_client(addr, addr_size);
6833 if (clp)
6834 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6835 spin_unlock(&nn->client_lock);
6836 nfsd_reap_openowners(&reaplist);
6837 return count;
6838 }
6839
6840 u64
6841 nfsd_inject_forget_openowners(u64 max)
6842 {
6843 u64 count = 0;
6844 struct nfs4_client *clp;
6845 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6846 nfsd_net_id);
6847 LIST_HEAD(reaplist);
6848
6849 if (!nfsd_netns_ready(nn))
6850 return count;
6851
6852 spin_lock(&nn->client_lock);
6853 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6854 count += nfsd_collect_client_openowners(clp, &reaplist,
6855 max - count);
6856 if (max != 0 && count >= max)
6857 break;
6858 }
6859 spin_unlock(&nn->client_lock);
6860 nfsd_reap_openowners(&reaplist);
6861 return count;
6862 }
6863
6864 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6865 struct list_head *victims)
6866 {
6867 struct nfs4_delegation *dp, *next;
6868 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6869 nfsd_net_id);
6870 u64 count = 0;
6871
6872 lockdep_assert_held(&nn->client_lock);
6873
6874 spin_lock(&state_lock);
6875 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6876 if (victims) {
6877 /*
6878 * It's not safe to mess with delegations that have a
6879 * non-zero dl_time. They might have already been broken
6880 * and could be processed by the laundromat outside of
6881 * the state_lock. Just leave them be.
6882 */
6883 if (dp->dl_time != 0)
6884 continue;
6885
6886 atomic_inc(&clp->cl_refcount);
6887 WARN_ON(!unhash_delegation_locked(dp));
6888 list_add(&dp->dl_recall_lru, victims);
6889 }
6890 ++count;
6891 /*
6892 * Despite the fact that these functions deal with
6893 * 64-bit integers for "count", we must ensure that
6894 * it doesn't blow up the clp->cl_refcount. Throw a
6895 * warning if we start to approach INT_MAX here.
6896 */
6897 WARN_ON_ONCE(count == (INT_MAX / 2));
6898 if (count == max)
6899 break;
6900 }
6901 spin_unlock(&state_lock);
6902 return count;
6903 }
6904
6905 static u64
6906 nfsd_print_client_delegations(struct nfs4_client *clp)
6907 {
6908 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6909
6910 nfsd_print_count(clp, count, "delegations");
6911 return count;
6912 }
6913
6914 u64
6915 nfsd_inject_print_delegations(void)
6916 {
6917 struct nfs4_client *clp;
6918 u64 count = 0;
6919 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6920 nfsd_net_id);
6921
6922 if (!nfsd_netns_ready(nn))
6923 return 0;
6924
6925 spin_lock(&nn->client_lock);
6926 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6927 count += nfsd_print_client_delegations(clp);
6928 spin_unlock(&nn->client_lock);
6929
6930 return count;
6931 }
6932
6933 static void
6934 nfsd_forget_delegations(struct list_head *reaplist)
6935 {
6936 struct nfs4_client *clp;
6937 struct nfs4_delegation *dp, *next;
6938
6939 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6940 list_del_init(&dp->dl_recall_lru);
6941 clp = dp->dl_stid.sc_client;
6942 revoke_delegation(dp);
6943 put_client(clp);
6944 }
6945 }
6946
6947 u64
6948 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6949 size_t addr_size)
6950 {
6951 u64 count = 0;
6952 struct nfs4_client *clp;
6953 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6954 nfsd_net_id);
6955 LIST_HEAD(reaplist);
6956
6957 if (!nfsd_netns_ready(nn))
6958 return count;
6959
6960 spin_lock(&nn->client_lock);
6961 clp = nfsd_find_client(addr, addr_size);
6962 if (clp)
6963 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6964 spin_unlock(&nn->client_lock);
6965
6966 nfsd_forget_delegations(&reaplist);
6967 return count;
6968 }
6969
6970 u64
6971 nfsd_inject_forget_delegations(u64 max)
6972 {
6973 u64 count = 0;
6974 struct nfs4_client *clp;
6975 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6976 nfsd_net_id);
6977 LIST_HEAD(reaplist);
6978
6979 if (!nfsd_netns_ready(nn))
6980 return count;
6981
6982 spin_lock(&nn->client_lock);
6983 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6984 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6985 if (max != 0 && count >= max)
6986 break;
6987 }
6988 spin_unlock(&nn->client_lock);
6989 nfsd_forget_delegations(&reaplist);
6990 return count;
6991 }
6992
6993 static void
6994 nfsd_recall_delegations(struct list_head *reaplist)
6995 {
6996 struct nfs4_client *clp;
6997 struct nfs4_delegation *dp, *next;
6998
6999 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
7000 list_del_init(&dp->dl_recall_lru);
7001 clp = dp->dl_stid.sc_client;
7002 /*
7003 * We skipped all entries that had a zero dl_time before,
7004 * so we can now reset the dl_time back to 0. If a delegation
7005 * break comes in now, then it won't make any difference since
7006 * we're recalling it either way.
7007 */
7008 spin_lock(&state_lock);
7009 dp->dl_time = 0;
7010 spin_unlock(&state_lock);
7011 nfsd_break_one_deleg(dp);
7012 put_client(clp);
7013 }
7014 }
7015
7016 u64
7017 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
7018 size_t addr_size)
7019 {
7020 u64 count = 0;
7021 struct nfs4_client *clp;
7022 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7023 nfsd_net_id);
7024 LIST_HEAD(reaplist);
7025
7026 if (!nfsd_netns_ready(nn))
7027 return count;
7028
7029 spin_lock(&nn->client_lock);
7030 clp = nfsd_find_client(addr, addr_size);
7031 if (clp)
7032 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7033 spin_unlock(&nn->client_lock);
7034
7035 nfsd_recall_delegations(&reaplist);
7036 return count;
7037 }
7038
7039 u64
7040 nfsd_inject_recall_delegations(u64 max)
7041 {
7042 u64 count = 0;
7043 struct nfs4_client *clp, *next;
7044 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7045 nfsd_net_id);
7046 LIST_HEAD(reaplist);
7047
7048 if (!nfsd_netns_ready(nn))
7049 return count;
7050
7051 spin_lock(&nn->client_lock);
7052 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7053 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
7055 break;
7056 }
7057 spin_unlock(&nn->client_lock);
7058 nfsd_recall_delegations(&reaplist);
7059 return count;
7060 }
7061 #endif /* CONFIG_NFSD_FAULT_INJECTION */
7062
7063 /*
7064 * Since the lifetime of a delegation isn't limited to that of an open, a
7065 * client may quite reasonably hang on to a delegation as long as it has
7066 * the inode cached. This becomes an obvious problem the first time a
7067 * client's inode cache approaches the size of the server's total memory.
7068 *
7069 * For now we avoid this problem by imposing a hard limit on the number
7070 * of delegations, which varies according to the server's memory size.
7071 */
7072 static void
7073 set_max_delegations(void)
7074 {
7075 /*
7076 * Allow at most 4 delegations per megabyte of RAM. Quick
7077 * estimates suggest that in the worst case (where every delegation
7078 * is for a different inode), a delegation could take about 1.5K,
7079 * giving a worst case usage of about 6% of memory.
7080 */
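	/*
	 * With 4K pages (PAGE_SHIFT == 12), for example, the shift below
	 * is 6: one delegation is allowed per 64 free pages (256K), which
	 * is 4 per megabyte.
	 */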
7081 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7082 }
7083
7084 static int nfs4_state_create_net(struct net *net)
7085 {
7086 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7087 int i;
7088
	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
7091 if (!nn->conf_id_hashtbl)
7092 goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
7095 if (!nn->unconf_id_hashtbl)
7096 goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
7099 if (!nn->sessionid_hashtbl)
7100 goto err_sessionid;
7101
7102 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7103 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7104 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7105 }
7106 for (i = 0; i < SESSION_HASH_SIZE; i++)
7107 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7108 nn->conf_name_tree = RB_ROOT;
7109 nn->unconf_name_tree = RB_ROOT;
7110 nn->boot_time = get_seconds();
7111 nn->grace_ended = false;
7112 nn->nfsd4_manager.block_opens = true;
7113 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7114 INIT_LIST_HEAD(&nn->client_lru);
7115 INIT_LIST_HEAD(&nn->close_lru);
7116 INIT_LIST_HEAD(&nn->del_recall_lru);
7117 spin_lock_init(&nn->client_lock);
7118
7119 spin_lock_init(&nn->blocked_locks_lock);
7120 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7121
7122 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7123 get_net(net);
7124
7125 return 0;
7126
7127 err_sessionid:
7128 kfree(nn->unconf_id_hashtbl);
7129 err_unconf_id:
7130 kfree(nn->conf_id_hashtbl);
7131 err:
7132 return -ENOMEM;
7133 }
7134
7135 static void
7136 nfs4_state_destroy_net(struct net *net)
7137 {
7138 int i;
7139 struct nfs4_client *clp = NULL;
7140 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7141
7142 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7143 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7144 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7145 destroy_client(clp);
7146 }
7147 }
7148
7149 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7150
7151 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7152 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7153 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7154 destroy_client(clp);
7155 }
7156 }
7157
7158 kfree(nn->sessionid_hashtbl);
7159 kfree(nn->unconf_id_hashtbl);
7160 kfree(nn->conf_id_hashtbl);
7161 put_net(net);
7162 }
7163
7164 int
7165 nfs4_state_start_net(struct net *net)
7166 {
7167 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7168 int ret;
7169
7170 ret = nfs4_state_create_net(net);
7171 if (ret)
7172 return ret;
7173 locks_start_grace(net, &nn->nfsd4_manager);
7174 nfsd4_client_tracking_init(net);
7175 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
7176 nn->nfsd4_grace, net);
7177 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7178 return 0;
7179 }
7180
7181 /* initialization to perform when the nfsd service is started: */
7182
7183 int
7184 nfs4_state_start(void)
7185 {
7186 int ret;
7187
7188 ret = set_callback_cred();
7189 if (ret)
7190 return ret;
7191
7192 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7193 if (laundry_wq == NULL) {
7194 ret = -ENOMEM;
7195 goto out_cleanup_cred;
7196 }
7197 ret = nfsd4_create_callback_queue();
7198 if (ret)
7199 goto out_free_laundry;
7200
7201 set_max_delegations();
7202 return 0;
7203
7204 out_free_laundry:
7205 destroy_workqueue(laundry_wq);
7206 out_cleanup_cred:
7207 cleanup_callback_cred();
7208 return ret;
7209 }
7210
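/*
 * Per-net shutdown: stop the laundromat, end the grace period, unhash
 * and release every remaining delegation, and destroy all client state.
 */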
7211 void
7212 nfs4_state_shutdown_net(struct net *net)
7213 {
7214 struct nfs4_delegation *dp = NULL;
7215 struct list_head *pos, *next, reaplist;
7216 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7217
7218 cancel_delayed_work_sync(&nn->laundromat_work);
7219 locks_end_grace(&nn->nfsd4_manager);
7220
7221 INIT_LIST_HEAD(&reaplist);
7222 spin_lock(&state_lock);
7223 list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7225 WARN_ON(!unhash_delegation_locked(dp));
7226 list_add(&dp->dl_recall_lru, &reaplist);
7227 }
7228 spin_unlock(&state_lock);
7229 list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7231 list_del_init(&dp->dl_recall_lru);
7232 put_clnt_odstate(dp->dl_clnt_odstate);
7233 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
7234 nfs4_put_stid(&dp->dl_stid);
7235 }
7236
7237 nfsd4_client_tracking_exit(net);
7238 nfs4_state_destroy_net(net);
7239 }
7240
7241 void
7242 nfs4_state_shutdown(void)
7243 {
7244 destroy_workqueue(laundry_wq);
7245 nfsd4_destroy_callback_queue();
7246 cleanup_callback_cred();
7247 }
7248
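/*
 * Support for the v4.1 "current stateid": when the flag is set, a
 * special current-stateid value in a request is replaced with the
 * stateid saved by an earlier operation in the compound (get_stateid),
 * and operations that produce a stateid save it for later consumers
 * (put_stateid).
 */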
7249 static void
7250 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7251 {
7252 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7253 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7254 }
7255
7256 static void
7257 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7258 {
7259 if (cstate->minorversion) {
7260 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7261 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7262 }
7263 }
7264
7265 void
7266 clear_current_stateid(struct nfsd4_compound_state *cstate)
7267 {
7268 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7269 }
7270
7271 /*
7272 * functions to set current state id
7273 */
7274 void
7275 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7276 union nfsd4_op_u *u)
7277 {
7278 put_stateid(cstate, &u->open_downgrade.od_stateid);
7279 }
7280
7281 void
7282 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7283 union nfsd4_op_u *u)
7284 {
7285 put_stateid(cstate, &u->open.op_stateid);
7286 }
7287
7288 void
7289 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7290 union nfsd4_op_u *u)
7291 {
7292 put_stateid(cstate, &u->close.cl_stateid);
7293 }
7294
7295 void
7296 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7297 union nfsd4_op_u *u)
7298 {
7299 put_stateid(cstate, &u->lock.lk_resp_stateid);
7300 }
7301
7302 /*
7303 * functions to consume current state id
7304 */
7305
7306 void
7307 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7308 union nfsd4_op_u *u)
7309 {
7310 get_stateid(cstate, &u->open_downgrade.od_stateid);
7311 }
7312
7313 void
7314 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7315 union nfsd4_op_u *u)
7316 {
7317 get_stateid(cstate, &u->delegreturn.dr_stateid);
7318 }
7319
7320 void
7321 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7322 union nfsd4_op_u *u)
7323 {
7324 get_stateid(cstate, &u->free_stateid.fr_stateid);
7325 }
7326
7327 void
7328 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7329 union nfsd4_op_u *u)
7330 {
7331 get_stateid(cstate, &u->setattr.sa_stateid);
7332 }
7333
7334 void
7335 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7336 union nfsd4_op_u *u)
7337 {
7338 get_stateid(cstate, &u->close.cl_stateid);
7339 }
7340
7341 void
7342 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7343 union nfsd4_op_u *u)
7344 {
7345 get_stateid(cstate, &u->locku.lu_stateid);
7346 }
7347
7348 void
7349 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7350 union nfsd4_op_u *u)
7351 {
7352 get_stateid(cstate, &u->read.rd_stateid);
7353 }
7354
7355 void
7356 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7357 union nfsd4_op_u *u)
7358 {
7359 get_stateid(cstate, &u->write.wr_stateid);
7360 }