fs/nfsd/nfs4state.c
/*
*  Copyright (c) 2001 The Regents of the University of Michigan.
*  All rights reserved.
*
*  Kendrick Smith <kmsmith@umich.edu>
*  Andy Adamson <kandros@umich.edu>
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions
*  are met:
*
*  1. Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*  2. Redistributions in binary form must reproduce the above copyright
*     notice, this list of conditions and the following disclaimer in the
*     documentation and/or other materials provided with the distribution.
*  3. Neither the name of the University nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		posix_unblock_lock(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		posix_unblock_lock(&nbl->nbl_lock);
		free_blocked_lock(nbl);
	}
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
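/*
 * Editorial note (not in the original source): opaque_hashval() is a plain
 * multiply-by-37 rolling hash.  For example, hashing the two bytes "ab"
 * gives (0 * 37 + 'a') * 37 + 'b' = 97 * 37 + 98 = 3687; ownerstr_hashval()
 * below then masks such a value down with OWNER_HASH_MASK to index the
 * per-client owner hash table.
 */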

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS		8
#define FILE_HASH_SIZE		(1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		atomic_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		atomic_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	atomic_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appear in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
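/*
 * Editorial sketch (not in the original source): the intended usage of the
 * filter pair above is roughly to call block_delegations(&fh) when a
 * delegation on fh is recalled, and to test delegation_blocked(&fh) before
 * handing out a new delegation on that filehandle.  alloc_init_deleg()
 * below performs exactly this delegation_blocked() check before allocating
 * a new delegation stateid.
 */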

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp) {
		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
		fput(filp);
	}
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_get_existing_delegation - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if an existing delegation was not found.
 *
 *      On error: -EAGAIN if one was previously granted to this nfs4_client
 *                 for this nfs4_file.
 *
 */

static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return -EAGAIN;
		}
	}
	return 0;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	int status;
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	status = nfs4_get_existing_delegation(clp, fp);
	if (status)
		return status;
	++fp->fi_delegees;
	atomic_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed) {
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_deleg_lease(dp->dl_stid.sc_file);

	if (clp->cl_minorversion == 0)
		nfs4_put_stid(&dp->dl_stid);
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempt to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
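/*
 * Editorial example (not in the original source): each bit position in the
 * bitmap equals the share value it records, so with bit 1
 * (NFS4_SHARE_ACCESS_READ) and bit 2 (NFS4_SHARE_ACCESS_WRITE) set, the
 * loop above ORs in 1 and 2 and returns 3, i.e. NFS4_SHARE_ACCESS_BOTH;
 * a bitmap with only bit 3 set also returns 3.
 */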

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!atomic_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return unhash_ol_stateid(stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	bool unhashed;

	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhashed = unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
	return unhashed;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponssize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponssize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}
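/*
 * Editorial arithmetic (not in the original source): NFSD_MIN_HDR_SEQ_SZ is
 * 24 + 12 + 44 = 80 bytes, so a channel advertising a maxresp_cached of,
 * say, 1024 bytes gets 1024 - 80 = 944 bytes of cached reply data per slot,
 * plus sizeof(struct nfsd4_slot) for the slot bookkeeping itself.
 */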
ec6b5d7b 1495
5b6feee9
BF
1496/*
1497 * XXX: If we run out of reserved DRC memory we could (up to a point)
a649637c 1498 * re-negotiate active sessions and reduce their slot usage to make
42b2aa86 1499 * room for new connections. For now we just fail the create session.
ec6b5d7b 1500 */
55c760cf 1501static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
ec6b5d7b 1502{
55c760cf
BF
1503 u32 slotsize = slot_bytes(ca);
1504 u32 num = ca->maxreqs;
5b6feee9 1505 int avail;
ec6b5d7b 1506
5b6feee9 1507 spin_lock(&nfsd_drc_lock);
697ce9be
ZY
1508 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
1509 nfsd_drc_max_mem - nfsd_drc_mem_used);
5b6feee9
BF
1510 num = min_t(int, num, avail / slotsize);
1511 nfsd_drc_mem_used += num * slotsize;
1512 spin_unlock(&nfsd_drc_lock);
ec6b5d7b 1513
5b6feee9
BF
1514 return num;
1515}
ec6b5d7b 1516
55c760cf 1517static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
5b6feee9 1518{
55c760cf
BF
1519 int slotsize = slot_bytes(ca);
1520
4bd9b0f4 1521 spin_lock(&nfsd_drc_lock);
55c760cf 1522 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
4bd9b0f4 1523 spin_unlock(&nfsd_drc_lock);
5b6feee9 1524}
ec6b5d7b 1525
60810e54
KM
1526static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1527 struct nfsd4_channel_attrs *battrs)
5b6feee9 1528{
60810e54
KM
1529 int numslots = fattrs->maxreqs;
1530 int slotsize = slot_bytes(fattrs);
5b6feee9
BF
1531 struct nfsd4_session *new;
1532 int mem, i;
a649637c 1533
5b6feee9
BF
1534 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1535 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1536 mem = numslots * sizeof(struct nfsd4_slot *);
ec6b5d7b 1537
5b6feee9
BF
1538 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1539 if (!new)
1540 return NULL;
557ce264 1541 /* allocate each struct nfsd4_slot and data cache in one piece */
5b6feee9 1542 for (i = 0; i < numslots; i++) {
55c760cf 1543 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
5b6feee9 1544 if (!new->se_slots[i])
557ce264 1545 goto out_free;
557ce264 1546 }
60810e54
KM
1547
1548 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1549 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1550
5b6feee9
BF
1551 return new;
1552out_free:
1553 while (i--)
1554 kfree(new->se_slots[i]);
1555 kfree(new);
1556 return NULL;
ec6b5d7b
AA
1557}
1558
19cf5c02
BF
1559static void free_conn(struct nfsd4_conn *c)
1560{
1561 svc_xprt_put(c->cn_xprt);
1562 kfree(c);
1563}
ec6b5d7b 1564
19cf5c02
BF
1565static void nfsd4_conn_lost(struct svc_xpt_user *u)
1566{
1567 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1568 struct nfs4_client *clp = c->cn_session->se_client;
ec6b5d7b 1569
19cf5c02
BF
1570 spin_lock(&clp->cl_lock);
1571 if (!list_empty(&c->cn_persession)) {
1572 list_del(&c->cn_persession);
1573 free_conn(c);
1574 }
eea49806 1575 nfsd4_probe_callback(clp);
2e4b7239 1576 spin_unlock(&clp->cl_lock);
19cf5c02 1577}
ec6b5d7b 1578
d29c374c 1579static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
c7662518 1580{
c7662518 1581 struct nfsd4_conn *conn;
ec6b5d7b 1582
c7662518
BF
1583 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1584 if (!conn)
db90681d 1585 return NULL;
c7662518
BF
1586 svc_xprt_get(rqstp->rq_xprt);
1587 conn->cn_xprt = rqstp->rq_xprt;
d29c374c 1588 conn->cn_flags = flags;
db90681d
BF
1589 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1590 return conn;
1591}
a649637c 1592
328ead28
BF
1593static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1594{
1595 conn->cn_session = ses;
1596 list_add(&conn->cn_persession, &ses->se_conns);
ec6b5d7b
AA
1597}
1598
db90681d 1599static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
557ce264 1600{
db90681d 1601 struct nfs4_client *clp = ses->se_client;
557ce264 1602
c7662518 1603 spin_lock(&clp->cl_lock);
328ead28 1604 __nfsd4_hash_conn(conn, ses);
c7662518 1605 spin_unlock(&clp->cl_lock);
557ce264
AA
1606}
1607
21b75b01 1608static int nfsd4_register_conn(struct nfsd4_conn *conn)
efe0cb6d 1609{
19cf5c02 1610 conn->cn_xpt_user.callback = nfsd4_conn_lost;
21b75b01 1611 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
efe0cb6d
BF
1612}
1613
e1ff371f 1614static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
ec6b5d7b 1615{
21b75b01 1616 int ret;
ec6b5d7b 1617
db90681d 1618 nfsd4_hash_conn(conn, ses);
21b75b01
BF
1619 ret = nfsd4_register_conn(conn);
1620 if (ret)
1621 /* oops; xprt is already down: */
1622 nfsd4_conn_lost(&conn->cn_xpt_user);
57a37144
BF
1623 /* We may have gained or lost a callback channel: */
1624 nfsd4_probe_callback_sync(ses->se_client);
c7662518 1625}
ec6b5d7b 1626
e1ff371f 1627static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1d1bc8f2
BF
1628{
1629 u32 dir = NFS4_CDFC4_FORE;
1630
e1ff371f 1631 if (cses->flags & SESSION4_BACK_CHAN)
1d1bc8f2 1632 dir |= NFS4_CDFC4_BACK;
e1ff371f 1633 return alloc_conn(rqstp, dir);
1d1bc8f2
BF
1634}
1635
1636/* must be called under client_lock */
19cf5c02 1637static void nfsd4_del_conns(struct nfsd4_session *s)
c7662518 1638{
19cf5c02
BF
1639 struct nfs4_client *clp = s->se_client;
1640 struct nfsd4_conn *c;
ec6b5d7b 1641
19cf5c02
BF
1642 spin_lock(&clp->cl_lock);
1643 while (!list_empty(&s->se_conns)) {
1644 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1645 list_del_init(&c->cn_persession);
1646 spin_unlock(&clp->cl_lock);
557ce264 1647
19cf5c02
BF
1648 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1649 free_conn(c);
ec6b5d7b 1650
19cf5c02
BF
1651 spin_lock(&clp->cl_lock);
1652 }
1653 spin_unlock(&clp->cl_lock);
c7662518 1654}
ec6b5d7b 1655
1377b69e
BF
1656static void __free_session(struct nfsd4_session *ses)
1657{
1377b69e
BF
1658 free_session_slots(ses);
1659 kfree(ses);
1660}
1661
66b2b9b2 1662static void free_session(struct nfsd4_session *ses)
c7662518 1663{
19cf5c02 1664 nfsd4_del_conns(ses);
55c760cf 1665 nfsd4_put_drc_mem(&ses->se_fchannel);
1377b69e 1666 __free_session(ses);
c7662518
BF
1667}
1668
135ae827 1669static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
a827bcb2 1670{
a827bcb2 1671 int idx;
1872de0e 1672 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
a827bcb2 1673
ec6b5d7b
AA
1674 new->se_client = clp;
1675 gen_sessionid(new);
ec6b5d7b 1676
c7662518
BF
1677 INIT_LIST_HEAD(&new->se_conns);
1678
ac7c46f2 1679 new->se_cb_seq_nr = 1;
ec6b5d7b 1680 new->se_flags = cses->flags;
8b5ce5cd 1681 new->se_cb_prog = cses->callback_prog;
c6bb3ca2 1682 new->se_cb_sec = cses->cb_sec;
66b2b9b2 1683 atomic_set(&new->se_ref, 0);
5b6feee9 1684 idx = hash_sessionid(&new->se_sessionid);
1872de0e 1685 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
4c649378 1686 spin_lock(&clp->cl_lock);
ec6b5d7b 1687 list_add(&new->se_perclnt, &clp->cl_sessions);
4c649378 1688 spin_unlock(&clp->cl_lock);
60810e54 1689
b0d2e42c 1690 {
edd76786 1691 struct sockaddr *sa = svc_addr(rqstp);
dcbeaa68
BF
1692 /*
1693 * This is a little silly; with sessions there's no real
1694 * use for the callback address. Use the peer address
1695 * as a reasonable default for now, but consider fixing
1696 * the rpc client not to require an address in the
1697 * future:
1698 */
edd76786
BF
1699 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1700 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
edd76786 1701 }
ec6b5d7b
AA
1702}
1703
9089f1b4 1704/* caller must hold client_lock */
5282fd72 1705static struct nfsd4_session *
d4e19e70 1706__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
5282fd72
ME
1707{
1708 struct nfsd4_session *elem;
1709 int idx;
1872de0e 1710 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5282fd72 1711
0a880a28
TM
1712 lockdep_assert_held(&nn->client_lock);
1713
5282fd72
ME
1714 dump_sessionid(__func__, sessionid);
1715 idx = hash_sessionid(sessionid);
5282fd72 1716 /* Search in the appropriate list */
1872de0e 1717 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
5282fd72
ME
1718 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1719 NFS4_MAX_SESSIONID_LEN)) {
1720 return elem;
1721 }
1722 }
1723
1724 dprintk("%s: session not found\n", __func__);
1725 return NULL;
1726}
1727
d4e19e70
TM
1728static struct nfsd4_session *
1729find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1730 __be32 *ret)
1731{
1732 struct nfsd4_session *session;
1733 __be32 status = nfserr_badsession;
1734
1735 session = __find_in_sessionid_hashtbl(sessionid, net);
1736 if (!session)
1737 goto out;
1738 status = nfsd4_get_session_locked(session);
1739 if (status)
1740 session = NULL;
1741out:
1742 *ret = status;
1743 return session;
1744}
1745
9089f1b4 1746/* caller must hold client_lock */
7116ed6b 1747static void
5282fd72 1748unhash_session(struct nfsd4_session *ses)
7116ed6b 1749{
0a880a28
TM
1750 struct nfs4_client *clp = ses->se_client;
1751 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1752
1753 lockdep_assert_held(&nn->client_lock);
1754
7116ed6b 1755 list_del(&ses->se_hash);
4c649378 1756 spin_lock(&ses->se_client->cl_lock);
7116ed6b 1757 list_del(&ses->se_perclnt);
4c649378 1758 spin_unlock(&ses->se_client->cl_lock);
5282fd72
ME
1759}
1760
1da177e4
LT
1761/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1762static int
2c142baa 1763STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1da177e4 1764{
bbc7f33a
BF
1765 /*
1766 * We're assuming the clid was not given out from a boot
1767 * precisely 2^32 (about 136 years) before this one. That seems
1768 * a safe assumption:
1769 */
1770 if (clid->cl_boot == (u32)nn->boot_time)
1da177e4 1771 return 0;
60adfc50 1772 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
2c142baa 1773 clid->cl_boot, clid->cl_id, nn->boot_time);
1da177e4
LT
1774 return 1;
1775}
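/*
 * A minimal standalone sketch of the staleness test above, assuming only
 * that cl_boot carries the low 32 bits of the server's boot time; the
 * demo_* names are illustrative and not nfsd symbols.
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool demo_clientid_is_stale(uint32_t cl_boot, time_t boot_time)
{
	/* Same truncating comparison as STALE_CLIENTID() above. */
	return cl_boot != (uint32_t)boot_time;
}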
1776
1777/*
 1778 * XXX Should we use a slab cache?
1779 * This type of memory management is somewhat inefficient, but we use it
1780 * anyway since SETCLIENTID is not a common operation.
1781 */
35bba9a3 1782static struct nfs4_client *alloc_client(struct xdr_netobj name)
1da177e4
LT
1783{
1784 struct nfs4_client *clp;
d4f0489f 1785 int i;
1da177e4 1786
35bba9a3
BF
1787 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1788 if (clp == NULL)
1789 return NULL;
67114fe6 1790 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
d4f0489f
TM
1791 if (clp->cl_name.data == NULL)
1792 goto err_no_name;
1793 clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1794 OWNER_HASH_SIZE, GFP_KERNEL);
1795 if (!clp->cl_ownerstr_hashtbl)
1796 goto err_no_hashtbl;
1797 for (i = 0; i < OWNER_HASH_SIZE; i++)
1798 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
35bba9a3 1799 clp->cl_name.len = name.len;
5694c93e
TM
1800 INIT_LIST_HEAD(&clp->cl_sessions);
1801 idr_init(&clp->cl_stateids);
1802 atomic_set(&clp->cl_refcount, 0);
1803 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1804 INIT_LIST_HEAD(&clp->cl_idhash);
1805 INIT_LIST_HEAD(&clp->cl_openowners);
1806 INIT_LIST_HEAD(&clp->cl_delegations);
1807 INIT_LIST_HEAD(&clp->cl_lru);
5694c93e 1808 INIT_LIST_HEAD(&clp->cl_revoked);
9cf514cc
CH
1809#ifdef CONFIG_NFSD_PNFS
1810 INIT_LIST_HEAD(&clp->cl_lo_states);
1811#endif
5694c93e
TM
1812 spin_lock_init(&clp->cl_lock);
1813 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1da177e4 1814 return clp;
d4f0489f
TM
1815err_no_hashtbl:
1816 kfree(clp->cl_name.data);
1817err_no_name:
1818 kfree(clp);
1819 return NULL;
1da177e4
LT
1820}
1821
4dd86e15 1822static void
1da177e4
LT
1823free_client(struct nfs4_client *clp)
1824{
792c95dd
BF
1825 while (!list_empty(&clp->cl_sessions)) {
1826 struct nfsd4_session *ses;
1827 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1828 se_perclnt);
1829 list_del(&ses->se_perclnt);
66b2b9b2
BF
1830 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1831 free_session(ses);
792c95dd 1832 }
4cb57e30 1833 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
03a4e1f6 1834 free_svc_cred(&clp->cl_cred);
d4f0489f 1835 kfree(clp->cl_ownerstr_hashtbl);
1da177e4 1836 kfree(clp->cl_name.data);
2d32b29a 1837 idr_destroy(&clp->cl_stateids);
1da177e4
LT
1838 kfree(clp);
1839}
1840
84d38ac9 1841/* must be called under the client_lock */
4beb345b 1842static void
84d38ac9
BH
1843unhash_client_locked(struct nfs4_client *clp)
1844{
4beb345b 1845 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
792c95dd
BF
1846 struct nfsd4_session *ses;
1847
0a880a28
TM
1848 lockdep_assert_held(&nn->client_lock);
1849
4beb345b
TM
1850 /* Mark the client as expired! */
1851 clp->cl_time = 0;
1852 /* Make it invisible */
1853 if (!list_empty(&clp->cl_idhash)) {
1854 list_del_init(&clp->cl_idhash);
1855 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1856 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1857 else
1858 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1859 }
1860 list_del_init(&clp->cl_lru);
4c649378 1861 spin_lock(&clp->cl_lock);
792c95dd
BF
1862 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1863 list_del_init(&ses->se_hash);
4c649378 1864 spin_unlock(&clp->cl_lock);
84d38ac9
BH
1865}
1866
1da177e4 1867static void
4beb345b
TM
1868unhash_client(struct nfs4_client *clp)
1869{
1870 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1871
1872 spin_lock(&nn->client_lock);
1873 unhash_client_locked(clp);
1874 spin_unlock(&nn->client_lock);
1875}
1876
97403d95
JL
1877static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1878{
1879 if (atomic_read(&clp->cl_refcount))
1880 return nfserr_jukebox;
1881 unhash_client_locked(clp);
1882 return nfs_ok;
1883}
1884
4beb345b
TM
1885static void
1886__destroy_client(struct nfs4_client *clp)
1da177e4 1887{
797bfd05 1888 int i;
fe0750e5 1889 struct nfs4_openowner *oo;
1da177e4 1890 struct nfs4_delegation *dp;
1da177e4
LT
1891 struct list_head reaplist;
1892
1da177e4 1893 INIT_LIST_HEAD(&reaplist);
cdc97505 1894 spin_lock(&state_lock);
ea1da636
N
1895 while (!list_empty(&clp->cl_delegations)) {
1896 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
3fcbbd24 1897 WARN_ON(!unhash_delegation_locked(dp));
42690676 1898 list_add(&dp->dl_recall_lru, &reaplist);
1da177e4 1899 }
cdc97505 1900 spin_unlock(&state_lock);
1da177e4
LT
1901 while (!list_empty(&reaplist)) {
1902 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
42690676 1903 list_del_init(&dp->dl_recall_lru);
8287f009 1904 put_clnt_odstate(dp->dl_clnt_odstate);
afbda402 1905 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
6011695d 1906 nfs4_put_stid(&dp->dl_stid);
1da177e4 1907 }
2d4a532d 1908 while (!list_empty(&clp->cl_revoked)) {
c876486b 1909 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2d4a532d 1910 list_del_init(&dp->dl_recall_lru);
6011695d 1911 nfs4_put_stid(&dp->dl_stid);
956c4fee 1912 }
ea1da636 1913 while (!list_empty(&clp->cl_openowners)) {
fe0750e5 1914 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
b5971afa 1915 nfs4_get_stateowner(&oo->oo_owner);
fe0750e5 1916 release_openowner(oo);
1da177e4 1917 }
797bfd05
JL
1918 for (i = 0; i < OWNER_HASH_SIZE; i++) {
1919 struct nfs4_stateowner *so, *tmp;
1920
1921 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
1922 so_strhash) {
1923 /* Should be no openowners at this point */
1924 WARN_ON_ONCE(so->so_is_open_owner);
1925 remove_blocked_locks(lockowner(so));
1926 }
1927 }
9cf514cc 1928 nfsd4_return_all_client_layouts(clp);
6ff8da08 1929 nfsd4_shutdown_callback(clp);
84d38ac9
BH
1930 if (clp->cl_cb_conn.cb_xprt)
1931 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
221a6876 1932 free_client(clp);
1da177e4
LT
1933}
1934
4beb345b
TM
1935static void
1936destroy_client(struct nfs4_client *clp)
1937{
1938 unhash_client(clp);
1939 __destroy_client(clp);
1940}
1941
0d22f68f
BF
1942static void expire_client(struct nfs4_client *clp)
1943{
4beb345b 1944 unhash_client(clp);
0d22f68f 1945 nfsd4_client_record_remove(clp);
4beb345b 1946 __destroy_client(clp);
0d22f68f
BF
1947}
1948
35bba9a3
BF
1949static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1950{
1951 memcpy(target->cl_verifier.data, source->data,
1952 sizeof(target->cl_verifier.data));
1da177e4
LT
1953}
1954
35bba9a3
BF
1955static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1956{
1da177e4
LT
1957 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1958 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1959}
1960
50043859
BF
1961static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1962{
2f10fdcb
N
1963 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
1964 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
1965 GFP_KERNEL);
1966 if ((source->cr_principal && ! target->cr_principal) ||
1967 (source->cr_raw_principal && ! target->cr_raw_principal))
1968 return -ENOMEM;
50043859 1969
d5497fc6 1970 target->cr_flavor = source->cr_flavor;
1da177e4
LT
1971 target->cr_uid = source->cr_uid;
1972 target->cr_gid = source->cr_gid;
1973 target->cr_group_info = source->cr_group_info;
1974 get_group_info(target->cr_group_info);
0dc1531a
BF
1975 target->cr_gss_mech = source->cr_gss_mech;
1976 if (source->cr_gss_mech)
1977 gss_mech_get(source->cr_gss_mech);
03a4e1f6 1978 return 0;
1da177e4
LT
1979}
1980
ef17af2a 1981static int
ac55fdc4
JL
1982compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1983{
ef17af2a
RV
1984 if (o1->len < o2->len)
1985 return -1;
1986 if (o1->len > o2->len)
1987 return 1;
1988 return memcmp(o1->data, o2->data, o1->len);
ac55fdc4
JL
1989}
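/*
 * Illustrative only: compare_blob() orders opaque client names by length
 * first and by content second, which is the total order the conf/unconf
 * name rb-trees rely on. A short userspace demonstration follows; the
 * demo_* names are assumptions, not nfsd symbols.
 */
#include <assert.h>
#include <string.h>

struct demo_blob {
	unsigned int len;
	const char *data;
};

static int demo_compare_blob(const struct demo_blob *o1,
			     const struct demo_blob *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

int main(void)
{
	struct demo_blob shorter = { 3, "zzz" };
	struct demo_blob longer  = { 4, "aaaa" };

	/* Length dominates: "zzz" sorts before "aaaa" despite its content. */
	assert(demo_compare_blob(&shorter, &longer) < 0);
	return 0;
}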
1990
35bba9a3 1991static int same_name(const char *n1, const char *n2)
599e0a22 1992{
a55370a3 1993 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1da177e4
LT
1994}
1995
1996static int
599e0a22
BF
1997same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1998{
1999 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1da177e4
LT
2000}
2001
2002static int
599e0a22
BF
2003same_clid(clientid_t *cl1, clientid_t *cl2)
2004{
2005 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1da177e4
LT
2006}
2007
8fbba96e
BF
2008static bool groups_equal(struct group_info *g1, struct group_info *g2)
2009{
2010 int i;
2011
2012 if (g1->ngroups != g2->ngroups)
2013 return false;
2014 for (i=0; i<g1->ngroups; i++)
81243eac 2015 if (!gid_eq(g1->gid[i], g2->gid[i]))
8fbba96e
BF
2016 return false;
2017 return true;
2018}
2019
68eb3508
BF
2020/*
2021 * RFC 3530 language requires clid_inuse be returned when the
2022 * "principal" associated with a requests differs from that previously
2023 * used. We use uid, gid's, and gss principal string as our best
2024 * approximation. We also don't want to allow non-gss use of a client
2025 * established using gss: in theory cr_principal should catch that
2026 * change, but in practice cr_principal can be null even in the gss case
2027 * since gssd doesn't always pass down a principal string.
2028 */
2029static bool is_gss_cred(struct svc_cred *cr)
2030{
2031 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2032 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2033}
2034
2035
5559b50a 2036static bool
599e0a22
BF
2037same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2038{
68eb3508 2039 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
6fab8779
EB
2040 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2041 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
8fbba96e
BF
2042 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2043 return false;
2044 if (cr1->cr_principal == cr2->cr_principal)
2045 return true;
2046 if (!cr1->cr_principal || !cr2->cr_principal)
2047 return false;
5559b50a 2048 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1da177e4
LT
2049}
2050
57266a6e
BF
2051static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2052{
2053 struct svc_cred *cr = &rqstp->rq_cred;
2054 u32 service;
2055
c4720591
BF
2056 if (!cr->cr_gss_mech)
2057 return false;
57266a6e
BF
2058 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2059 return service == RPC_GSS_SVC_INTEGRITY ||
2060 service == RPC_GSS_SVC_PRIVACY;
2061}
2062
dedeb13f 2063bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
57266a6e
BF
2064{
2065 struct svc_cred *cr = &rqstp->rq_cred;
2066
2067 if (!cl->cl_mach_cred)
2068 return true;
2069 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2070 return false;
2071 if (!svc_rqst_integrity_protected(rqstp))
2072 return false;
414ca017
BF
2073 if (cl->cl_cred.cr_raw_principal)
2074 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2075 cr->cr_raw_principal);
57266a6e
BF
2076 if (!cr->cr_principal)
2077 return false;
2078 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2079}
2080
294ac32e 2081static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
deda2faa 2082{
ab4684d1 2083 __be32 verf[2];
1da177e4 2084
f419992c
JL
2085 /*
2086 * This is opaque to client, so no need to byte-swap. Use
2087 * __force to keep sparse happy
2088 */
2089 verf[0] = (__force __be32)get_seconds();
19311aa8 2090 verf[1] = (__force __be32)nn->clverifier_counter++;
ab4684d1 2091 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1da177e4
LT
2092}
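/*
 * A rough standalone sketch of how gen_confirm() builds the 8-byte confirm
 * verifier: the current time in seconds and a per-net counter are packed
 * back to back. The demo_* names are assumptions for illustration only.
 */
#include <stdint.h>
#include <string.h>
#include <time.h>

static void demo_gen_confirm(unsigned char confirm[8], uint32_t *counter)
{
	uint32_t verf[2];

	verf[0] = (uint32_t)time(NULL);	/* opaque to the client, so no byte-swap */
	verf[1] = (*counter)++;
	memcpy(confirm, verf, sizeof(verf));
}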
2093
294ac32e
JL
2094static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2095{
2096 clp->cl_clientid.cl_boot = nn->boot_time;
2097 clp->cl_clientid.cl_id = nn->clientid_counter++;
2098 gen_confirm(clp, nn);
2099}
2100
4770d722
JL
2101static struct nfs4_stid *
2102find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
4581d140 2103{
3abdb607
BF
2104 struct nfs4_stid *ret;
2105
2106 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2107 if (!ret || !ret->sc_type)
2108 return NULL;
2109 return ret;
4d71ab87
BF
2110}
2111
4770d722
JL
2112static struct nfs4_stid *
2113find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
f459e453
BF
2114{
2115 struct nfs4_stid *s;
4d71ab87 2116
4770d722
JL
2117 spin_lock(&cl->cl_lock);
2118 s = find_stateid_locked(cl, t);
2d3f9668
TM
2119 if (s != NULL) {
2120 if (typemask & s->sc_type)
2121 atomic_inc(&s->sc_count);
2122 else
2123 s = NULL;
2124 }
4770d722
JL
2125 spin_unlock(&cl->cl_lock);
2126 return s;
4581d140
BF
2127}
2128
2216d449 2129static struct nfs4_client *create_client(struct xdr_netobj name,
b09333c4
RL
2130 struct svc_rqst *rqstp, nfs4_verifier *verf)
2131{
2132 struct nfs4_client *clp;
2133 struct sockaddr *sa = svc_addr(rqstp);
03a4e1f6 2134 int ret;
c212cecf 2135 struct net *net = SVC_NET(rqstp);
b09333c4
RL
2136
2137 clp = alloc_client(name);
2138 if (clp == NULL)
2139 return NULL;
2140
03a4e1f6
BF
2141 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2142 if (ret) {
03a4e1f6 2143 free_client(clp);
03a4e1f6 2144 return NULL;
b09333c4 2145 }
0162ac2b 2146 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
07cd4909 2147 clp->cl_time = get_seconds();
b09333c4 2148 clear_bit(0, &clp->cl_cb_slot_busy);
b09333c4
RL
2149 copy_verf(clp, verf);
2150 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
edd76786 2151 clp->cl_cb_session = NULL;
c212cecf 2152 clp->net = net;
b09333c4
RL
2153 return clp;
2154}
2155
fd39ca9a 2156static void
ac55fdc4
JL
2157add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2158{
2159 struct rb_node **new = &(root->rb_node), *parent = NULL;
2160 struct nfs4_client *clp;
2161
2162 while (*new) {
2163 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2164 parent = *new;
2165
2166 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2167 new = &((*new)->rb_left);
2168 else
2169 new = &((*new)->rb_right);
2170 }
2171
2172 rb_link_node(&new_clp->cl_namenode, parent, new);
2173 rb_insert_color(&new_clp->cl_namenode, root);
2174}
2175
2176static struct nfs4_client *
2177find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2178{
ef17af2a 2179 int cmp;
ac55fdc4
JL
2180 struct rb_node *node = root->rb_node;
2181 struct nfs4_client *clp;
2182
2183 while (node) {
2184 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2185 cmp = compare_blob(&clp->cl_name, name);
2186 if (cmp > 0)
2187 node = node->rb_left;
2188 else if (cmp < 0)
2189 node = node->rb_right;
2190 else
2191 return clp;
2192 }
2193 return NULL;
2194}
2195
2196static void
2197add_to_unconfirmed(struct nfs4_client *clp)
1da177e4
LT
2198{
2199 unsigned int idhashval;
0a7ec377 2200 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1da177e4 2201
0a880a28
TM
2202 lockdep_assert_held(&nn->client_lock);
2203
ac55fdc4 2204 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
a99454aa 2205 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
1da177e4 2206 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
0a7ec377 2207 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
3dbacee6 2208 renew_client_locked(clp);
1da177e4
LT
2209}
2210
fd39ca9a 2211static void
1da177e4
LT
2212move_to_confirmed(struct nfs4_client *clp)
2213{
2214 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
8daae4dc 2215 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1da177e4 2216
0a880a28
TM
2217 lockdep_assert_held(&nn->client_lock);
2218
1da177e4 2219 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
8daae4dc 2220 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
a99454aa 2221 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
382a62e7 2222 add_clp_to_name_tree(clp, &nn->conf_name_tree);
ac55fdc4 2223 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3dbacee6 2224 renew_client_locked(clp);
1da177e4
LT
2225}
2226
2227static struct nfs4_client *
bfa85e83 2228find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
1da177e4
LT
2229{
2230 struct nfs4_client *clp;
2231 unsigned int idhashval = clientid_hashval(clid->cl_id);
2232
bfa85e83 2233 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
a50d2ad1 2234 if (same_clid(&clp->cl_clientid, clid)) {
d15c077e
BF
2235 if ((bool)clp->cl_minorversion != sessions)
2236 return NULL;
3dbacee6 2237 renew_client_locked(clp);
1da177e4 2238 return clp;
a50d2ad1 2239 }
1da177e4
LT
2240 }
2241 return NULL;
2242}
2243
bfa85e83
BF
2244static struct nfs4_client *
2245find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2246{
2247 struct list_head *tbl = nn->conf_id_hashtbl;
2248
0a880a28 2249 lockdep_assert_held(&nn->client_lock);
bfa85e83
BF
2250 return find_client_in_id_table(tbl, clid, sessions);
2251}
2252
1da177e4 2253static struct nfs4_client *
0a7ec377 2254find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1da177e4 2255{
bfa85e83 2256 struct list_head *tbl = nn->unconf_id_hashtbl;
1da177e4 2257
0a880a28 2258 lockdep_assert_held(&nn->client_lock);
bfa85e83 2259 return find_client_in_id_table(tbl, clid, sessions);
1da177e4
LT
2260}
2261
6e5f15c9 2262static bool clp_used_exchangeid(struct nfs4_client *clp)
a1bcecd2 2263{
6e5f15c9 2264 return clp->cl_exchange_flags != 0;
e203d506 2265}
a1bcecd2 2266
28ce6054 2267static struct nfs4_client *
382a62e7 2268find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
28ce6054 2269{
0a880a28 2270 lockdep_assert_held(&nn->client_lock);
382a62e7 2271 return find_clp_in_name_tree(name, &nn->conf_name_tree);
28ce6054
N
2272}
2273
2274static struct nfs4_client *
a99454aa 2275find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
28ce6054 2276{
0a880a28 2277 lockdep_assert_held(&nn->client_lock);
a99454aa 2278 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
28ce6054
N
2279}
2280
fd39ca9a 2281static void
6f3d772f 2282gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1da177e4 2283{
07263f1e 2284 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
6f3d772f
TU
2285 struct sockaddr *sa = svc_addr(rqstp);
2286 u32 scopeid = rpc_get_scope_id(sa);
7077ecba
JL
2287 unsigned short expected_family;
2288
2289 /* Currently, we only support tcp and tcp6 for the callback channel */
2290 if (se->se_callback_netid_len == 3 &&
2291 !memcmp(se->se_callback_netid_val, "tcp", 3))
2292 expected_family = AF_INET;
2293 else if (se->se_callback_netid_len == 4 &&
2294 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2295 expected_family = AF_INET6;
2296 else
1da177e4
LT
2297 goto out_err;
2298
c212cecf 2299 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
aa9a4ec7 2300 se->se_callback_addr_len,
07263f1e
BF
2301 (struct sockaddr *)&conn->cb_addr,
2302 sizeof(conn->cb_addr));
aa9a4ec7 2303
07263f1e 2304 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1da177e4 2305 goto out_err;
aa9a4ec7 2306
07263f1e
BF
2307 if (conn->cb_addr.ss_family == AF_INET6)
2308 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
fbf4665f 2309
07263f1e
BF
2310 conn->cb_prog = se->se_callback_prog;
2311 conn->cb_ident = se->se_callback_ident;
849a1cf1 2312 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
1da177e4
LT
2313 return;
2314out_err:
07263f1e
BF
2315 conn->cb_addr.ss_family = AF_UNSPEC;
2316 conn->cb_addrlen = 0;
4ab495bf 2317 dprintk("NFSD: this client (clientid %08x/%08x) "
1da177e4
LT
2318 "will not receive delegations\n",
2319 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2320
1da177e4
LT
2321 return;
2322}
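/*
 * Illustrative only: the netid check above amounts to a small mapping from
 * the callback netid to an address family. A standalone sketch (demo_*
 * names are assumptions, not nfsd symbols):
 */
#include <string.h>
#include <sys/socket.h>

static int demo_netid_to_family(const char *netid, size_t len)
{
	if (len == 3 && !memcmp(netid, "tcp", 3))
		return AF_INET;
	if (len == 4 && !memcmp(netid, "tcp6", 4))
		return AF_INET6;
	return AF_UNSPEC;	/* unsupported: the client gets no delegations */
}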
2323
074fe897 2324/*
067e1ace 2325 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
074fe897 2326 */
b607664e 2327static void
074fe897 2328nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
074fe897 2329{
f5236013 2330 struct xdr_buf *buf = resp->xdr.buf;
557ce264
AA
2331 struct nfsd4_slot *slot = resp->cstate.slot;
2332 unsigned int base;
074fe897 2333
557ce264 2334 dprintk("--> %s slot %p\n", __func__, slot);
074fe897 2335
e74cfcb8 2336 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
557ce264
AA
2337 slot->sl_opcnt = resp->opcnt;
2338 slot->sl_status = resp->cstate.status;
ff371bc8
BF
2339 free_svc_cred(&slot->sl_cred);
2340 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
074fe897 2341
e74cfcb8
BF
2342 if (!nfsd4_cache_this(resp)) {
2343 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
bf864a31 2344 return;
074fe897 2345 }
e74cfcb8
BF
2346 slot->sl_flags |= NFSD4_SLOT_CACHED;
2347
f5236013
BF
2348 base = resp->cstate.data_offset;
2349 slot->sl_datalen = buf->len - base;
2350 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
d3f03403
DC
2351 WARN(1, "%s: sessions DRC could not cache compound\n",
2352 __func__);
557ce264 2353 return;
074fe897
AA
2354}
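/*
 * A rough standalone sketch of the caching step above: everything encoded
 * after the SEQUENCE reply (from data_offset to the end of the buffer) is
 * copied into the slot so a replay can be answered from cache. The demo_*
 * structure and names are assumptions for illustration only.
 */
#include <stddef.h>
#include <string.h>

struct demo_slot {
	size_t datalen;
	unsigned char data[2048];	/* stand-in for the per-slot cache size */
};

static void demo_cache_reply(struct demo_slot *slot, const unsigned char *buf,
			     size_t buflen, size_t data_offset)
{
	if (data_offset > buflen || buflen - data_offset > sizeof(slot->data))
		return;		/* reply was not bounded to the cache size */
	slot->datalen = buflen - data_offset;
	memcpy(slot->data, buf + data_offset, slot->datalen);
}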
2355
2356/*
abfabf8c
AA
2357 * Encode the replay sequence operation from the slot values.
 2358 * If cachethis is FALSE, encode the uncached rep error on the next
 2359 * operation, which sets resp->p and increments resp->opcnt for
2360 * nfs4svc_encode_compoundres.
074fe897 2361 *
074fe897 2362 */
abfabf8c
AA
2363static __be32
2364nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2365 struct nfsd4_compoundres *resp)
074fe897 2366{
abfabf8c
AA
2367 struct nfsd4_op *op;
2368 struct nfsd4_slot *slot = resp->cstate.slot;
bf864a31 2369
abfabf8c
AA
2370 /* Encode the replayed sequence operation */
2371 op = &args->ops[resp->opcnt - 1];
2372 nfsd4_encode_operation(resp, op);
bf864a31 2373
e74cfcb8
BF
2374 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2375 return op->status;
2376 if (args->opcnt == 1) {
2377 /*
2378 * The original operation wasn't a solo sequence--we
2379 * always cache those--so this retry must not match the
2380 * original:
2381 */
2382 op->status = nfserr_seq_false_retry;
2383 } else {
abfabf8c
AA
2384 op = &args->ops[resp->opcnt++];
2385 op->status = nfserr_retry_uncached_rep;
2386 nfsd4_encode_operation(resp, op);
074fe897 2387 }
abfabf8c 2388 return op->status;
074fe897
AA
2389}
2390
2391/*
557ce264
AA
2392 * The sequence operation is not cached because we can use the slot and
2393 * session values.
074fe897 2394 */
3ca2eb98 2395static __be32
bf864a31
AA
2396nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2397 struct nfsd4_sequence *seq)
074fe897 2398{
557ce264 2399 struct nfsd4_slot *slot = resp->cstate.slot;
f5236013
BF
2400 struct xdr_stream *xdr = &resp->xdr;
2401 __be32 *p;
074fe897
AA
2402 __be32 status;
2403
557ce264 2404 dprintk("--> %s slot %p\n", __func__, slot);
074fe897 2405
abfabf8c 2406 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
0da7b19c 2407 if (status)
abfabf8c 2408 return status;
074fe897 2409
f5236013
BF
2410 p = xdr_reserve_space(xdr, slot->sl_datalen);
2411 if (!p) {
2412 WARN_ON_ONCE(1);
2413 return nfserr_serverfault;
2414 }
2415 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2416 xdr_commit_encode(xdr);
074fe897 2417
557ce264 2418 resp->opcnt = slot->sl_opcnt;
f5236013 2419 return slot->sl_status;
074fe897
AA
2420}
2421
0733d213
AA
2422/*
2423 * Set the exchange_id flags returned by the server.
2424 */
2425static void
2426nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2427{
9cf514cc
CH
2428#ifdef CONFIG_NFSD_PNFS
2429 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2430#else
0733d213 2431 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
9cf514cc 2432#endif
0733d213
AA
2433
2434 /* Referrals are supported, Migration is not. */
2435 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2436
2437 /* set the wire flags to return to client. */
2438 clid->flags = new->cl_exchange_flags;
2439}
2440
4eaea134
BF
2441static bool client_has_openowners(struct nfs4_client *clp)
2442{
2443 struct nfs4_openowner *oo;
2444
2445 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2446 if (!list_empty(&oo->oo_owner.so_stateids))
2447 return true;
2448 }
2449 return false;
2450}
2451
631fc9ea
BF
2452static bool client_has_state(struct nfs4_client *clp)
2453{
4eaea134 2454 return client_has_openowners(clp)
47e970be
KM
2455#ifdef CONFIG_NFSD_PNFS
2456 || !list_empty(&clp->cl_lo_states)
2457#endif
6eccece9
BF
2458 || !list_empty(&clp->cl_delegations)
2459 || !list_empty(&clp->cl_sessions);
631fc9ea
BF
2460}
2461
069b6ad4 2462__be32
eb69853d
CH
2463nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2464 union nfsd4_op_u *u)
069b6ad4 2465{
eb69853d 2466 struct nfsd4_exchange_id *exid = &u->exchange_id;
3dbacee6
TM
2467 struct nfs4_client *conf, *new;
2468 struct nfs4_client *unconf = NULL;
57b7b43b 2469 __be32 status;
363168b4 2470 char addr_str[INET6_ADDRSTRLEN];
0733d213 2471 nfs4_verifier verf = exid->verifier;
363168b4 2472 struct sockaddr *sa = svc_addr(rqstp);
83e08fd4 2473 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
c212cecf 2474 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
0733d213 2475
363168b4 2476 rpc_ntop(sa, addr_str, sizeof(addr_str));
0733d213 2477 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
363168b4 2478 "ip_addr=%s flags %x, spa_how %d\n",
0733d213 2479 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
363168b4 2480 addr_str, exid->flags, exid->spa_how);
0733d213 2481
a084daf5 2482 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
0733d213
AA
2483 return nfserr_inval;
2484
50c7b948
BF
2485 new = create_client(exid->clname, rqstp, &verf);
2486 if (new == NULL)
2487 return nfserr_jukebox;
2488
0733d213 2489 switch (exid->spa_how) {
57266a6e 2490 case SP4_MACH_CRED:
ed941643
AE
2491 exid->spo_must_enforce[0] = 0;
2492 exid->spo_must_enforce[1] = (
2493 1 << (OP_BIND_CONN_TO_SESSION - 32) |
2494 1 << (OP_EXCHANGE_ID - 32) |
2495 1 << (OP_CREATE_SESSION - 32) |
2496 1 << (OP_DESTROY_SESSION - 32) |
2497 1 << (OP_DESTROY_CLIENTID - 32));
2498
2499 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2500 1 << (OP_OPEN_DOWNGRADE) |
2501 1 << (OP_LOCKU) |
2502 1 << (OP_DELEGRETURN));
2503
2504 exid->spo_must_allow[1] &= (
2505 1 << (OP_TEST_STATEID - 32) |
2506 1 << (OP_FREE_STATEID - 32));
50c7b948
BF
2507 if (!svc_rqst_integrity_protected(rqstp)) {
2508 status = nfserr_inval;
2509 goto out_nolock;
920dd9bb
BF
2510 }
2511 /*
2512 * Sometimes userspace doesn't give us a principal.
2513 * Which is a bug, really. Anyway, we can't enforce
2514 * MACH_CRED in that case, better to give up now:
2515 */
414ca017
BF
2516 if (!new->cl_cred.cr_principal &&
2517 !new->cl_cred.cr_raw_principal) {
920dd9bb
BF
2518 status = nfserr_serverfault;
2519 goto out_nolock;
50c7b948
BF
2520 }
2521 new->cl_mach_cred = true;
0733d213
AA
2522 case SP4_NONE:
2523 break;
063b0fb9
BF
2524 default: /* checked by xdr code */
2525 WARN_ON_ONCE(1);
0733d213 2526 case SP4_SSV:
8edf4b02
KM
2527 status = nfserr_encr_alg_unsupp;
2528 goto out_nolock;
0733d213
AA
2529 }
2530
2dbb269d 2531 /* Cases below refer to rfc 5661 section 18.35.4: */
3dbacee6 2532 spin_lock(&nn->client_lock);
382a62e7 2533 conf = find_confirmed_client_by_name(&exid->clname, nn);
0733d213 2534 if (conf) {
83e08fd4
BF
2535 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2536 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2537
136e658d
BF
2538 if (update) {
2539 if (!clp_used_exchangeid(conf)) { /* buggy client */
2dbb269d 2540 status = nfserr_inval;
1a308118
BF
2541 goto out;
2542 }
dedeb13f 2543 if (!nfsd4_mach_creds_match(conf, rqstp)) {
57266a6e
BF
2544 status = nfserr_wrong_cred;
2545 goto out;
2546 }
136e658d 2547 if (!creds_match) { /* case 9 */
ea236d07 2548 status = nfserr_perm;
136e658d
BF
2549 goto out;
2550 }
2551 if (!verfs_match) { /* case 8 */
0733d213
AA
2552 status = nfserr_not_same;
2553 goto out;
2554 }
136e658d
BF
2555 /* case 6 */
2556 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
136e658d 2557 goto out_copy;
0733d213 2558 }
136e658d 2559 if (!creds_match) { /* case 3 */
631fc9ea
BF
2560 if (client_has_state(conf)) {
2561 status = nfserr_clid_inuse;
0733d213
AA
2562 goto out;
2563 }
0733d213
AA
2564 goto out_new;
2565 }
136e658d 2566 if (verfs_match) { /* case 2 */
0f1ba0ef 2567 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
136e658d
BF
2568 goto out_copy;
2569 }
2570 /* case 5, client reboot */
3dbacee6 2571 conf = NULL;
136e658d 2572 goto out_new;
6ddbbbfe
MS
2573 }
2574
2dbb269d 2575 if (update) { /* case 7 */
6ddbbbfe
MS
2576 status = nfserr_noent;
2577 goto out;
0733d213
AA
2578 }
2579
a99454aa 2580 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
2dbb269d 2581 if (unconf) /* case 4, possible retry or client restart */
3dbacee6 2582 unhash_client_locked(unconf);
0733d213 2583
2dbb269d 2584 /* case 1 (normal case) */
0733d213 2585out_new:
fd699b8a
JL
2586 if (conf) {
2587 status = mark_client_expired_locked(conf);
2588 if (status)
2589 goto out;
2590 }
4f540e29 2591 new->cl_minorversion = cstate->minorversion;
ed941643
AE
2592 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
2593 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
0733d213 2594
c212cecf 2595 gen_clid(new, nn);
ac55fdc4 2596 add_to_unconfirmed(new);
3dbacee6 2597 swap(new, conf);
0733d213 2598out_copy:
5cc40fd7
TM
2599 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2600 exid->clientid.cl_id = conf->cl_clientid.cl_id;
0733d213 2601
5cc40fd7
TM
2602 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2603 nfsd4_set_ex_flags(conf, exid);
0733d213
AA
2604
2605 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
5cc40fd7 2606 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
0733d213
AA
2607 status = nfs_ok;
2608
2609out:
3dbacee6 2610 spin_unlock(&nn->client_lock);
50c7b948 2611out_nolock:
5cc40fd7 2612 if (new)
3dbacee6
TM
2613 expire_client(new);
2614 if (unconf)
2615 expire_client(unconf);
0733d213 2616 return status;
069b6ad4
AA
2617}
2618
57b7b43b 2619static __be32
88e588d5 2620check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
b85d4c01 2621{
88e588d5
AA
2622 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2623 slot_seqid);
b85d4c01
BH
2624
2625 /* The slot is in use, and no response has been sent. */
88e588d5
AA
2626 if (slot_inuse) {
2627 if (seqid == slot_seqid)
b85d4c01
BH
2628 return nfserr_jukebox;
2629 else
2630 return nfserr_seq_misordered;
2631 }
f6d82485 2632 /* Note unsigned 32-bit arithmetic handles wraparound: */
88e588d5 2633 if (likely(seqid == slot_seqid + 1))
b85d4c01 2634 return nfs_ok;
88e588d5 2635 if (seqid == slot_seqid)
b85d4c01 2636 return nfserr_replay_cache;
b85d4c01
BH
2637 return nfserr_seq_misordered;
2638}
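/*
 * A small self-contained illustration of the unsigned wraparound noted
 * above: when the cached slot seqid is 0xffffffff, the next in-order
 * request seqid is 0. The demo_* names are illustrative only.
 */
#include <assert.h>
#include <stdint.h>

static int demo_seqid_in_order(uint32_t seqid, uint32_t slot_seqid)
{
	return seqid == slot_seqid + 1;	/* u32 arithmetic wraps naturally */
}

int main(void)
{
	assert(demo_seqid_in_order(5, 4));		/* normal case */
	assert(demo_seqid_in_order(0, UINT32_MAX));	/* wraparound */
	assert(!demo_seqid_in_order(2, 4));		/* misordered */
	return 0;
}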
2639
49557cc7
AA
2640/*
2641 * Cache the create session result into the create session single DRC
2642 * slot cache by saving the xdr structure. sl_seqid has been set.
2643 * Do this for solo or embedded create session operations.
2644 */
2645static void
2646nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
57b7b43b 2647 struct nfsd4_clid_slot *slot, __be32 nfserr)
49557cc7
AA
2648{
2649 slot->sl_status = nfserr;
2650 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2651}
2652
2653static __be32
2654nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2655 struct nfsd4_clid_slot *slot)
2656{
2657 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2658 return slot->sl_status;
2659}
2660
1b74c25b
MJ
2661#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
2662 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2663 1 + /* MIN tag is length with zero, only length */ \
2664 3 + /* version, opcount, opcode */ \
2665 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2666 /* seqid, slotID, slotID, cache */ \
2667 4 ) * sizeof(__be32))
2668
2669#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2670 2 + /* verifier: AUTH_NULL, length 0 */\
2671 1 + /* status */ \
2672 1 + /* MIN tag is length with zero, only length */ \
2673 3 + /* opcount, opcode, opstatus*/ \
2674 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2675 /* seqid, slotID, slotID, slotID, status */ \
2676 5 ) * sizeof(__be32))
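/*
 * Working the arithmetic through (assuming NFS4_MAX_SESSIONID_LEN is 16,
 * so XDR_QUADLEN(16) == 4 four-byte words):
 *
 *   NFSD_MIN_REQ_HDR_SEQ_SZ  = (2*2 + 1 + 3 + 4 + 4) * 4 = 16 * 4 = 64 bytes
 *   NFSD_MIN_RESP_HDR_SEQ_SZ = (2 + 1 + 1 + 3 + 4 + 5) * 4 = 16 * 4 = 64 bytes
 *
 * so a client must offer at least 64-byte request and reply slots for a
 * bare SEQUENCE, or check_forechannel_attrs() below returns nfserr_toosmall.
 */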
2677
55c760cf 2678static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1b74c25b 2679{
55c760cf
BF
2680 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2681
373cd409
BF
2682 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2683 return nfserr_toosmall;
2684 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2685 return nfserr_toosmall;
55c760cf
BF
2686 ca->headerpadsz = 0;
2687 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2688 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2689 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2690 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2691 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2692 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2693 /*
 2694 * Note that decreasing the slot size below the client's request may
 2695 * make it difficult for the client to function correctly, whereas
 2696 * decreasing the number of slots will (just?) affect
 2697 * performance. When short on memory we therefore prefer to
 2698 * decrease the number of slots instead of their size. Clients that
2699 * request larger slots than they need will get poor results:
2700 */
2701 ca->maxreqs = nfsd4_get_drc_mem(ca);
2702 if (!ca->maxreqs)
2703 return nfserr_jukebox;
2704
373cd409 2705 return nfs_ok;
1b74c25b
MJ
2706}
2707
4500632f
CL
2708/*
2709 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2710 * These are based on similar macros in linux/sunrpc/msg_prot.h .
2711 */
2712#define RPC_MAX_HEADER_WITH_AUTH_SYS \
2713 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2714
2715#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2716 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2717
8a891633 2718#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
4500632f 2719 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
8a891633 2720#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
4500632f
CL
2721 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2722 sizeof(__be32))
8a891633 2723
06b332a5 2724static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
1b74c25b 2725{
06b332a5
BF
2726 ca->headerpadsz = 0;
2727
8a891633 2728 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
06b332a5 2729 return nfserr_toosmall;
8a891633 2730 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
06b332a5
BF
2731 return nfserr_toosmall;
2732 ca->maxresp_cached = 0;
2733 if (ca->maxops < 2)
2734 return nfserr_toosmall;
2735
2736 return nfs_ok;
1b74c25b
MJ
2737}
2738
b78724b7
BF
2739static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2740{
2741 switch (cbs->flavor) {
2742 case RPC_AUTH_NULL:
2743 case RPC_AUTH_UNIX:
2744 return nfs_ok;
2745 default:
2746 /*
2747 * GSS case: the spec doesn't allow us to return this
2748 * error. But it also doesn't allow us not to support
2749 * GSS.
2750 * I'd rather this fail hard than return some error the
2751 * client might think it can already handle:
2752 */
2753 return nfserr_encr_alg_unsupp;
2754 }
2755}
2756
069b6ad4
AA
2757__be32
2758nfsd4_create_session(struct svc_rqst *rqstp,
eb69853d 2759 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
069b6ad4 2760{
eb69853d 2761 struct nfsd4_create_session *cr_ses = &u->create_session;
363168b4 2762 struct sockaddr *sa = svc_addr(rqstp);
ec6b5d7b 2763 struct nfs4_client *conf, *unconf;
d20c11d8 2764 struct nfs4_client *old = NULL;
ac7c46f2 2765 struct nfsd4_session *new;
81f0b2a4 2766 struct nfsd4_conn *conn;
49557cc7 2767 struct nfsd4_clid_slot *cs_slot = NULL;
57b7b43b 2768 __be32 status = 0;
8daae4dc 2769 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
ec6b5d7b 2770
a62573dc
MJ
2771 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2772 return nfserr_inval;
b78724b7
BF
2773 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2774 if (status)
2775 return status;
55c760cf 2776 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
06b332a5
BF
2777 if (status)
2778 return status;
2779 status = check_backchannel_attrs(&cr_ses->back_channel);
373cd409 2780 if (status)
f403e450 2781 goto out_release_drc_mem;
81f0b2a4 2782 status = nfserr_jukebox;
60810e54 2783 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
55c760cf
BF
2784 if (!new)
2785 goto out_release_drc_mem;
81f0b2a4
BF
2786 conn = alloc_conn_from_crses(rqstp, cr_ses);
2787 if (!conn)
2788 goto out_free_session;
a62573dc 2789
d20c11d8 2790 spin_lock(&nn->client_lock);
0a7ec377 2791 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
8daae4dc 2792 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
78389046 2793 WARN_ON_ONCE(conf && unconf);
ec6b5d7b
AA
2794
2795 if (conf) {
57266a6e 2796 status = nfserr_wrong_cred;
dedeb13f 2797 if (!nfsd4_mach_creds_match(conf, rqstp))
57266a6e 2798 goto out_free_conn;
49557cc7
AA
2799 cs_slot = &conf->cl_cs_slot;
2800 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
f5e22bb6
KM
2801 if (status) {
2802 if (status == nfserr_replay_cache)
2803 status = nfsd4_replay_create_session(cr_ses, cs_slot);
81f0b2a4 2804 goto out_free_conn;
ec6b5d7b 2805 }
ec6b5d7b
AA
2806 } else if (unconf) {
2807 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
363168b4 2808 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
ec6b5d7b 2809 status = nfserr_clid_inuse;
81f0b2a4 2810 goto out_free_conn;
ec6b5d7b 2811 }
57266a6e 2812 status = nfserr_wrong_cred;
dedeb13f 2813 if (!nfsd4_mach_creds_match(unconf, rqstp))
57266a6e 2814 goto out_free_conn;
49557cc7
AA
2815 cs_slot = &unconf->cl_cs_slot;
2816 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
38eb76a5
AA
2817 if (status) {
2818 /* an unconfirmed replay returns misordered */
ec6b5d7b 2819 status = nfserr_seq_misordered;
81f0b2a4 2820 goto out_free_conn;
ec6b5d7b 2821 }
382a62e7 2822 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
221a6876 2823 if (old) {
d20c11d8 2824 status = mark_client_expired_locked(old);
7abea1e8
JL
2825 if (status) {
2826 old = NULL;
221a6876 2827 goto out_free_conn;
7abea1e8 2828 }
221a6876 2829 }
8f9d3d3b 2830 move_to_confirmed(unconf);
ec6b5d7b
AA
2831 conf = unconf;
2832 } else {
2833 status = nfserr_stale_clientid;
81f0b2a4 2834 goto out_free_conn;
ec6b5d7b 2835 }
81f0b2a4 2836 status = nfs_ok;
4ce85c8c 2837 /* Persistent sessions are not supported */
408b79bc 2838 cr_ses->flags &= ~SESSION4_PERSIST;
4ce85c8c 2839 /* Upshifting from TCP to RDMA is not supported */
408b79bc
BF
2840 cr_ses->flags &= ~SESSION4_RDMA;
2841
81f0b2a4 2842 init_session(rqstp, new, conf, cr_ses);
d20c11d8 2843 nfsd4_get_session_locked(new);
81f0b2a4 2844
ac7c46f2 2845 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
ec6b5d7b 2846 NFS4_MAX_SESSIONID_LEN);
86c3e16c 2847 cs_slot->sl_seqid++;
49557cc7 2848 cr_ses->seqid = cs_slot->sl_seqid;
ec6b5d7b 2849
d20c11d8 2850 /* cache solo and embedded create sessions under the client_lock */
49557cc7 2851 nfsd4_cache_create_session(cr_ses, cs_slot, status);
d20c11d8
JL
2852 spin_unlock(&nn->client_lock);
2853 /* init connection and backchannel */
2854 nfsd4_init_conn(rqstp, conn, new);
2855 nfsd4_put_session(new);
d20c11d8
JL
2856 if (old)
2857 expire_client(old);
ec6b5d7b 2858 return status;
81f0b2a4 2859out_free_conn:
d20c11d8 2860 spin_unlock(&nn->client_lock);
81f0b2a4 2861 free_conn(conn);
d20c11d8
JL
2862 if (old)
2863 expire_client(old);
81f0b2a4
BF
2864out_free_session:
2865 __free_session(new);
55c760cf
BF
2866out_release_drc_mem:
2867 nfsd4_put_drc_mem(&cr_ses->fore_channel);
1ca50792 2868 return status;
069b6ad4
AA
2869}
2870
1d1bc8f2
BF
2871static __be32 nfsd4_map_bcts_dir(u32 *dir)
2872{
2873 switch (*dir) {
2874 case NFS4_CDFC4_FORE:
2875 case NFS4_CDFC4_BACK:
2876 return nfs_ok;
2877 case NFS4_CDFC4_FORE_OR_BOTH:
2878 case NFS4_CDFC4_BACK_OR_BOTH:
2879 *dir = NFS4_CDFC4_BOTH;
2880 return nfs_ok;
 2881 }
2882 return nfserr_inval;
2883}
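/*
 * Usage sketch: the mapping above collapses the "either or both" requests
 * to BOTH and leaves the explicit directions alone, e.g.
 *
 *	u32 dir = NFS4_CDFC4_FORE_OR_BOTH;
 *	if (nfsd4_map_bcts_dir(&dir) == nfs_ok)
 *		... dir is now NFS4_CDFC4_BOTH ...
 */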
2884
eb69853d
CH
2885__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
2886 struct nfsd4_compound_state *cstate,
2887 union nfsd4_op_u *u)
cb73a9f4 2888{
eb69853d 2889 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
cb73a9f4 2890 struct nfsd4_session *session = cstate->session;
c9a49628 2891 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
b78724b7 2892 __be32 status;
cb73a9f4 2893
b78724b7
BF
2894 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2895 if (status)
2896 return status;
c9a49628 2897 spin_lock(&nn->client_lock);
cb73a9f4
BF
2898 session->se_cb_prog = bc->bc_cb_program;
2899 session->se_cb_sec = bc->bc_cb_sec;
c9a49628 2900 spin_unlock(&nn->client_lock);
cb73a9f4
BF
2901
2902 nfsd4_probe_callback(session->se_client);
2903
2904 return nfs_ok;
2905}
2906
1d1bc8f2
BF
2907__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2908 struct nfsd4_compound_state *cstate,
eb69853d 2909 union nfsd4_op_u *u)
1d1bc8f2 2910{
eb69853d 2911 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
1d1bc8f2 2912 __be32 status;
3ba63671 2913 struct nfsd4_conn *conn;
4f6e6c17 2914 struct nfsd4_session *session;
d4e19e70
TM
2915 struct net *net = SVC_NET(rqstp);
2916 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1d1bc8f2
BF
2917
2918 if (!nfsd4_last_compound_op(rqstp))
2919 return nfserr_not_only_op;
c9a49628 2920 spin_lock(&nn->client_lock);
d4e19e70 2921 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
c9a49628 2922 spin_unlock(&nn->client_lock);
4f6e6c17 2923 if (!session)
d4e19e70 2924 goto out_no_session;
57266a6e 2925 status = nfserr_wrong_cred;
dedeb13f 2926 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
57266a6e 2927 goto out;
1d1bc8f2 2928 status = nfsd4_map_bcts_dir(&bcts->dir);
3ba63671 2929 if (status)
4f6e6c17 2930 goto out;
3ba63671 2931 conn = alloc_conn(rqstp, bcts->dir);
4f6e6c17 2932 status = nfserr_jukebox;
3ba63671 2933 if (!conn)
4f6e6c17
BF
2934 goto out;
2935 nfsd4_init_conn(rqstp, conn, session);
2936 status = nfs_ok;
2937out:
d4e19e70
TM
2938 nfsd4_put_session(session);
2939out_no_session:
4f6e6c17 2940 return status;
1d1bc8f2
BF
2941}
2942
5d4cec2f
BF
2943static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2944{
2945 if (!session)
2946 return 0;
2947 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2948}
2949
069b6ad4 2950__be32
eb69853d
CH
2951nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
2952 union nfsd4_op_u *u)
069b6ad4 2953{
eb69853d 2954 struct nfsd4_destroy_session *sessionid = &u->destroy_session;
e10e0cfc 2955 struct nfsd4_session *ses;
abcdff09 2956 __be32 status;
f0f51f5c 2957 int ref_held_by_me = 0;
d4e19e70
TM
2958 struct net *net = SVC_NET(r);
2959 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
e10e0cfc 2960
abcdff09 2961 status = nfserr_not_only_op;
5d4cec2f 2962 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
57716355 2963 if (!nfsd4_last_compound_op(r))
abcdff09 2964 goto out;
f0f51f5c 2965 ref_held_by_me++;
57716355 2966 }
e10e0cfc 2967 dump_sessionid(__func__, &sessionid->sessionid);
c9a49628 2968 spin_lock(&nn->client_lock);
d4e19e70 2969 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
abcdff09
BF
2970 if (!ses)
2971 goto out_client_lock;
57266a6e 2972 status = nfserr_wrong_cred;
dedeb13f 2973 if (!nfsd4_mach_creds_match(ses->se_client, r))
d4e19e70 2974 goto out_put_session;
f0f51f5c 2975 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
66b2b9b2 2976 if (status)
f0f51f5c 2977 goto out_put_session;
e10e0cfc 2978 unhash_session(ses);
c9a49628 2979 spin_unlock(&nn->client_lock);
e10e0cfc 2980
84f5f7cc 2981 nfsd4_probe_callback_sync(ses->se_client);
19cf5c02 2982
c9a49628 2983 spin_lock(&nn->client_lock);
e10e0cfc 2984 status = nfs_ok;
f0f51f5c 2985out_put_session:
d4e19e70 2986 nfsd4_put_session_locked(ses);
abcdff09
BF
2987out_client_lock:
2988 spin_unlock(&nn->client_lock);
e10e0cfc 2989out:
e10e0cfc 2990 return status;
069b6ad4
AA
2991}
2992
a663bdd8 2993static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
328ead28
BF
2994{
2995 struct nfsd4_conn *c;
2996
2997 list_for_each_entry(c, &s->se_conns, cn_persession) {
a663bdd8 2998 if (c->cn_xprt == xpt) {
328ead28
BF
2999 return c;
3000 }
3001 }
3002 return NULL;
3003}
3004
57266a6e 3005static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
328ead28
BF
3006{
3007 struct nfs4_client *clp = ses->se_client;
a663bdd8 3008 struct nfsd4_conn *c;
57266a6e 3009 __be32 status = nfs_ok;
21b75b01 3010 int ret;
328ead28
BF
3011
3012 spin_lock(&clp->cl_lock);
a663bdd8 3013 c = __nfsd4_find_conn(new->cn_xprt, ses);
57266a6e
BF
3014 if (c)
3015 goto out_free;
3016 status = nfserr_conn_not_bound_to_session;
3017 if (clp->cl_mach_cred)
3018 goto out_free;
328ead28
BF
3019 __nfsd4_hash_conn(new, ses);
3020 spin_unlock(&clp->cl_lock);
21b75b01
BF
3021 ret = nfsd4_register_conn(new);
3022 if (ret)
3023 /* oops; xprt is already down: */
3024 nfsd4_conn_lost(&new->cn_xpt_user);
57266a6e
BF
3025 return nfs_ok;
3026out_free:
3027 spin_unlock(&clp->cl_lock);
3028 free_conn(new);
3029 return status;
328ead28
BF
3030}
3031
868b89c3
MJ
3032static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3033{
3034 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3035
3036 return args->opcnt > session->se_fchannel.maxops;
3037}
3038
ae82a8d0
MJ
3039static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3040 struct nfsd4_session *session)
3041{
3042 struct xdr_buf *xb = &rqstp->rq_arg;
3043
3044 return xb->len > session->se_fchannel.maxreq_sz;
3045}
3046
ff371bc8
BF
3047static bool replay_matches_cache(struct svc_rqst *rqstp,
3048 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3049{
3050 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3051
3052 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3053 (bool)seq->cachethis)
3054 return false;
3055 /*
 3056 * If there's an error then the reply can have fewer ops than
3057 * the call. But if we cached a reply with *more* ops than the
3058 * call you're sending us now, then this new call is clearly not
3059 * really a replay of the old one:
3060 */
3061 if (slot->sl_opcnt < argp->opcnt)
3062 return false;
3063 /* This is the only check explicitly called by spec: */
3064 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3065 return false;
3066 /*
3067 * There may be more comparisons we could actually do, but the
3068 * spec doesn't require us to catch every case where the calls
3069 * don't match (that would require caching the call as well as
3070 * the reply), so we don't bother.
3071 */
3072 return true;
3073}
3074
069b6ad4 3075__be32
eb69853d
CH
3076nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3077 union nfsd4_op_u *u)
069b6ad4 3078{
eb69853d 3079 struct nfsd4_sequence *seq = &u->sequence;
f9bb94c4 3080 struct nfsd4_compoundres *resp = rqstp->rq_resp;
47ee5298 3081 struct xdr_stream *xdr = &resp->xdr;
b85d4c01 3082 struct nfsd4_session *session;
221a6876 3083 struct nfs4_client *clp;
b85d4c01 3084 struct nfsd4_slot *slot;
a663bdd8 3085 struct nfsd4_conn *conn;
57b7b43b 3086 __be32 status;
47ee5298 3087 int buflen;
d4e19e70
TM
3088 struct net *net = SVC_NET(rqstp);
3089 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
b85d4c01 3090
f9bb94c4
AA
3091 if (resp->opcnt != 1)
3092 return nfserr_sequence_pos;
3093
a663bdd8
BF
3094 /*
3095 * Will be either used or freed by nfsd4_sequence_check_conn
3096 * below.
3097 */
3098 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3099 if (!conn)
3100 return nfserr_jukebox;
3101
c9a49628 3102 spin_lock(&nn->client_lock);
d4e19e70 3103 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
b85d4c01 3104 if (!session)
221a6876
BF
3105 goto out_no_session;
3106 clp = session->se_client;
b85d4c01 3107
868b89c3
MJ
3108 status = nfserr_too_many_ops;
3109 if (nfsd4_session_too_many_ops(rqstp, session))
66b2b9b2 3110 goto out_put_session;
868b89c3 3111
ae82a8d0
MJ
3112 status = nfserr_req_too_big;
3113 if (nfsd4_request_too_big(rqstp, session))
66b2b9b2 3114 goto out_put_session;
ae82a8d0 3115
b85d4c01 3116 status = nfserr_badslot;
6c18ba9f 3117 if (seq->slotid >= session->se_fchannel.maxreqs)
66b2b9b2 3118 goto out_put_session;
b85d4c01 3119
557ce264 3120 slot = session->se_slots[seq->slotid];
b85d4c01
BH
3121 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3122
a8dfdaeb
AA
3123 /* We do not negotiate the number of slots yet, so set the
3124 * maxslots to the session maxreqs which is used to encode
 3125 * sr_highest_slotid and the sr_target_slotid to maxslots */
3126 seq->maxslots = session->se_fchannel.maxreqs;
3127
73e79482
BF
3128 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3129 slot->sl_flags & NFSD4_SLOT_INUSE);
b85d4c01 3130 if (status == nfserr_replay_cache) {
bf5c43c8
BF
3131 status = nfserr_seq_misordered;
3132 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
66b2b9b2 3133 goto out_put_session;
ff371bc8
BF
3134 status = nfserr_seq_false_retry;
3135 if (!replay_matches_cache(rqstp, seq, slot))
3136 goto out_put_session;
b85d4c01
BH
3137 cstate->slot = slot;
3138 cstate->session = session;
4b24ca7d 3139 cstate->clp = clp;
da3846a2 3140 /* Return the cached reply status and set cstate->status
557ce264 3141 * for nfsd4_proc_compound processing */
bf864a31 3142 status = nfsd4_replay_cache_entry(resp, seq);
da3846a2 3143 cstate->status = nfserr_replay_cache;
aaf84eb9 3144 goto out;
b85d4c01
BH
3145 }
3146 if (status)
66b2b9b2 3147 goto out_put_session;
b85d4c01 3148
57266a6e 3149 status = nfsd4_sequence_check_conn(conn, session);
a663bdd8 3150 conn = NULL;
57266a6e
BF
3151 if (status)
3152 goto out_put_session;
328ead28 3153
47ee5298
BF
3154 buflen = (seq->cachethis) ?
3155 session->se_fchannel.maxresp_cached :
3156 session->se_fchannel.maxresp_sz;
3157 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3158 nfserr_rep_too_big;
a5cddc88 3159 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
47ee5298 3160 goto out_put_session;
32aaa62e 3161 svc_reserve(rqstp, buflen);
47ee5298
BF
3162
3163 status = nfs_ok;
b85d4c01 3164 /* Success! bump slot seqid */
b85d4c01 3165 slot->sl_seqid = seq->seqid;
bf5c43c8 3166 slot->sl_flags |= NFSD4_SLOT_INUSE;
73e79482
BF
3167 if (seq->cachethis)
3168 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
bf5c43c8
BF
3169 else
3170 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
b85d4c01
BH
3171
3172 cstate->slot = slot;
3173 cstate->session = session;
4b24ca7d 3174 cstate->clp = clp;
b85d4c01 3175
b85d4c01 3176out:
221a6876
BF
3177 switch (clp->cl_cb_state) {
3178 case NFSD4_CB_DOWN:
3179 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3180 break;
3181 case NFSD4_CB_FAULT:
3182 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3183 break;
3184 default:
3185 seq->status_flags = 0;
aaf84eb9 3186 }
3bd64a5b
BF
3187 if (!list_empty(&clp->cl_revoked))
3188 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
221a6876 3189out_no_session:
3f42d2c4
KM
3190 if (conn)
3191 free_conn(conn);
c9a49628 3192 spin_unlock(&nn->client_lock);
b85d4c01 3193 return status;
66b2b9b2 3194out_put_session:
d4e19e70 3195 nfsd4_put_session_locked(session);
221a6876 3196 goto out_no_session;
069b6ad4
AA
3197}
3198
b607664e
TM
3199void
3200nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3201{
3202 struct nfsd4_compound_state *cs = &resp->cstate;
3203
3204 if (nfsd4_has_session(cs)) {
b607664e
TM
3205 if (cs->status != nfserr_replay_cache) {
3206 nfsd4_store_cache_entry(resp);
3207 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3208 }
d4e19e70 3209 /* Drop session reference that was taken in nfsd4_sequence() */
b607664e 3210 nfsd4_put_session(cs->session);
4b24ca7d
JL
3211 } else if (cs->clp)
3212 put_client_renew(cs->clp);
b607664e
TM
3213}
3214
345c2842 3215__be32
eb69853d
CH
3216nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3217 struct nfsd4_compound_state *cstate,
3218 union nfsd4_op_u *u)
345c2842 3219{
eb69853d 3220 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
6b10ad19
TM
3221 struct nfs4_client *conf, *unconf;
3222 struct nfs4_client *clp = NULL;
57b7b43b 3223 __be32 status = 0;
8daae4dc 3224 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
345c2842 3225
6b10ad19 3226 spin_lock(&nn->client_lock);
0a7ec377 3227 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
8daae4dc 3228 conf = find_confirmed_client(&dc->clientid, true, nn);
78389046 3229 WARN_ON_ONCE(conf && unconf);
345c2842
MJ
3230
3231 if (conf) {
c0293b01 3232 if (client_has_state(conf)) {
345c2842
MJ
3233 status = nfserr_clientid_busy;
3234 goto out;
3235 }
fd699b8a
JL
3236 status = mark_client_expired_locked(conf);
3237 if (status)
3238 goto out;
6b10ad19 3239 clp = conf;
345c2842
MJ
3240 } else if (unconf)
3241 clp = unconf;
3242 else {
3243 status = nfserr_stale_clientid;
3244 goto out;
3245 }
dedeb13f 3246 if (!nfsd4_mach_creds_match(clp, rqstp)) {
6b10ad19 3247 clp = NULL;
57266a6e
BF
3248 status = nfserr_wrong_cred;
3249 goto out;
3250 }
6b10ad19 3251 unhash_client_locked(clp);
345c2842 3252out:
6b10ad19 3253 spin_unlock(&nn->client_lock);
6b10ad19
TM
3254 if (clp)
3255 expire_client(clp);
345c2842
MJ
3256 return status;
3257}
3258
4dc6ec00 3259__be32
eb69853d
CH
3260nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3261 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4dc6ec00 3262{
eb69853d 3263 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
57b7b43b 3264 __be32 status = 0;
bcecf1cc 3265
4dc6ec00
BF
3266 if (rc->rca_one_fs) {
3267 if (!cstate->current_fh.fh_dentry)
3268 return nfserr_nofilehandle;
3269 /*
3270 * We don't take advantage of the rca_one_fs case.
3271 * That's OK, it's optional, we can safely ignore it.
3272 */
d28c442f 3273 return nfs_ok;
4dc6ec00 3274 }
bcecf1cc 3275
bcecf1cc 3276 status = nfserr_complete_already;
a52d726b
JL
3277 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3278 &cstate->session->se_client->cl_flags))
bcecf1cc
MJ
3279 goto out;
3280
3281 status = nfserr_stale_clientid;
3282 if (is_client_expired(cstate->session->se_client))
4dc6ec00
BF
3283 /*
3284 * The following error isn't really legal.
3285 * But we only get here if the client just explicitly
3286 * destroyed the client. Surely it no longer cares what
3287 * error it gets back on an operation for the dead
3288 * client.
3289 */
bcecf1cc
MJ
3290 goto out;
3291
3292 status = nfs_ok;
2a4317c5 3293 nfsd4_client_record_create(cstate->session->se_client);
bcecf1cc 3294out:
bcecf1cc 3295 return status;
4dc6ec00
BF
3296}
3297
b37ad28b 3298__be32
b591480b 3299nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 3300 union nfsd4_op_u *u)
1da177e4 3301{
eb69853d 3302 struct nfsd4_setclientid *setclid = &u->setclientid;
a084daf5 3303 struct xdr_netobj clname = setclid->se_name;
1da177e4 3304 nfs4_verifier clverifier = setclid->se_verf;
3dbacee6
TM
3305 struct nfs4_client *conf, *new;
3306 struct nfs4_client *unconf = NULL;
b37ad28b 3307 __be32 status;
c212cecf
SK
3308 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3309
5cc40fd7
TM
3310 new = create_client(clname, rqstp, &clverifier);
3311 if (new == NULL)
3312 return nfserr_jukebox;
63db4632 3313 /* Cases below refer to rfc 3530 section 14.2.33: */
3dbacee6 3314 spin_lock(&nn->client_lock);
382a62e7 3315 conf = find_confirmed_client_by_name(&clname, nn);
2b634821 3316 if (conf && client_has_state(conf)) {
63db4632 3317 /* case 0: */
1da177e4 3318 status = nfserr_clid_inuse;
e203d506
BF
3319 if (clp_used_exchangeid(conf))
3320 goto out;
026722c2 3321 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
363168b4
JL
3322 char addr_str[INET6_ADDRSTRLEN];
3323 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3324 sizeof(addr_str));
3325 dprintk("NFSD: setclientid: string in use by client "
3326 "at %s\n", addr_str);
1da177e4
LT
3327 goto out;
3328 }
1da177e4 3329 }
a99454aa 3330 unconf = find_unconfirmed_client_by_name(&clname, nn);
8f930711 3331 if (unconf)
3dbacee6 3332 unhash_client_locked(unconf);
41eb1670 3333 if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
63db4632 3334 /* case 1: probable callback update */
1da177e4 3335 copy_clid(new, conf);
41eb1670
KM
3336 gen_confirm(new, nn);
3337 } else /* case 4 (new client) or cases 2, 3 (client reboot): */
c212cecf 3338 gen_clid(new, nn);
8323c3b2 3339 new->cl_minorversion = 0;
6f3d772f 3340 gen_callback(new, setclid, rqstp);
ac55fdc4 3341 add_to_unconfirmed(new);
1da177e4
LT
3342 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3343 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3344 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
5cc40fd7 3345 new = NULL;
1da177e4
LT
3346 status = nfs_ok;
3347out:
3dbacee6 3348 spin_unlock(&nn->client_lock);
5cc40fd7
TM
3349 if (new)
3350 free_client(new);
3dbacee6
TM
3351 if (unconf)
3352 expire_client(unconf);
1da177e4
LT
3353 return status;
3354}
3355
3356
b37ad28b 3357__be32
b591480b 3358nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
eb69853d
CH
3359 struct nfsd4_compound_state *cstate,
3360 union nfsd4_op_u *u)
1da177e4 3361{
eb69853d
CH
3362 struct nfsd4_setclientid_confirm *setclientid_confirm =
3363 &u->setclientid_confirm;
21ab45a4 3364 struct nfs4_client *conf, *unconf;
d20c11d8 3365 struct nfs4_client *old = NULL;
1da177e4
LT
3366 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3367 clientid_t * clid = &setclientid_confirm->sc_clientid;
b37ad28b 3368 __be32 status;
7f2210fa 3369 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1da177e4 3370
2c142baa 3371 if (STALE_CLIENTID(clid, nn))
1da177e4 3372 return nfserr_stale_clientid;
21ab45a4 3373
d20c11d8 3374 spin_lock(&nn->client_lock);
8daae4dc 3375 conf = find_confirmed_client(clid, false, nn);
0a7ec377 3376 unconf = find_unconfirmed_client(clid, false, nn);
a186e767 3377 /*
8695b90a
BF
3378 * We try hard to give out unique clientid's, so if we get an
3379 * attempt to confirm the same clientid with a different cred,
f984a7ce
BF
3380 * the client may be buggy; this should never happen.
3381 *
3382 * Nevertheless, RFC 7530 recommends INUSE for this case:
a186e767 3383 */
f984a7ce 3384 status = nfserr_clid_inuse;
8695b90a
BF
3385 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3386 goto out;
3387 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3388 goto out;
63db4632 3389 /* cases below refer to rfc 3530 section 14.2.34: */
90d700b7 3390 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
7d22fc11
BF
3391 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3392 /* case 2: probable retransmit */
1da177e4 3393 status = nfs_ok;
7d22fc11 3394 } else /* case 4: client hasn't noticed we rebooted yet? */
90d700b7
BF
3395 status = nfserr_stale_clientid;
3396 goto out;
3397 }
3398 status = nfs_ok;
3399 if (conf) { /* case 1: callback update */
d20c11d8
JL
3400 old = unconf;
3401 unhash_client_locked(old);
8695b90a 3402 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
90d700b7 3403 } else { /* case 3: normal case; new or rebooted client */
d20c11d8
JL
3404 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3405 if (old) {
2b634821
BF
3406 status = nfserr_clid_inuse;
3407 if (client_has_state(old)
3408 && !same_creds(&unconf->cl_cred,
3409 &old->cl_cred))
3410 goto out;
d20c11d8 3411 status = mark_client_expired_locked(old);
7abea1e8
JL
3412 if (status) {
3413 old = NULL;
221a6876 3414 goto out;
7abea1e8 3415 }
221a6876 3416 }
8695b90a 3417 move_to_confirmed(unconf);
d20c11d8 3418 conf = unconf;
08e8987c 3419 }
d20c11d8
JL
3420 get_client_locked(conf);
3421 spin_unlock(&nn->client_lock);
3422 nfsd4_probe_callback(conf);
3423 spin_lock(&nn->client_lock);
3424 put_client_renew_locked(conf);
1da177e4 3425out:
d20c11d8
JL
3426 spin_unlock(&nn->client_lock);
3427 if (old)
3428 expire_client(old);
1da177e4
LT
3429 return status;
3430}
3431
32513b40
BF
3432static struct nfs4_file *nfsd4_alloc_file(void)
3433{
3434 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3435}
3436
1da177e4 3437/* OPEN Share state helper functions */
5b095e99
JL
3438static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3439 struct nfs4_file *fp)
1da177e4 3440{
950e0118
TM
3441 lockdep_assert_held(&state_lock);
3442
32513b40 3443 atomic_set(&fp->fi_ref, 1);
1d31a253 3444 spin_lock_init(&fp->fi_lock);
32513b40
BF
3445 INIT_LIST_HEAD(&fp->fi_stateids);
3446 INIT_LIST_HEAD(&fp->fi_delegations);
8287f009 3447 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
e2cf80d7 3448 fh_copy_shallow(&fp->fi_fhandle, fh);
0c637be8 3449 fp->fi_deleg_file = NULL;
32513b40 3450 fp->fi_had_conflict = false;
baeb4ff0 3451 fp->fi_share_deny = 0;
32513b40
BF
3452 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3453 memset(fp->fi_access, 0, sizeof(fp->fi_access));
9cf514cc
CH
3454#ifdef CONFIG_NFSD_PNFS
3455 INIT_LIST_HEAD(&fp->fi_lo_states);
c5c707f9 3456 atomic_set(&fp->fi_lo_recalls, 0);
9cf514cc 3457#endif
5b095e99 3458 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
1da177e4
LT
3459}
3460
e8ff2a84 3461void
1da177e4
LT
3462nfsd4_free_slabs(void)
3463{
8287f009 3464 kmem_cache_destroy(odstate_slab);
abf1135b
CH
3465 kmem_cache_destroy(openowner_slab);
3466 kmem_cache_destroy(lockowner_slab);
3467 kmem_cache_destroy(file_slab);
3468 kmem_cache_destroy(stateid_slab);
3469 kmem_cache_destroy(deleg_slab);
e60d4398 3470}
1da177e4 3471
72083396 3472int
e60d4398
N
3473nfsd4_init_slabs(void)
3474{
fe0750e5
BF
3475 openowner_slab = kmem_cache_create("nfsd4_openowners",
3476 sizeof(struct nfs4_openowner), 0, 0, NULL);
3477 if (openowner_slab == NULL)
abf1135b 3478 goto out;
fe0750e5 3479 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3c40794b 3480 sizeof(struct nfs4_lockowner), 0, 0, NULL);
fe0750e5 3481 if (lockowner_slab == NULL)
abf1135b 3482 goto out_free_openowner_slab;
e60d4398 3483 file_slab = kmem_cache_create("nfsd4_files",
20c2df83 3484 sizeof(struct nfs4_file), 0, 0, NULL);
e60d4398 3485 if (file_slab == NULL)
abf1135b 3486 goto out_free_lockowner_slab;
5ac049ac 3487 stateid_slab = kmem_cache_create("nfsd4_stateids",
dcef0413 3488 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
5ac049ac 3489 if (stateid_slab == NULL)
abf1135b 3490 goto out_free_file_slab;
5b2d21c1 3491 deleg_slab = kmem_cache_create("nfsd4_delegations",
20c2df83 3492 sizeof(struct nfs4_delegation), 0, 0, NULL);
5b2d21c1 3493 if (deleg_slab == NULL)
abf1135b 3494 goto out_free_stateid_slab;
8287f009
SB
3495 odstate_slab = kmem_cache_create("nfsd4_odstate",
3496 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3497 if (odstate_slab == NULL)
3498 goto out_free_deleg_slab;
e60d4398 3499 return 0;
abf1135b 3500
8287f009
SB
3501out_free_deleg_slab:
3502 kmem_cache_destroy(deleg_slab);
abf1135b
CH
3503out_free_stateid_slab:
3504 kmem_cache_destroy(stateid_slab);
3505out_free_file_slab:
3506 kmem_cache_destroy(file_slab);
3507out_free_lockowner_slab:
3508 kmem_cache_destroy(lockowner_slab);
3509out_free_openowner_slab:
3510 kmem_cache_destroy(openowner_slab);
3511out:
e60d4398
N
3512 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3513 return -ENOMEM;
1da177e4
LT
3514}
3515
ff194bd9 3516static void init_nfs4_replay(struct nfs4_replay *rp)
1da177e4 3517{
ff194bd9
BF
3518 rp->rp_status = nfserr_serverfault;
3519 rp->rp_buflen = 0;
3520 rp->rp_buf = rp->rp_ibuf;
58fb12e6
JL
3521 mutex_init(&rp->rp_mutex);
3522}
3523
3524static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3525 struct nfs4_stateowner *so)
3526{
3527 if (!nfsd4_has_session(cstate)) {
3528 mutex_lock(&so->so_replay.rp_mutex);
b5971afa 3529 cstate->replay_owner = nfs4_get_stateowner(so);
58fb12e6
JL
3530 }
3531}
3532
3533void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3534{
3535 struct nfs4_stateowner *so = cstate->replay_owner;
3536
3537 if (so != NULL) {
3538 cstate->replay_owner = NULL;
3539 mutex_unlock(&so->so_replay.rp_mutex);
3540 nfs4_put_stateowner(so);
3541 }
1da177e4
LT
3542}
3543
fe0750e5 3544static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
ff194bd9 3545{
1da177e4 3546 struct nfs4_stateowner *sop;
1da177e4 3547
fe0750e5 3548 sop = kmem_cache_alloc(slab, GFP_KERNEL);
ff194bd9
BF
3549 if (!sop)
3550 return NULL;
3551
3552 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3553 if (!sop->so_owner.data) {
fe0750e5 3554 kmem_cache_free(slab, sop);
1da177e4 3555 return NULL;
ff194bd9
BF
3556 }
3557 sop->so_owner.len = owner->len;
3558
ea1da636 3559 INIT_LIST_HEAD(&sop->so_stateids);
ff194bd9
BF
3560 sop->so_client = clp;
3561 init_nfs4_replay(&sop->so_replay);
6b180f0b 3562 atomic_set(&sop->so_count, 1);
ff194bd9
BF
3563 return sop;
3564}
3565
fe0750e5 3566static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
ff194bd9 3567{
d4f0489f 3568 lockdep_assert_held(&clp->cl_lock);
9b531137 3569
d4f0489f
TM
3570 list_add(&oo->oo_owner.so_strhash,
3571 &clp->cl_ownerstr_hashtbl[strhashval]);
fe0750e5 3572 list_add(&oo->oo_perclient, &clp->cl_openowners);
ff194bd9
BF
3573}
3574
8f4b54c5
JL
3575static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3576{
d4f0489f 3577 unhash_openowner_locked(openowner(so));
8f4b54c5
JL
3578}
3579
6b180f0b
JL
3580static void nfs4_free_openowner(struct nfs4_stateowner *so)
3581{
3582 struct nfs4_openowner *oo = openowner(so);
3583
3584 kmem_cache_free(openowner_slab, oo);
3585}
3586
3587static const struct nfs4_stateowner_operations openowner_ops = {
8f4b54c5
JL
3588 .so_unhash = nfs4_unhash_openowner,
3589 .so_free = nfs4_free_openowner,
6b180f0b
JL
3590};
3591
7fc0564e
AE
3592static struct nfs4_ol_stateid *
3593nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3594{
3595 struct nfs4_ol_stateid *local, *ret = NULL;
3596 struct nfs4_openowner *oo = open->op_openowner;
3597
3598 lockdep_assert_held(&fp->fi_lock);
3599
3600 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3601 /* ignore lock owners */
3602 if (local->st_stateowner->so_is_open_owner == 0)
3603 continue;
db77ab54
TM
3604 if (local->st_stateowner != &oo->oo_owner)
3605 continue;
3606 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
7fc0564e
AE
3607 ret = local;
3608 atomic_inc(&ret->st_stid.sc_count);
3609 break;
3610 }
3611 }
3612 return ret;
3613}
3614
db77ab54
TM
3615static __be32
3616nfsd4_verify_open_stid(struct nfs4_stid *s)
3617{
3618 __be32 ret = nfs_ok;
3619
3620 switch (s->sc_type) {
3621 default:
3622 break;
3623 case NFS4_CLOSED_STID:
3624 case NFS4_CLOSED_DELEG_STID:
3625 ret = nfserr_bad_stateid;
3626 break;
3627 case NFS4_REVOKED_DELEG_STID:
3628 ret = nfserr_deleg_revoked;
3629 }
3630 return ret;
3631}
3632
3633/* Lock the stateid st_mutex, and deal with races with CLOSE */
3634static __be32
3635nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
3636{
3637 __be32 ret;
3638
3639 mutex_lock(&stp->st_mutex);
3640 ret = nfsd4_verify_open_stid(&stp->st_stid);
3641 if (ret != nfs_ok)
3642 mutex_unlock(&stp->st_mutex);
3643 return ret;
3644}
3645
3646static struct nfs4_ol_stateid *
3647nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3648{
3649 struct nfs4_ol_stateid *stp;
3650 for (;;) {
3651 spin_lock(&fp->fi_lock);
3652 stp = nfsd4_find_existing_open(fp, open);
3653 spin_unlock(&fp->fi_lock);
3654 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
3655 break;
3656 nfs4_put_stid(&stp->st_stid);
3657 }
3658 return stp;
3659}
3660
fe0750e5 3661static struct nfs4_openowner *
13d6f66b 3662alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
db24b3b4
JL
3663 struct nfsd4_compound_state *cstate)
3664{
13d6f66b 3665 struct nfs4_client *clp = cstate->clp;
7ffb5880 3666 struct nfs4_openowner *oo, *ret;
ff194bd9 3667
fe0750e5
BF
3668 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3669 if (!oo)
ff194bd9 3670 return NULL;
6b180f0b 3671 oo->oo_owner.so_ops = &openowner_ops;
fe0750e5
BF
3672 oo->oo_owner.so_is_open_owner = 1;
3673 oo->oo_owner.so_seqid = open->op_seqid;
d3134b10 3674 oo->oo_flags = 0;
db24b3b4
JL
3675 if (nfsd4_has_session(cstate))
3676 oo->oo_flags |= NFS4_OO_CONFIRMED;
fe0750e5 3677 oo->oo_time = 0;
38c387b5 3678 oo->oo_last_closed_stid = NULL;
fe0750e5 3679 INIT_LIST_HEAD(&oo->oo_close_lru);
d4f0489f
TM
3680 spin_lock(&clp->cl_lock);
3681 ret = find_openstateowner_str_locked(strhashval, open, clp);
7ffb5880
TM
3682 if (ret == NULL) {
3683 hash_openowner(oo, clp, strhashval);
3684 ret = oo;
3685 } else
d50ffded
KM
3686 nfs4_free_stateowner(&oo->oo_owner);
3687
d4f0489f 3688 spin_unlock(&clp->cl_lock);
c5952338 3689 return ret;
1da177e4
LT
3690}
3691
7fc0564e 3692static struct nfs4_ol_stateid *
8c7245ab 3693init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
7fc0564e
AE
3694{
3695
fe0750e5 3696 struct nfs4_openowner *oo = open->op_openowner;
7fc0564e 3697 struct nfs4_ol_stateid *retstp = NULL;
8c7245ab 3698 struct nfs4_ol_stateid *stp;
1da177e4 3699
8c7245ab 3700 stp = open->op_stp;
5cc1fb2a
OD
3701 /* We are moving these outside of the spinlocks to avoid the warnings */
3702 mutex_init(&stp->st_mutex);
3703 mutex_lock(&stp->st_mutex);
3704
db77ab54 3705retry:
7fc0564e
AE
3706 spin_lock(&oo->oo_owner.so_client->cl_lock);
3707 spin_lock(&fp->fi_lock);
3708
3709 retstp = nfsd4_find_existing_open(fp, open);
3710 if (retstp)
3711 goto out_unlock;
8c7245ab
OD
3712
3713 open->op_stp = NULL;
d6f2bc5d 3714 atomic_inc(&stp->st_stid.sc_count);
3abdb607 3715 stp->st_stid.sc_type = NFS4_OPEN_STID;
3c87b9b7 3716 INIT_LIST_HEAD(&stp->st_locks);
b5971afa 3717 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
13cd2184 3718 get_nfs4_file(fp);
11b9164a 3719 stp->st_stid.sc_file = fp;
1da177e4
LT
3720 stp->st_access_bmap = 0;
3721 stp->st_deny_bmap = 0;
4c4cd222 3722 stp->st_openstp = NULL;
1c755dc1 3723 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
1d31a253 3724 list_add(&stp->st_perfile, &fp->fi_stateids);
7fc0564e
AE
3725
3726out_unlock:
1d31a253 3727 spin_unlock(&fp->fi_lock);
1c755dc1 3728 spin_unlock(&oo->oo_owner.so_client->cl_lock);
5cc1fb2a 3729 if (retstp) {
db77ab54
TM
3730 /* Handle races with CLOSE */
3731 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
3732 nfs4_put_stid(&retstp->st_stid);
3733 goto retry;
3734 }
8c7245ab 3735 /* To keep mutex tracking happy */
5cc1fb2a 3736 mutex_unlock(&stp->st_mutex);
8c7245ab 3737 stp = retstp;
5cc1fb2a 3738 }
8c7245ab 3739 return stp;
1da177e4
LT
3740}
3741
d3134b10
JL
3742/*
3743 * In the 4.0 case we need to keep the owners around a little while to handle
3744 * CLOSE replay. We still do need to release any file access that is held by
3745 * them before returning however.
3746 */
fd39ca9a 3747static void
d3134b10 3748move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
1da177e4 3749{
217526e7 3750 struct nfs4_ol_stateid *last;
d3134b10
JL
3751 struct nfs4_openowner *oo = openowner(s->st_stateowner);
3752 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3753 nfsd_net_id);
73758fed 3754
fe0750e5 3755 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
1da177e4 3756
b401be22
JL
3757 /*
3758 * We know that we hold one reference via nfsd4_close, and another
3759 * "persistent" reference for the client. If the refcount is higher
3760 * than 2, then there are still calls in progress that are using this
3761 * stateid. We can't put the sc_file reference until they are finished.
3762 * Wait for the refcount to drop to 2. Since it has been unhashed,
3763 * there should be no danger of the refcount going back up again at
3764 * this point.
3765 */
3766 wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
3767
d3134b10
JL
3768 release_all_access(s);
3769 if (s->st_stid.sc_file) {
3770 put_nfs4_file(s->st_stid.sc_file);
3771 s->st_stid.sc_file = NULL;
3772 }
217526e7
JL
3773
3774 spin_lock(&nn->client_lock);
3775 last = oo->oo_last_closed_stid;
d3134b10 3776 oo->oo_last_closed_stid = s;
73758fed 3777 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
fe0750e5 3778 oo->oo_time = get_seconds();
217526e7
JL
3779 spin_unlock(&nn->client_lock);
3780 if (last)
3781 nfs4_put_stid(&last->st_stid);
1da177e4
LT
3782}
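/*
 * Illustrative sketch only, not part of nfs4state.c: a user-space model of
 * the wait_event() pattern used above, where teardown waits until only the
 * two long-lived references remain.  The struct and function names are
 * hypothetical; a mutex/condvar pair stands in for the kernel wait queue.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct demo_obj {
	int refcount;			/* models sc_count */
	pthread_mutex_t lock;
	pthread_cond_t cv;
};

static void demo_put(struct demo_obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->refcount--;
	pthread_cond_broadcast(&o->cv);	/* wake any waiting teardown */
	pthread_mutex_unlock(&o->lock);
}

static void *in_flight_call(void *arg)
{
	struct demo_obj *o = arg;

	usleep(10000);			/* pretend a call is still using the object */
	demo_put(o);			/* drop the per-call reference */
	return NULL;
}

int main(void)
{
	struct demo_obj o = {
		.refcount = 3,		/* "close" + "client" + one in-flight call */
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cv = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, in_flight_call, &o);

	pthread_mutex_lock(&o.lock);
	while (o.refcount != 2)		/* wait for in-flight users to finish */
		pthread_cond_wait(&o.cv, &o.lock);
	pthread_mutex_unlock(&o.lock);

	printf("only the long-lived references remain; safe to tear down\n");
	pthread_join(t, NULL);
	return 0;
}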
3783
1da177e4
LT
3784/* search file_hashtbl[] for file */
3785static struct nfs4_file *
5b095e99 3786find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
1da177e4 3787{
1da177e4
LT
3788 struct nfs4_file *fp;
3789
5b095e99 3790 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
4d94c2ef 3791 if (fh_match(&fp->fi_fhandle, fh)) {
5b095e99
JL
3792 if (atomic_inc_not_zero(&fp->fi_ref))
3793 return fp;
13cd2184 3794 }
1da177e4
LT
3795 }
3796 return NULL;
3797}
3798
e6ba76e1 3799struct nfs4_file *
ca943217 3800find_file(struct knfsd_fh *fh)
950e0118
TM
3801{
3802 struct nfs4_file *fp;
5b095e99 3803 unsigned int hashval = file_hashval(fh);
950e0118 3804
5b095e99
JL
3805 rcu_read_lock();
3806 fp = find_file_locked(fh, hashval);
3807 rcu_read_unlock();
950e0118
TM
3808 return fp;
3809}
3810
3811static struct nfs4_file *
f9c00c3a 3812find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
950e0118
TM
3813{
3814 struct nfs4_file *fp;
5b095e99
JL
3815 unsigned int hashval = file_hashval(fh);
3816
3817 rcu_read_lock();
3818 fp = find_file_locked(fh, hashval);
3819 rcu_read_unlock();
3820 if (fp)
3821 return fp;
950e0118
TM
3822
3823 spin_lock(&state_lock);
5b095e99
JL
3824 fp = find_file_locked(fh, hashval);
3825 if (likely(fp == NULL)) {
3826 nfsd4_init_file(fh, hashval, new);
950e0118
TM
3827 fp = new;
3828 }
3829 spin_unlock(&state_lock);
3830
3831 return fp;
3832}
3833
1da177e4
LT
3834/*
3835 * Called to check share deny modes when a READ carries the all-zero
3836 * stateid, or a WRITE carries the all-zero or all-ones stateid.
3837 */
b37ad28b 3838static __be32
1da177e4
LT
3839nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3840{
1da177e4 3841 struct nfs4_file *fp;
baeb4ff0 3842 __be32 ret = nfs_ok;
1da177e4 3843
ca943217 3844 fp = find_file(&current_fh->fh_handle);
13cd2184 3845 if (!fp)
baeb4ff0
JL
3846 return ret;
3847 /* Check for conflicting share reservations */
1d31a253 3848 spin_lock(&fp->fi_lock);
baeb4ff0
JL
3849 if (fp->fi_share_deny & deny_type)
3850 ret = nfserr_locked;
1d31a253 3851 spin_unlock(&fp->fi_lock);
13cd2184
N
3852 put_nfs4_file(fp);
3853 return ret;
1da177e4
LT
3854}
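/*
 * Illustrative sketch only, not part of nfs4state.c: the general NFSv4 share
 * reservation rule that the check above applies in simplified form for the
 * anonymous (all-zero / all-ones) stateids.  A new open conflicts with an
 * existing one if either side's deny bits overlap the other side's access
 * bits.  All names below are hypothetical.
 */
#include <stdio.h>

#define DEMO_ACCESS_READ  0x1
#define DEMO_ACCESS_WRITE 0x2
#define DEMO_DENY_READ    0x1
#define DEMO_DENY_WRITE   0x2

struct demo_share {
	unsigned int access;
	unsigned int deny;
};

/* Returns 1 if the requested open conflicts with an existing reservation. */
static int demo_share_conflict(const struct demo_share *existing,
			       const struct demo_share *req)
{
	return (req->access & existing->deny) ||
	       (req->deny & existing->access);
}

int main(void)
{
	struct demo_share holder = { .access = DEMO_ACCESS_WRITE,
				     .deny = DEMO_DENY_WRITE };
	struct demo_share writer = { .access = DEMO_ACCESS_WRITE, .deny = 0 };
	struct demo_share reader = { .access = DEMO_ACCESS_READ, .deny = 0 };

	printf("%d %d\n",
	       demo_share_conflict(&holder, &writer),	/* 1: write is denied */
	       demo_share_conflict(&holder, &reader));	/* 0: read is allowed */
	return 0;
}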
3855
0162ac2b 3856static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
1da177e4 3857{
0162ac2b 3858 struct nfs4_delegation *dp = cb_to_delegation(cb);
11b9164a
TM
3859 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3860 nfsd_net_id);
e8c69d17 3861
11b9164a 3862 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
f54fe962 3863
dff1399f 3864 /*
f54fe962
JL
3865 * We can't do this in nfsd_break_deleg_cb because it is
3866 * already holding inode->i_lock.
3867 *
dff1399f
JL
3868 * If the dl_time != 0, then we know that it has already been
3869 * queued for a lease break. Don't queue it again.
3870 */
f54fe962 3871 spin_lock(&state_lock);
dff1399f 3872 if (dp->dl_time == 0) {
dff1399f 3873 dp->dl_time = get_seconds();
02e1215f 3874 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
dff1399f 3875 }
02e1215f
JL
3876 spin_unlock(&state_lock);
3877}
1da177e4 3878
0162ac2b
CH
3879static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3880 struct rpc_task *task)
3881{
3882 struct nfs4_delegation *dp = cb_to_delegation(cb);
3883
a457974f
AE
3884 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3885 return 1;
3886
0162ac2b
CH
3887 switch (task->tk_status) {
3888 case 0:
3889 return 1;
3890 case -EBADHANDLE:
3891 case -NFS4ERR_BAD_STATEID:
3892 /*
3893 * Race: client probably got cb_recall before open reply
3894 * granting delegation.
3895 */
3896 if (dp->dl_retries--) {
3897 rpc_delay(task, 2 * HZ);
3898 return 0;
3899 }
3900 /*FALLTHRU*/
3901 default:
3902 return -1;
3903 }
3904}
3905
3906static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3907{
3908 struct nfs4_delegation *dp = cb_to_delegation(cb);
3909
3910 nfs4_put_stid(&dp->dl_stid);
3911}
3912
c4cb8974 3913static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
0162ac2b
CH
3914 .prepare = nfsd4_cb_recall_prepare,
3915 .done = nfsd4_cb_recall_done,
3916 .release = nfsd4_cb_recall_release,
3917};
3918
02e1215f
JL
3919static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3920{
3921 /*
3922 * We're assuming the state code never drops its reference
3923 * without first removing the lease. Since we're in this lease
3924 * callback (and since the lease code is serialized by the kernel
3925 * lock) we know the server hasn't removed the lease yet, so we know
3926 * it's safe to take a reference.
3927 */
72c0b0fb 3928 atomic_inc(&dp->dl_stid.sc_count);
f0b5de1b 3929 nfsd4_run_cb(&dp->dl_recall);
6b57d9c8
BF
3930}
3931
1c8c601a 3932/* Called from break_lease() with i_lock held. */
4d01b7f5
JL
3933static bool
3934nfsd_break_deleg_cb(struct file_lock *fl)
6b57d9c8 3935{
4d01b7f5 3936 bool ret = false;
acfdf5c3
BF
3937 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3938 struct nfs4_delegation *dp;
6b57d9c8 3939
7fa10cd1
BF
3940 if (!fp) {
3941 WARN(1, "(%p)->fl_owner NULL\n", fl);
4d01b7f5 3942 return ret;
7fa10cd1
BF
3943 }
3944 if (fp->fi_had_conflict) {
3945 WARN(1, "duplicate break on %p\n", fp);
4d01b7f5 3946 return ret;
7fa10cd1 3947 }
0272e1fd
BF
3948 /*
3949 * We don't want the locks code to time out the lease for us;
acfdf5c3 3950 * we'll remove it ourselves if a delegation isn't returned
6b57d9c8 3951 * in time:
0272e1fd
BF
3952 */
3953 fl->fl_break_time = 0;
1da177e4 3954
02e1215f 3955 spin_lock(&fp->fi_lock);
417c6629
JL
3956 fp->fi_had_conflict = true;
3957 /*
4d01b7f5
JL
3958 * If there are no delegations on the list, then return true
3959 * so that the lease code will go ahead and delete it.
417c6629
JL
3960 */
3961 if (list_empty(&fp->fi_delegations))
4d01b7f5 3962 ret = true;
417c6629
JL
3963 else
3964 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3965 nfsd_break_one_deleg(dp);
02e1215f 3966 spin_unlock(&fp->fi_lock);
4d01b7f5 3967 return ret;
1da177e4
LT
3968}
3969
c45198ed 3970static int
7448cc37
JL
3971nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3972 struct list_head *dispose)
1da177e4
LT
3973{
3974 if (arg & F_UNLCK)
c45198ed 3975 return lease_modify(onlist, arg, dispose);
1da177e4
LT
3976 else
3977 return -EAGAIN;
3978}
3979
7b021967 3980static const struct lock_manager_operations nfsd_lease_mng_ops = {
8fb47a4f
BF
3981 .lm_break = nfsd_break_deleg_cb,
3982 .lm_change = nfsd_change_deleg_cb,
1da177e4
LT
3983};
3984
7a8711c9
BF
3985static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3986{
3987 if (nfsd4_has_session(cstate))
3988 return nfs_ok;
3989 if (seqid == so->so_seqid - 1)
3990 return nfserr_replay_me;
3991 if (seqid == so->so_seqid)
3992 return nfs_ok;
3993 return nfserr_bad_seqid;
3994}
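/*
 * Illustrative sketch only, not part of nfs4state.c: a user-space model of
 * the seqid check above, assuming a hypothetical owner struct that tracks the
 * next expected seqid the way so->so_seqid does.  A request carrying the
 * previous seqid is treated as a retransmission (replay), the expected seqid
 * is accepted, and anything else is a bad seqid.
 */
#include <stdint.h>
#include <stdio.h>

enum seqid_result { SEQID_OK, SEQID_REPLAY, SEQID_BAD };

struct demo_owner {
	uint32_t next_seqid;	/* analogous to so->so_seqid */
};

static enum seqid_result demo_check_seqid(const struct demo_owner *owner,
					  uint32_t seqid, int has_session)
{
	if (has_session)		/* NFSv4.1+: the seqid is not used */
		return SEQID_OK;
	if (seqid == owner->next_seqid - 1)
		return SEQID_REPLAY;	/* retransmission of the last request */
	if (seqid == owner->next_seqid)
		return SEQID_OK;	/* the request we expected */
	return SEQID_BAD;
}

int main(void)
{
	struct demo_owner owner = { .next_seqid = 7 };

	printf("%d %d %d\n",
	       demo_check_seqid(&owner, 7, 0),	/* SEQID_OK */
	       demo_check_seqid(&owner, 6, 0),	/* SEQID_REPLAY */
	       demo_check_seqid(&owner, 9, 0));	/* SEQID_BAD */
	return 0;
}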
1da177e4 3995
4b24ca7d
JL
3996static __be32 lookup_clientid(clientid_t *clid,
3997 struct nfsd4_compound_state *cstate,
3998 struct nfsd_net *nn)
3999{
4000 struct nfs4_client *found;
4001
4002 if (cstate->clp) {
4003 found = cstate->clp;
4004 if (!same_clid(&found->cl_clientid, clid))
4005 return nfserr_stale_clientid;
4006 return nfs_ok;
4007 }
4008
4009 if (STALE_CLIENTID(clid, nn))
4010 return nfserr_stale_clientid;
4011
4012 /*
4013 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4014 * cached already then we know this is for v4.0 and "sessions"
4015 * will be false.
4016 */
4017 WARN_ON_ONCE(cstate->session);
3e339f96 4018 spin_lock(&nn->client_lock);
4b24ca7d 4019 found = find_confirmed_client(clid, false, nn);
3e339f96
TM
4020 if (!found) {
4021 spin_unlock(&nn->client_lock);
4b24ca7d 4022 return nfserr_expired;
3e339f96
TM
4023 }
4024 atomic_inc(&found->cl_refcount);
4025 spin_unlock(&nn->client_lock);
4b24ca7d
JL
4026
4027 /* Cache the nfs4_client in cstate! */
4028 cstate->clp = found;
4b24ca7d
JL
4029 return nfs_ok;
4030}
4031
b37ad28b 4032__be32
6668958f 4033nfsd4_process_open1(struct nfsd4_compound_state *cstate,
3320fef1 4034 struct nfsd4_open *open, struct nfsd_net *nn)
1da177e4 4035{
1da177e4
LT
4036 clientid_t *clientid = &open->op_clientid;
4037 struct nfs4_client *clp = NULL;
4038 unsigned int strhashval;
fe0750e5 4039 struct nfs4_openowner *oo = NULL;
4cdc951b 4040 __be32 status;
1da177e4 4041
2c142baa 4042 if (STALE_CLIENTID(&open->op_clientid, nn))
1da177e4 4043 return nfserr_stale_clientid;
32513b40
BF
4044 /*
4045 * In case we need it later, after we've already created the
4046 * file and don't want to risk a further failure:
4047 */
4048 open->op_file = nfsd4_alloc_file();
4049 if (open->op_file == NULL)
4050 return nfserr_jukebox;
1da177e4 4051
2d91e895
TM
4052 status = lookup_clientid(clientid, cstate, nn);
4053 if (status)
4054 return status;
4055 clp = cstate->clp;
4056
d4f0489f
TM
4057 strhashval = ownerstr_hashval(&open->op_owner);
4058 oo = find_openstateowner_str(strhashval, open, clp);
fe0750e5
BF
4059 open->op_openowner = oo;
4060 if (!oo) {
bcf130f9 4061 goto new_owner;
1da177e4 4062 }
dad1c067 4063 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
0f442aa2 4064 /* Replace unconfirmed owners without checking for replay. */
fe0750e5
BF
4065 release_openowner(oo);
4066 open->op_openowner = NULL;
bcf130f9 4067 goto new_owner;
0f442aa2 4068 }
4cdc951b
BF
4069 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4070 if (status)
4071 return status;
4cdc951b 4072 goto alloc_stateid;
bcf130f9 4073new_owner:
13d6f66b 4074 oo = alloc_init_open_stateowner(strhashval, open, cstate);
bcf130f9
BF
4075 if (oo == NULL)
4076 return nfserr_jukebox;
4077 open->op_openowner = oo;
4cdc951b 4078alloc_stateid:
b49e084d 4079 open->op_stp = nfs4_alloc_open_stateid(clp);
4cdc951b
BF
4080 if (!open->op_stp)
4081 return nfserr_jukebox;
8287f009
SB
4082
4083 if (nfsd4_has_session(cstate) &&
4084 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4085 open->op_odstate = alloc_clnt_odstate(clp);
4086 if (!open->op_odstate)
4087 return nfserr_jukebox;
4088 }
4089
0f442aa2 4090 return nfs_ok;
1da177e4
LT
4091}
4092
b37ad28b 4093static inline __be32
4a6e43e6
N
4094nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4095{
4096 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4097 return nfserr_openmode;
4098 else
4099 return nfs_ok;
4100}
4101
f459e453 4102static int share_access_to_flags(u32 share_access)
52f4fb43 4103{
f459e453 4104 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
52f4fb43
N
4105}
4106
38c2f4b1 4107static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
24a0111e 4108{
f459e453 4109 struct nfs4_stid *ret;
24a0111e 4110
584f0bb5
AE
4111 ret = find_stateid_by_type(cl, s,
4112 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
f459e453
BF
4113 if (!ret)
4114 return NULL;
4115 return delegstateid(ret);
24a0111e
BF
4116}
4117
8b289b2c
BF
4118static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4119{
4120 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4121 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4122}
4123
b37ad28b 4124static __be32
41d22663 4125nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
567d9829
N
4126 struct nfs4_delegation **dp)
4127{
4128 int flags;
b37ad28b 4129 __be32 status = nfserr_bad_stateid;
dcd94cc2 4130 struct nfs4_delegation *deleg;
567d9829 4131
dcd94cc2
TM
4132 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4133 if (deleg == NULL)
c44c5eeb 4134 goto out;
584f0bb5
AE
4135 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4136 nfs4_put_stid(&deleg->dl_stid);
4137 if (cl->cl_minorversion)
4138 status = nfserr_deleg_revoked;
4139 goto out;
4140 }
24a0111e 4141 flags = share_access_to_flags(open->op_share_access);
dcd94cc2
TM
4142 status = nfs4_check_delegmode(deleg, flags);
4143 if (status) {
4144 nfs4_put_stid(&deleg->dl_stid);
4145 goto out;
4146 }
4147 *dp = deleg;
c44c5eeb 4148out:
8b289b2c 4149 if (!nfsd4_is_deleg_cur(open))
c44c5eeb
N
4150 return nfs_ok;
4151 if (status)
4152 return status;
dad1c067 4153 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
c44c5eeb 4154 return nfs_ok;
567d9829
N
4155}
4156
21fb4016
BF
4157static inline int nfs4_access_to_access(u32 nfs4_access)
4158{
4159 int flags = 0;
4160
4161 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4162 flags |= NFSD_MAY_READ;
4163 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4164 flags |= NFSD_MAY_WRITE;
4165 return flags;
4166}
4167
7e6a72e5
CH
4168static inline __be32
4169nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4170 struct nfsd4_open *open)
4171{
4172 struct iattr iattr = {
4173 .ia_valid = ATTR_SIZE,
4174 .ia_size = 0,
4175 };
4176 if (!open->op_truncate)
4177 return 0;
4178 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4179 return nfserr_inval;
4180 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4181}
4182
0c12eaff 4183static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
6eb3a1d0
JL
4184 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4185 struct nfsd4_open *open)
f9d7562f 4186{
de18643d 4187 struct file *filp = NULL;
f9d7562f 4188 __be32 status;
0c12eaff
CB
4189 int oflag = nfs4_access_to_omode(open->op_share_access);
4190 int access = nfs4_access_to_access(open->op_share_access);
baeb4ff0 4191 unsigned char old_access_bmap, old_deny_bmap;
0c12eaff 4192
de18643d 4193 spin_lock(&fp->fi_lock);
baeb4ff0
JL
4194
4195 /*
4196 * Are we trying to set a deny mode that would conflict with
4197 * current access?
4198 */
4199 status = nfs4_file_check_deny(fp, open->op_share_deny);
4200 if (status != nfs_ok) {
4201 spin_unlock(&fp->fi_lock);
4202 goto out;
4203 }
4204
4205 /* set access to the file */
4206 status = nfs4_file_get_access(fp, open->op_share_access);
4207 if (status != nfs_ok) {
4208 spin_unlock(&fp->fi_lock);
4209 goto out;
4210 }
4211
4212 /* Set access bits in stateid */
4213 old_access_bmap = stp->st_access_bmap;
4214 set_access(open->op_share_access, stp);
4215
4216 /* Set new deny mask */
4217 old_deny_bmap = stp->st_deny_bmap;
4218 set_deny(open->op_share_deny, stp);
4219 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4220
f9d7562f 4221 if (!fp->fi_fds[oflag]) {
de18643d
TM
4222 spin_unlock(&fp->fi_lock);
4223 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
f9d7562f 4224 if (status)
baeb4ff0 4225 goto out_put_access;
de18643d
TM
4226 spin_lock(&fp->fi_lock);
4227 if (!fp->fi_fds[oflag]) {
4228 fp->fi_fds[oflag] = filp;
4229 filp = NULL;
4230 }
f9d7562f 4231 }
de18643d
TM
4232 spin_unlock(&fp->fi_lock);
4233 if (filp)
4234 fput(filp);
f9d7562f 4235
7e6a72e5
CH
4236 status = nfsd4_truncate(rqstp, cur_fh, open);
4237 if (status)
4238 goto out_put_access;
7e6a72e5
CH
4239out:
4240 return status;
baeb4ff0
JL
4241out_put_access:
4242 stp->st_access_bmap = old_access_bmap;
4243 nfs4_file_put_access(fp, open->op_share_access);
4244 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4245 goto out;
1da177e4
LT
4246}
4247
b37ad28b 4248static __be32
dcef0413 4249nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
1da177e4 4250{
b37ad28b 4251 __be32 status;
6ac75368 4252 unsigned char old_deny_bmap = stp->st_deny_bmap;
1da177e4 4253
6eb3a1d0 4254 if (!test_access(open->op_share_access, stp))
baeb4ff0 4255 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
7e6a72e5 4256
baeb4ff0
JL
4257 /* test and set deny mode */
4258 spin_lock(&fp->fi_lock);
4259 status = nfs4_file_check_deny(fp, open->op_share_deny);
4260 if (status == nfs_ok) {
baeb4ff0
JL
4261 set_deny(open->op_share_deny, stp);
4262 fp->fi_share_deny |=
4263 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4264 }
4265 spin_unlock(&fp->fi_lock);
4266
4267 if (status != nfs_ok)
1da177e4 4268 return status;
1da177e4 4269
baeb4ff0
JL
4270 status = nfsd4_truncate(rqstp, cur_fh, open);
4271 if (status != nfs_ok)
4272 reset_union_bmap_deny(old_deny_bmap, stp);
4273 return status;
4274}
1da177e4 4275
14a24e99
BF
4276/* Should we give out recallable state? */
4277static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4278{
4279 if (clp->cl_cb_state == NFSD4_CB_UP)
4280 return true;
4281 /*
4282 * In the sessions case, since we don't have to establish a
4283 * separate connection for callbacks, we assume it's OK
4284 * until we hear otherwise:
4285 */
4286 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4287}
4288
d564fbec 4289static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
22d38c4c
BF
4290{
4291 struct file_lock *fl;
4292
4293 fl = locks_alloc_lock();
4294 if (!fl)
4295 return NULL;
22d38c4c 4296 fl->fl_lmops = &nfsd_lease_mng_ops;
617588d5 4297 fl->fl_flags = FL_DELEG;
22d38c4c
BF
4298 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
4299 fl->fl_end = OFFSET_MAX;
d564fbec 4300 fl->fl_owner = (fl_owner_t)fp;
22d38c4c 4301 fl->fl_pid = current->tgid;
22d38c4c
BF
4302 return fl;
4303}
4304
34ed9872
AE
4305/**
4306 * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4307 * @dp: a pointer to the nfs4_delegation we're adding.
4308 *
4309 * Return:
4310 * On success: Return code will be 0 on success.
4311 *
4312 * On error: -EAGAIN if there was an existing delegation.
4313 * nonzero if there is an error in other cases.
4314 *
4315 */
4316
99c41515 4317static int nfs4_setlease(struct nfs4_delegation *dp)
edab9782 4318{
11b9164a 4319 struct nfs4_file *fp = dp->dl_stid.sc_file;
efde6b4d 4320 struct file_lock *fl;
417c6629
JL
4321 struct file *filp;
4322 int status = 0;
edab9782 4323
d564fbec 4324 fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
edab9782
BF
4325 if (!fl)
4326 return -ENOMEM;
417c6629
JL
4327 filp = find_readable_file(fp);
4328 if (!filp) {
4329 /* We should always have a readable file here */
4330 WARN_ON_ONCE(1);
af9dbaf4 4331 locks_free_lock(fl);
417c6629
JL
4332 return -EBADF;
4333 }
4334 fl->fl_file = filp;
e6f5c789 4335 status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
1c7dd2ff 4336 if (fl)
417c6629 4337 locks_free_lock(fl);
1c7dd2ff 4338 if (status)
417c6629 4339 goto out_fput;
417c6629
JL
4340 spin_lock(&state_lock);
4341 spin_lock(&fp->fi_lock);
4342 /* Did the lease get broken before we took the lock? */
4343 status = -EAGAIN;
4344 if (fp->fi_had_conflict)
4345 goto out_unlock;
4346 /* Race breaker */
0c637be8 4347 if (fp->fi_deleg_file) {
34ed9872 4348 status = hash_delegation_locked(dp, fp);
417c6629
JL
4349 goto out_unlock;
4350 }
417c6629 4351 fp->fi_deleg_file = filp;
34ed9872
AE
4352 fp->fi_delegees = 0;
4353 status = hash_delegation_locked(dp, fp);
417c6629 4354 spin_unlock(&fp->fi_lock);
cdc97505 4355 spin_unlock(&state_lock);
34ed9872
AE
4356 if (status) {
4357 /* Should never happen, this is a new fi_deleg_file */
4358 WARN_ON_ONCE(1);
4359 goto out_fput;
4360 }
acfdf5c3 4361 return 0;
417c6629
JL
4362out_unlock:
4363 spin_unlock(&fp->fi_lock);
4364 spin_unlock(&state_lock);
4365out_fput:
4366 fput(filp);
e873088f 4367 return status;
acfdf5c3
BF
4368}
4369
0b26693c
JL
4370static struct nfs4_delegation *
4371nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
8287f009 4372 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
acfdf5c3 4373{
0b26693c
JL
4374 int status;
4375 struct nfs4_delegation *dp;
417c6629 4376
bf7bd3e9 4377 if (fp->fi_had_conflict)
0b26693c
JL
4378 return ERR_PTR(-EAGAIN);
4379
34ed9872
AE
4380 spin_lock(&state_lock);
4381 spin_lock(&fp->fi_lock);
4382 status = nfs4_get_existing_delegation(clp, fp);
4383 spin_unlock(&fp->fi_lock);
4384 spin_unlock(&state_lock);
4385
4386 if (status)
4387 return ERR_PTR(status);
4388
8287f009 4389 dp = alloc_init_deleg(clp, fh, odstate);
0b26693c
JL
4390 if (!dp)
4391 return ERR_PTR(-ENOMEM);
4392
bf7bd3e9 4393 get_nfs4_file(fp);
417c6629
JL
4394 spin_lock(&state_lock);
4395 spin_lock(&fp->fi_lock);
11b9164a 4396 dp->dl_stid.sc_file = fp;
0c637be8 4397 if (!fp->fi_deleg_file) {
417c6629
JL
4398 spin_unlock(&fp->fi_lock);
4399 spin_unlock(&state_lock);
0b26693c
JL
4400 status = nfs4_setlease(dp);
4401 goto out;
417c6629 4402 }
acfdf5c3 4403 if (fp->fi_had_conflict) {
417c6629
JL
4404 status = -EAGAIN;
4405 goto out_unlock;
acfdf5c3 4406 }
34ed9872 4407 status = hash_delegation_locked(dp, fp);
417c6629
JL
4408out_unlock:
4409 spin_unlock(&fp->fi_lock);
cdc97505 4410 spin_unlock(&state_lock);
0b26693c
JL
4411out:
4412 if (status) {
8287f009 4413 put_clnt_odstate(dp->dl_clnt_odstate);
6011695d 4414 nfs4_put_stid(&dp->dl_stid);
0b26693c
JL
4415 return ERR_PTR(status);
4416 }
4417 return dp;
edab9782
BF
4418}
4419
4aa8913c
BH
4420static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4421{
4422 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4423 if (status == -EAGAIN)
4424 open->op_why_no_deleg = WND4_CONTENTION;
4425 else {
4426 open->op_why_no_deleg = WND4_RESOURCE;
4427 switch (open->op_deleg_want) {
4428 case NFS4_SHARE_WANT_READ_DELEG:
4429 case NFS4_SHARE_WANT_WRITE_DELEG:
4430 case NFS4_SHARE_WANT_ANY_DELEG:
4431 break;
4432 case NFS4_SHARE_WANT_CANCEL:
4433 open->op_why_no_deleg = WND4_CANCELLED;
4434 break;
4435 case NFS4_SHARE_WANT_NO_DELEG:
063b0fb9 4436 WARN_ON_ONCE(1);
4aa8913c
BH
4437 }
4438 }
4439}
4440
1da177e4
LT
4441/*
4442 * Attempt to hand out a delegation.
99c41515
BF
4443 *
4444 * Note we don't support write delegations, and won't until the vfs has
4445 * proper support for them.
1da177e4
LT
4446 */
4447static void
4cf59221
JL
4448nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4449 struct nfs4_ol_stateid *stp)
1da177e4
LT
4450{
4451 struct nfs4_delegation *dp;
4cf59221
JL
4452 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4453 struct nfs4_client *clp = stp->st_stid.sc_client;
14a24e99 4454 int cb_up;
99c41515 4455 int status = 0;
1da177e4 4456
fe0750e5 4457 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
7b190fec
N
4458 open->op_recall = 0;
4459 switch (open->op_claim_type) {
4460 case NFS4_OPEN_CLAIM_PREVIOUS:
2bf23875 4461 if (!cb_up)
7b190fec 4462 open->op_recall = 1;
99c41515
BF
4463 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4464 goto out_no_deleg;
7b190fec
N
4465 break;
4466 case NFS4_OPEN_CLAIM_NULL:
ed47b062 4467 case NFS4_OPEN_CLAIM_FH:
99c41515
BF
4468 /*
4469 * Let's not give out any delegations till everyone's
c87fb4a3
BF
4470 * had the chance to reclaim theirs, *and* until
4471 * NLM locks have all been reclaimed:
99c41515 4472 */
4cf59221 4473 if (locks_in_grace(clp->net))
99c41515 4474 goto out_no_deleg;
dad1c067 4475 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
99c41515 4476 goto out_no_deleg;
9a0590ae
SD
4477 /*
4478 * Also, if the file was opened for write or
4479 * create, there's a good chance the client's
4480 * about to write to it, resulting in an
4481 * immediate recall (since we don't support
4482 * write delegations):
4483 */
7b190fec 4484 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
99c41515
BF
4485 goto out_no_deleg;
4486 if (open->op_create == NFS4_OPEN_CREATE)
4487 goto out_no_deleg;
7b190fec
N
4488 break;
4489 default:
99c41515 4490 goto out_no_deleg;
7b190fec 4491 }
8287f009 4492 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
0b26693c 4493 if (IS_ERR(dp))
dd239cc0 4494 goto out_no_deleg;
1da177e4 4495
d5477a8d 4496 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
1da177e4 4497
8c10cbdb 4498 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
d5477a8d 4499 STATEID_VAL(&dp->dl_stid.sc_stateid));
99c41515 4500 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
67cb1279 4501 nfs4_put_stid(&dp->dl_stid);
dd239cc0 4502 return;
dd239cc0 4503out_no_deleg:
99c41515
BF
4504 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4505 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
d08d32e6 4506 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
99c41515 4507 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
d08d32e6
BF
4508 open->op_recall = 1;
4509 }
99c41515
BF
4510
4511 /* 4.1 client asking for a delegation? */
4512 if (open->op_deleg_want)
4513 nfsd4_open_deleg_none_ext(open, status);
4514 return;
1da177e4
LT
4515}
4516
e27f49c3
BH
4517static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4518 struct nfs4_delegation *dp)
4519{
4520 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4521 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4522 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4523 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4524 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4525 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4526 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4527 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4528 }
4529 /* Otherwise the client must be confused, wanting a delegation
4530 * it already has; in that case we don't return
4531 * NFS4_OPEN_DELEGATE_NONE_EXT or a reason.
4532 */
4533}
4534
b37ad28b 4535__be32
1da177e4
LT
4536nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4537{
6668958f 4538 struct nfsd4_compoundres *resp = rqstp->rq_resp;
38c2f4b1 4539 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
1da177e4 4540 struct nfs4_file *fp = NULL;
dcef0413 4541 struct nfs4_ol_stateid *stp = NULL;
567d9829 4542 struct nfs4_delegation *dp = NULL;
b37ad28b 4543 __be32 status;
73cfeab6 4544 bool new_stp = false;
1da177e4 4545
1da177e4
LT
4546 /*
4547 * Lookup file; if found, lookup stateid and check open request,
4548 * and check for delegations in the process of being recalled.
4549 * If not found, create the nfs4_file struct
4550 */
f9c00c3a 4551 fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
950e0118 4552 if (fp != open->op_file) {
41d22663 4553 status = nfs4_check_deleg(cl, open, &dp);
c44c5eeb
N
4554 if (status)
4555 goto out;
db77ab54 4556 stp = nfsd4_find_and_lock_existing_open(fp, open);
1da177e4 4557 } else {
950e0118 4558 open->op_file = NULL;
c44c5eeb 4559 status = nfserr_bad_stateid;
8b289b2c 4560 if (nfsd4_is_deleg_cur(open))
c44c5eeb 4561 goto out;
1da177e4
LT
4562 }
4563
73cfeab6
TM
4564 if (!stp) {
4565 stp = init_open_stateid(fp, open);
4566 if (!open->op_stp)
4567 new_stp = true;
4568 }
4569
1da177e4
LT
4570 /*
4571 * OPEN the file, or upgrade an existing OPEN.
4572 * If truncate fails, the OPEN fails.
73cfeab6
TM
4573 *
4574 * stp is already locked.
1da177e4 4575 */
73cfeab6 4576 if (!new_stp) {
1da177e4 4577 /* Stateid was found, this is an OPEN upgrade */
f9d7562f 4578 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
35a92fe8 4579 if (status) {
feb9dad5 4580 mutex_unlock(&stp->st_mutex);
1da177e4 4581 goto out;
35a92fe8 4582 }
1da177e4 4583 } else {
6eb3a1d0
JL
4584 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4585 if (status) {
73cfeab6 4586 stp->st_stid.sc_type = NFS4_CLOSED_STID;
6eb3a1d0 4587 release_open_stateid(stp);
73cfeab6 4588 mutex_unlock(&stp->st_mutex);
6eb3a1d0
JL
4589 goto out;
4590 }
8287f009
SB
4591
4592 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4593 open->op_odstate);
4594 if (stp->st_clnt_odstate == open->op_odstate)
4595 open->op_odstate = NULL;
1da177e4 4596 }
73cfeab6 4597
9767feb2 4598 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
feb9dad5 4599 mutex_unlock(&stp->st_mutex);
1da177e4 4600
d24433cd 4601 if (nfsd4_has_session(&resp->cstate)) {
d24433cd
BH
4602 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4603 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4604 open->op_why_no_deleg = WND4_NOT_WANTED;
4605 goto nodeleg;
4606 }
4607 }
4608
1da177e4
LT
4609 /*
4610 * Attempt to hand out a delegation. No error return, because the
4611 * OPEN succeeds even if we fail.
4612 */
4cf59221 4613 nfs4_open_delegation(current_fh, open, stp);
d24433cd 4614nodeleg:
1da177e4
LT
4615 status = nfs_ok;
4616
8c10cbdb 4617 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
dcef0413 4618 STATEID_VAL(&stp->st_stid.sc_stateid));
1da177e4 4619out:
d24433cd
BH
4620 /* 4.1 client trying to upgrade/downgrade delegation? */
4621 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
e27f49c3
BH
4622 open->op_deleg_want)
4623 nfsd4_deleg_xgrade_none_ext(open, dp);
d24433cd 4624
13cd2184
N
4625 if (fp)
4626 put_nfs4_file(fp);
37515177 4627 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
87186022 4628 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
1da177e4
LT
4629 /*
4630 * To finish the open response, we just need to set the rflags.
4631 */
4632 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
19e4c347
JL
4633 if (nfsd4_has_session(&resp->cstate))
4634 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
4635 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
1da177e4 4636 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
19e4c347 4637
dcd94cc2
TM
4638 if (dp)
4639 nfs4_put_stid(&dp->dl_stid);
d6f2bc5d
TM
4640 if (stp)
4641 nfs4_put_stid(&stp->st_stid);
1da177e4
LT
4642
4643 return status;
4644}
4645
58fb12e6 4646void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
42297899 4647 struct nfsd4_open *open)
d29b20cd
BF
4648{
4649 if (open->op_openowner) {
d3134b10
JL
4650 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4651
4652 nfsd4_cstate_assign_replay(cstate, so);
4653 nfs4_put_stateowner(so);
d29b20cd 4654 }
32513b40 4655 if (open->op_file)
5b095e99 4656 kmem_cache_free(file_slab, open->op_file);
4cdc951b 4657 if (open->op_stp)
6011695d 4658 nfs4_put_stid(&open->op_stp->st_stid);
8287f009
SB
4659 if (open->op_odstate)
4660 kmem_cache_free(odstate_slab, open->op_odstate);
d29b20cd
BF
4661}
4662
b37ad28b 4663__be32
b591480b 4664nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 4665 union nfsd4_op_u *u)
1da177e4 4666{
eb69853d 4667 clientid_t *clid = &u->renew;
1da177e4 4668 struct nfs4_client *clp;
b37ad28b 4669 __be32 status;
7f2210fa 4670 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1da177e4 4671
1da177e4
LT
4672 dprintk("process_renew(%08x/%08x): starting\n",
4673 clid->cl_boot, clid->cl_id);
4b24ca7d 4674 status = lookup_clientid(clid, cstate, nn);
9b2ef62b 4675 if (status)
1da177e4 4676 goto out;
4b24ca7d 4677 clp = cstate->clp;
1da177e4 4678 status = nfserr_cb_path_down;
ea1da636 4679 if (!list_empty(&clp->cl_delegations)
77a3569d 4680 && clp->cl_cb_state != NFSD4_CB_UP)
1da177e4
LT
4681 goto out;
4682 status = nfs_ok;
4683out:
1da177e4
LT
4684 return status;
4685}
4686
7f5ef2e9 4687void
12760c66 4688nfsd4_end_grace(struct nfsd_net *nn)
a76b4319 4689{
33dcc481 4690 /* do nothing if grace period already ended */
a51c84ed 4691 if (nn->grace_ended)
33dcc481
JL
4692 return;
4693
a76b4319 4694 dprintk("NFSD: end of grace period\n");
a51c84ed 4695 nn->grace_ended = true;
70b28235
BF
4696 /*
4697 * If the server goes down again right now, an NFSv4
4698 * client will still be allowed to reclaim after it comes back up,
4699 * even if it hasn't yet had a chance to reclaim state this time.
4700 *
4701 */
919b8049 4702 nfsd4_record_grace_done(nn);
70b28235
BF
4703 /*
4704 * At this point, NFSv4 clients can still reclaim. But if the
4705 * server crashes, any that have not yet reclaimed will be out
4706 * of luck on the next boot.
4707 *
4708 * (NFSv4.1+ clients are considered to have reclaimed once they
4709 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
4710 * have reclaimed after their first OPEN.)
4711 */
5e1533c7 4712 locks_end_grace(&nn->nfsd4_manager);
70b28235
BF
4713 /*
4714 * At this point, and once lockd and/or any other containers
4715 * exit their grace period, further reclaims will fail and
4716 * regular locking can resume.
4717 */
a76b4319
N
4718}
4719
fd39ca9a 4720static time_t
09121281 4721nfs4_laundromat(struct nfsd_net *nn)
1da177e4
LT
4722{
4723 struct nfs4_client *clp;
fe0750e5 4724 struct nfs4_openowner *oo;
1da177e4 4725 struct nfs4_delegation *dp;
217526e7 4726 struct nfs4_ol_stateid *stp;
7919d0a2 4727 struct nfsd4_blocked_lock *nbl;
1da177e4 4728 struct list_head *pos, *next, reaplist;
3d733711 4729 time_t cutoff = get_seconds() - nn->nfsd4_lease;
a832e7ae 4730 time_t t, new_timeo = nn->nfsd4_lease;
1da177e4 4731
1da177e4 4732 dprintk("NFSD: laundromat service - starting\n");
12760c66 4733 nfsd4_end_grace(nn);
36acb66b 4734 INIT_LIST_HEAD(&reaplist);
c9a49628 4735 spin_lock(&nn->client_lock);
5ed58bb2 4736 list_for_each_safe(pos, next, &nn->client_lru) {
1da177e4
LT
4737 clp = list_entry(pos, struct nfs4_client, cl_lru);
4738 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4739 t = clp->cl_time - cutoff;
a832e7ae 4740 new_timeo = min(new_timeo, t);
1da177e4
LT
4741 break;
4742 }
221a6876 4743 if (mark_client_expired_locked(clp)) {
d7682988
BH
4744 dprintk("NFSD: client in use (clientid %08x)\n",
4745 clp->cl_clientid.cl_id);
4746 continue;
4747 }
4864af97 4748 list_add(&clp->cl_lru, &reaplist);
36acb66b 4749 }
c9a49628 4750 spin_unlock(&nn->client_lock);
36acb66b
BH
4751 list_for_each_safe(pos, next, &reaplist) {
4752 clp = list_entry(pos, struct nfs4_client, cl_lru);
1da177e4
LT
4753 dprintk("NFSD: purging unused client (clientid %08x)\n",
4754 clp->cl_clientid.cl_id);
4864af97 4755 list_del_init(&clp->cl_lru);
1da177e4
LT
4756 expire_client(clp);
4757 }
cdc97505 4758 spin_lock(&state_lock);
e8c69d17 4759 list_for_each_safe(pos, next, &nn->del_recall_lru) {
1da177e4
LT
4760 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4761 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
a832e7ae
JL
4762 t = dp->dl_time - cutoff;
4763 new_timeo = min(new_timeo, t);
1da177e4
LT
4764 break;
4765 }
3fcbbd24 4766 WARN_ON(!unhash_delegation_locked(dp));
42690676 4767 list_add(&dp->dl_recall_lru, &reaplist);
1da177e4 4768 }
cdc97505 4769 spin_unlock(&state_lock);
2d4a532d
JL
4770 while (!list_empty(&reaplist)) {
4771 dp = list_first_entry(&reaplist, struct nfs4_delegation,
4772 dl_recall_lru);
4773 list_del_init(&dp->dl_recall_lru);
3bd64a5b 4774 revoke_delegation(dp);
1da177e4 4775 }
217526e7
JL
4776
4777 spin_lock(&nn->client_lock);
4778 while (!list_empty(&nn->close_lru)) {
4779 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4780 oo_close_lru);
4781 if (time_after((unsigned long)oo->oo_time,
4782 (unsigned long)cutoff)) {
a832e7ae
JL
4783 t = oo->oo_time - cutoff;
4784 new_timeo = min(new_timeo, t);
1da177e4
LT
4785 break;
4786 }
217526e7
JL
4787 list_del_init(&oo->oo_close_lru);
4788 stp = oo->oo_last_closed_stid;
4789 oo->oo_last_closed_stid = NULL;
4790 spin_unlock(&nn->client_lock);
4791 nfs4_put_stid(&stp->st_stid);
4792 spin_lock(&nn->client_lock);
1da177e4 4793 }
217526e7
JL
4794 spin_unlock(&nn->client_lock);
4795
7919d0a2
JL
4796 /*
4797 * It's possible for a client to try to acquire an already held lock
4798 * that is being held for a long time, and then lose interest in it.
4799 * So, we clean out any un-revisited request after a lease period
4800 * under the assumption that the client is no longer interested.
4801 *
4802 * RFC5661, sec. 9.6 states that the client must not rely on getting
4803 * notifications and must continue to poll for locks, even when the
4804 * server supports them. Thus this shouldn't lead to clients blocking
4805 * indefinitely once the lock does become free.
4806 */
4807 BUG_ON(!list_empty(&reaplist));
0cc11a61 4808 spin_lock(&nn->blocked_locks_lock);
7919d0a2
JL
4809 while (!list_empty(&nn->blocked_locks_lru)) {
4810 nbl = list_first_entry(&nn->blocked_locks_lru,
4811 struct nfsd4_blocked_lock, nbl_lru);
4812 if (time_after((unsigned long)nbl->nbl_time,
4813 (unsigned long)cutoff)) {
4814 t = nbl->nbl_time - cutoff;
4815 new_timeo = min(new_timeo, t);
4816 break;
4817 }
4818 list_move(&nbl->nbl_lru, &reaplist);
4819 list_del_init(&nbl->nbl_list);
4820 }
0cc11a61 4821 spin_unlock(&nn->blocked_locks_lock);
7919d0a2
JL
4822
4823 while (!list_empty(&reaplist)) {
b78da96d 4824 nbl = list_first_entry(&reaplist,
7919d0a2
JL
4825 struct nfsd4_blocked_lock, nbl_lru);
4826 list_del_init(&nbl->nbl_lru);
4827 posix_unblock_lock(&nbl->nbl_lock);
4828 free_blocked_lock(nbl);
4829 }
4830
a832e7ae 4831 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
a832e7ae 4832 return new_timeo;
1da177e4
LT
4833}
4834
a254b246
HH
4835static struct workqueue_struct *laundry_wq;
4836static void laundromat_main(struct work_struct *);
a254b246
HH
4837
4838static void
09121281 4839laundromat_main(struct work_struct *laundry)
1da177e4
LT
4840{
4841 time_t t;
2e55f3ab 4842 struct delayed_work *dwork = to_delayed_work(laundry);
09121281
SK
4843 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4844 laundromat_work);
1da177e4 4845
09121281 4846 t = nfs4_laundromat(nn);
1da177e4 4847 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
09121281 4848 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
1da177e4
LT
4849}
4850
8fcd461d 4851static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
1da177e4 4852{
8fcd461d 4853 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
f7a4d872
BF
4854 return nfserr_bad_stateid;
4855 return nfs_ok;
1da177e4
LT
4856}
4857
1da177e4 4858static inline int
82c5ff1b 4859access_permit_read(struct nfs4_ol_stateid *stp)
1da177e4 4860{
82c5ff1b
JL
4861 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4862 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4863 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
1da177e4
LT
4864}
4865
4866static inline int
82c5ff1b 4867access_permit_write(struct nfs4_ol_stateid *stp)
1da177e4 4868{
82c5ff1b
JL
4869 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4870 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
1da177e4
LT
4871}
4872
4873static
dcef0413 4874__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
1da177e4 4875{
b37ad28b 4876 __be32 status = nfserr_openmode;
1da177e4 4877
02921914
BF
4878 /* For lock stateid's, we test the parent open, not the lock: */
4879 if (stp->st_openstp)
4880 stp = stp->st_openstp;
82c5ff1b 4881 if ((flags & WR_STATE) && !access_permit_write(stp))
1da177e4 4882 goto out;
82c5ff1b 4883 if ((flags & RD_STATE) && !access_permit_read(stp))
1da177e4
LT
4884 goto out;
4885 status = nfs_ok;
4886out:
4887 return status;
4888}
4889
b37ad28b 4890static inline __be32
5ccb0066 4891check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
1da177e4 4892{
203a8c8e 4893 if (ONE_STATEID(stateid) && (flags & RD_STATE))
1da177e4 4894 return nfs_ok;
c87fb4a3 4895 else if (opens_in_grace(net)) {
25985edc 4896 /* Answer in remaining cases depends on existence of
1da177e4
LT
4897 * conflicting state; so we must wait out the grace period. */
4898 return nfserr_grace;
4899 } else if (flags & WR_STATE)
4900 return nfs4_share_conflict(current_fh,
4901 NFS4_SHARE_DENY_WRITE);
4902 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4903 return nfs4_share_conflict(current_fh,
4904 NFS4_SHARE_DENY_READ);
4905}
4906
4907/*
4908 * Allow READ/WRITE during grace period on recovered state only for files
4909 * that are not able to provide mandatory locking.
4910 */
4911static inline int
5ccb0066 4912grace_disallows_io(struct net *net, struct inode *inode)
1da177e4 4913{
c87fb4a3 4914 return opens_in_grace(net) && mandatory_lock(inode);
1da177e4
LT
4915}
4916
57b7b43b 4917static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
0836f587 4918{
6668958f
AA
4919 /*
4920 * When sessions are used the stateid generation number is ignored
4921 * when it is zero.
4922 */
28dde241 4923 if (has_session && in->si_generation == 0)
81b82965
BF
4924 return nfs_ok;
4925
4926 if (in->si_generation == ref->si_generation)
4927 return nfs_ok;
6668958f 4928
0836f587 4929 /* If the client sends us a stateid from the future, it's buggy: */
14b7f4a1 4930 if (nfsd4_stateid_generation_after(in, ref))
0836f587
BF
4931 return nfserr_bad_stateid;
4932 /*
81b82965
BF
4933 * However, we could see a stateid from the past, even from a
4934 * non-buggy client. For example, if the client sends a lock
4935 * while some IO is outstanding, the lock may bump si_generation
4936 * while the IO is still in flight. The client could avoid that
4937 * situation by waiting for responses to all of its IO requests,
4938 * but it may get better performance by simply retrying any IO that
4939 * receives an old_stateid error, as long as requests are rarely
4940 * reordered in flight:
0836f587 4941 */
81b82965 4942 return nfserr_old_stateid;
0836f587
BF
4943}
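/*
 * Illustrative sketch only, not part of nfs4state.c: a self-contained model
 * of the generation comparison above.  The helper names and the signed
 * "later than" test are assumptions made for the example; the kernel uses
 * nfsd4_stateid_generation_after() for the same ordering decision.
 */
#include <stdint.h>
#include <stdio.h>

enum gen_result { GEN_OK, GEN_BAD, GEN_OLD };

static int demo_generation_after(uint32_t in, uint32_t ref)
{
	return (int32_t)(in - ref) > 0;	/* "later", tolerating wraparound */
}

static enum gen_result demo_check_generation(uint32_t in, uint32_t ref,
					     int has_session)
{
	if (has_session && in == 0)
		return GEN_OK;		/* sessions: generation 0 means "any" */
	if (in == ref)
		return GEN_OK;
	if (demo_generation_after(in, ref))
		return GEN_BAD;		/* stateid from the future: buggy client */
	return GEN_OLD;			/* stateid from the past: old_stateid */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       demo_check_generation(0, 5, 1),	/* GEN_OK  */
	       demo_check_generation(5, 5, 0),	/* GEN_OK  */
	       demo_check_generation(6, 5, 0),	/* GEN_BAD */
	       demo_check_generation(4, 5, 0));	/* GEN_OLD */
	return 0;
}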
4944
ebe9cb3b
CH
4945static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4946{
4947 if (ols->st_stateowner->so_is_open_owner &&
4948 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4949 return nfserr_bad_stateid;
4950 return nfs_ok;
4951}
4952
7df302f7 4953static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
17456804 4954{
97b7e3b6 4955 struct nfs4_stid *s;
1af71cc8 4956 __be32 status = nfserr_bad_stateid;
17456804 4957
631db7f3
AE
4958 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4959 CLOSE_STATEID(stateid))
1af71cc8 4960 return status;
7df302f7
CL
4961 /* Client debugging aid. */
4962 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4963 char addr_str[INET6_ADDRSTRLEN];
4964 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4965 sizeof(addr_str));
4966 pr_warn_ratelimited("NFSD: client %s testing state ID "
4967 "with incorrect client ID\n", addr_str);
1af71cc8 4968 return status;
7df302f7 4969 }
1af71cc8
JL
4970 spin_lock(&cl->cl_lock);
4971 s = find_stateid_locked(cl, stateid);
97b7e3b6 4972 if (!s)
1af71cc8 4973 goto out_unlock;
36279ac1 4974 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
17456804 4975 if (status)
1af71cc8 4976 goto out_unlock;
23340032
BF
4977 switch (s->sc_type) {
4978 case NFS4_DELEG_STID:
1af71cc8
JL
4979 status = nfs_ok;
4980 break;
3bd64a5b 4981 case NFS4_REVOKED_DELEG_STID:
1af71cc8
JL
4982 status = nfserr_deleg_revoked;
4983 break;
23340032
BF
4984 case NFS4_OPEN_STID:
4985 case NFS4_LOCK_STID:
ebe9cb3b 4986 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
1af71cc8 4987 break;
23340032
BF
4988 default:
4989 printk("unknown stateid type %x\n", s->sc_type);
b0fc29d6 4990 /* Fallthrough */
23340032 4991 case NFS4_CLOSED_STID:
b0fc29d6 4992 case NFS4_CLOSED_DELEG_STID:
1af71cc8 4993 status = nfserr_bad_stateid;
23340032 4994 }
1af71cc8
JL
4995out_unlock:
4996 spin_unlock(&cl->cl_lock);
4997 return status;
17456804
BS
4998}
4999
cd61c522 5000__be32
2dd6e458
TM
5001nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5002 stateid_t *stateid, unsigned char typemask,
5003 struct nfs4_stid **s, struct nfsd_net *nn)
38c2f4b1 5004{
0eb6f20a 5005 __be32 status;
584f0bb5
AE
5006 bool return_revoked = false;
5007
5008 /*
5009 * only return revoked delegations if explicitly asked.
5010 * otherwise we report revoked or bad_stateid status.
5011 */
5012 if (typemask & NFS4_REVOKED_DELEG_STID)
5013 return_revoked = true;
5014 else if (typemask & NFS4_DELEG_STID)
5015 typemask |= NFS4_REVOKED_DELEG_STID;
38c2f4b1 5016
631db7f3
AE
5017 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5018 CLOSE_STATEID(stateid))
38c2f4b1 5019 return nfserr_bad_stateid;
4b24ca7d 5020 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
a8a7c677 5021 if (status == nfserr_stale_clientid) {
4b24ca7d 5022 if (cstate->session)
a8a7c677 5023 return nfserr_bad_stateid;
38c2f4b1 5024 return nfserr_stale_stateid;
a8a7c677 5025 }
0eb6f20a
BF
5026 if (status)
5027 return status;
4b24ca7d 5028 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
38c2f4b1
BF
5029 if (!*s)
5030 return nfserr_bad_stateid;
584f0bb5
AE
5031 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5032 nfs4_put_stid(*s);
5033 if (cstate->minorversion)
5034 return nfserr_deleg_revoked;
5035 return nfserr_bad_stateid;
5036 }
38c2f4b1 5037 return nfs_ok;
38c2f4b1
BF
5038}
5039
a0649b2d
CH
5040static struct file *
5041nfs4_find_file(struct nfs4_stid *s, int flags)
5042{
af90f707
CH
5043 if (!s)
5044 return NULL;
5045
a0649b2d
CH
5046 switch (s->sc_type) {
5047 case NFS4_DELEG_STID:
5048 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5049 return NULL;
5050 return get_file(s->sc_file->fi_deleg_file);
5051 case NFS4_OPEN_STID:
5052 case NFS4_LOCK_STID:
5053 if (flags & RD_STATE)
5054 return find_readable_file(s->sc_file);
5055 else
5056 return find_writeable_file(s->sc_file);
5057 break;
5058 }
5059
5060 return NULL;
5061}
5062
5063static __be32
5064nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
5065{
5066 __be32 status;
5067
a0649b2d
CH
5068 status = nfsd4_check_openowner_confirmed(ols);
5069 if (status)
5070 return status;
5071 return nfs4_check_openmode(ols, flags);
5072}
5073
af90f707
CH
5074static __be32
5075nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5076 struct file **filpp, bool *tmp_file, int flags)
5077{
5078 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5079 struct file *file;
5080 __be32 status;
5081
5082 file = nfs4_find_file(s, flags);
5083 if (file) {
5084 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5085 acc | NFSD_MAY_OWNER_OVERRIDE);
5086 if (status) {
5087 fput(file);
5088 return status;
5089 }
5090
5091 *filpp = file;
5092 } else {
5093 status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
5094 if (status)
5095 return status;
5096
5097 if (tmp_file)
5098 *tmp_file = true;
5099 }
5100
5101 return 0;
5102}
5103
1da177e4 5104/*
a0649b2d
CH
5105 * Checks for stateid operations
5106 */
b37ad28b 5107__be32
af90f707 5108nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
aa0d6aed
AS
5109 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5110 stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
1da177e4 5111{
a0649b2d 5112 struct inode *ino = d_inode(fhp->fh_dentry);
af90f707 5113 struct net *net = SVC_NET(rqstp);
3320fef1 5114 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
af90f707 5115 struct nfs4_stid *s = NULL;
b37ad28b 5116 __be32 status;
1da177e4 5117
1da177e4
LT
5118 if (filpp)
5119 *filpp = NULL;
af90f707
CH
5120 if (tmp_file)
5121 *tmp_file = false;
1da177e4 5122
5ccb0066 5123 if (grace_disallows_io(net, ino))
1da177e4
LT
5124 return nfserr_grace;
5125
af90f707
CH
5126 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5127 status = check_special_stateids(net, fhp, stateid, flags);
5128 goto done;
5129 }
1da177e4 5130
2dd6e458 5131 status = nfsd4_lookup_stateid(cstate, stateid,
db24b3b4 5132 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
2dd6e458 5133 &s, nn);
38c2f4b1 5134 if (status)
c2d1d6a8 5135 return status;
a0649b2d
CH
5136 status = check_stateid_generation(stateid, &s->sc_stateid,
5137 nfsd4_has_session(cstate));
69064a27
BF
5138 if (status)
5139 goto out;
a0649b2d 5140
f7a4d872
BF
5141 switch (s->sc_type) {
5142 case NFS4_DELEG_STID:
a0649b2d 5143 status = nfs4_check_delegmode(delegstateid(s), flags);
f7a4d872
BF
5144 break;
5145 case NFS4_OPEN_STID:
5146 case NFS4_LOCK_STID:
a0649b2d 5147 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
f7a4d872
BF
5148 break;
5149 default:
14bcab1a 5150 status = nfserr_bad_stateid;
a0649b2d
CH
5151 break;
5152 }
8fcd461d
JL
5153 if (status)
5154 goto out;
5155 status = nfs4_check_fh(fhp, s);
a0649b2d 5156
af90f707
CH
5157done:
5158 if (!status && filpp)
5159 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
1da177e4 5160out:
af90f707
CH
5161 if (s)
5162 nfs4_put_stid(s);
1da177e4
LT
5163 return status;
5164}
5165
17456804
BS
5166/*
5167 * Test if the stateid is valid
5168 */
5169__be32
5170nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 5171 union nfsd4_op_u *u)
17456804 5172{
eb69853d 5173 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
03cfb420
BS
5174 struct nfsd4_test_stateid_id *stateid;
5175 struct nfs4_client *cl = cstate->session->se_client;
5176
03cfb420 5177 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
7df302f7
CL
5178 stateid->ts_id_status =
5179 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
03cfb420 5180
17456804
BS
5181 return nfs_ok;
5182}
5183
42691398
CL
5184static __be32
5185nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5186{
5187 struct nfs4_ol_stateid *stp = openlockstateid(s);
5188 __be32 ret;
5189
5190 mutex_lock(&stp->st_mutex);
5191
5192 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5193 if (ret)
5194 goto out;
5195
5196 ret = nfserr_locks_held;
5197 if (check_for_locks(stp->st_stid.sc_file,
5198 lockowner(stp->st_stateowner)))
5199 goto out;
5200
5201 release_lock_stateid(stp);
5202 ret = nfs_ok;
5203
5204out:
5205 mutex_unlock(&stp->st_mutex);
5206 nfs4_put_stid(s);
5207 return ret;
5208}
5209
e1ca12df
BS
5210__be32
5211nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 5212 union nfsd4_op_u *u)
e1ca12df 5213{
eb69853d 5214 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
e1ca12df 5215 stateid_t *stateid = &free_stateid->fr_stateid;
2da1cec7 5216 struct nfs4_stid *s;
3bd64a5b 5217 struct nfs4_delegation *dp;
38c2f4b1 5218 struct nfs4_client *cl = cstate->session->se_client;
2da1cec7 5219 __be32 ret = nfserr_bad_stateid;
e1ca12df 5220
1af71cc8
JL
5221 spin_lock(&cl->cl_lock);
5222 s = find_stateid_locked(cl, stateid);
2da1cec7 5223 if (!s)
1af71cc8 5224 goto out_unlock;
2da1cec7
BF
5225 switch (s->sc_type) {
5226 case NFS4_DELEG_STID:
e1ca12df 5227 ret = nfserr_locks_held;
1af71cc8 5228 break;
2da1cec7 5229 case NFS4_OPEN_STID:
2da1cec7
BF
5230 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5231 if (ret)
1af71cc8
JL
5232 break;
5233 ret = nfserr_locks_held;
f7a4d872 5234 break;
1af71cc8 5235 case NFS4_LOCK_STID:
42691398 5236 atomic_inc(&s->sc_count);
1af71cc8 5237 spin_unlock(&cl->cl_lock);
42691398 5238 ret = nfsd4_free_lock_stateid(stateid, s);
1af71cc8 5239 goto out;
3bd64a5b
BF
5240 case NFS4_REVOKED_DELEG_STID:
5241 dp = delegstateid(s);
2d4a532d
JL
5242 list_del_init(&dp->dl_recall_lru);
5243 spin_unlock(&cl->cl_lock);
6011695d 5244 nfs4_put_stid(s);
3bd64a5b 5245 ret = nfs_ok;
1af71cc8
JL
5246 goto out;
5247 /* Default falls through and returns nfserr_bad_stateid */
e1ca12df 5248 }
1af71cc8
JL
5249out_unlock:
5250 spin_unlock(&cl->cl_lock);
e1ca12df 5251out:
e1ca12df
BS
5252 return ret;
5253}
5254
4c4cd222
N
5255static inline int
5256setlkflg (int type)
5257{
5258 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5259 RD_STATE : WR_STATE;
5260}
1da177e4 5261
dcef0413 5262static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
c0a5d93e
BF
5263{
5264 struct svc_fh *current_fh = &cstate->current_fh;
5265 struct nfs4_stateowner *sop = stp->st_stateowner;
5266 __be32 status;
5267
c0a5d93e
BF
5268 status = nfsd4_check_seqid(cstate, sop, seqid);
5269 if (status)
5270 return status;
3bd364d1
TM
5271 status = nfsd4_lock_ol_stateid(stp);
5272 if (status != nfs_ok)
5273 return status;
f7a4d872 5274 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
35a92fe8
JL
5275 if (status == nfs_ok)
5276 status = nfs4_check_fh(current_fh, &stp->st_stid);
5277 if (status != nfs_ok)
feb9dad5 5278 mutex_unlock(&stp->st_mutex);
35a92fe8 5279 return status;
c0a5d93e
BF
5280}
5281
1da177e4
LT
5282/*
5283 * Checks for sequence id mutating operations.
5284 */
b37ad28b 5285static __be32
dd453dfd 5286nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
2288d0e3 5287 stateid_t *stateid, char typemask,
3320fef1
SK
5288 struct nfs4_ol_stateid **stpp,
5289 struct nfsd_net *nn)
1da177e4 5290{
0836f587 5291 __be32 status;
38c2f4b1 5292 struct nfs4_stid *s;
e17f99b7 5293 struct nfs4_ol_stateid *stp = NULL;
1da177e4 5294
8c10cbdb
BH
5295 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5296 seqid, STATEID_VAL(stateid));
3a4f98bb 5297
1da177e4 5298 *stpp = NULL;
2dd6e458 5299 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
c0a5d93e
BF
5300 if (status)
5301 return status;
e17f99b7 5302 stp = openlockstateid(s);
58fb12e6 5303 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
1da177e4 5304
e17f99b7 5305 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
fd911011 5306 if (!status)
e17f99b7 5307 *stpp = stp;
fd911011
TM
5308 else
5309 nfs4_put_stid(&stp->st_stid);
e17f99b7 5310 return status;
c0a5d93e 5311}
39325bd0 5312
3320fef1
SK
5313static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5314 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
c0a5d93e
BF
5315{
5316 __be32 status;
5317 struct nfs4_openowner *oo;
4cbfc9f7 5318 struct nfs4_ol_stateid *stp;
1da177e4 5319
c0a5d93e 5320 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
4cbfc9f7 5321 NFS4_OPEN_STID, &stp, nn);
7a8711c9
BF
5322 if (status)
5323 return status;
4cbfc9f7
TM
5324 oo = openowner(stp->st_stateowner);
5325 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
feb9dad5 5326 mutex_unlock(&stp->st_mutex);
4cbfc9f7 5327 nfs4_put_stid(&stp->st_stid);
3a4f98bb 5328 return nfserr_bad_stateid;
4cbfc9f7
TM
5329 }
5330 *stpp = stp;
3a4f98bb 5331 return nfs_ok;
1da177e4
LT
5332}
5333
b37ad28b 5334__be32
ca364317 5335nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 5336 union nfsd4_op_u *u)
1da177e4 5337{
eb69853d 5338 struct nfsd4_open_confirm *oc = &u->open_confirm;
b37ad28b 5339 __be32 status;
fe0750e5 5340 struct nfs4_openowner *oo;
dcef0413 5341 struct nfs4_ol_stateid *stp;
3320fef1 5342 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1da177e4 5343
a6a9f18f
AV
5344 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5345 cstate->current_fh.fh_dentry);
1da177e4 5346
ca364317 5347 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
a8cddc5d
BF
5348 if (status)
5349 return status;
1da177e4 5350
9072d5c6 5351 status = nfs4_preprocess_seqid_op(cstate,
ca364317 5352 oc->oc_seqid, &oc->oc_req_stateid,
3320fef1 5353 NFS4_OPEN_STID, &stp, nn);
9072d5c6 5354 if (status)
68b66e82 5355 goto out;
fe0750e5 5356 oo = openowner(stp->st_stateowner);
68b66e82 5357 status = nfserr_bad_stateid;
35a92fe8 5358 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
feb9dad5 5359 mutex_unlock(&stp->st_mutex);
2585fc79 5360 goto put_stateid;
35a92fe8 5361 }
dad1c067 5362 oo->oo_flags |= NFS4_OO_CONFIRMED;
9767feb2 5363 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
feb9dad5 5364 mutex_unlock(&stp->st_mutex);
8c10cbdb 5365 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
dcef0413 5366 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
c7b9a459 5367
2a4317c5 5368 nfsd4_client_record_create(oo->oo_owner.so_client);
68b66e82 5369 status = nfs_ok;
2585fc79
TM
5370put_stateid:
5371 nfs4_put_stid(&stp->st_stid);
1da177e4 5372out:
9411b1d4 5373 nfsd4_bump_seqid(cstate, status);
1da177e4
LT
5374 return status;
5375}
5376
6409a5a6 5377static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
1da177e4 5378{
82c5ff1b 5379 if (!test_access(access, stp))
6409a5a6 5380 return;
11b9164a 5381 nfs4_file_put_access(stp->st_stid.sc_file, access);
82c5ff1b 5382 clear_access(access, stp);
6409a5a6 5383}
f197c271 5384
6409a5a6
BF
5385static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5386{
5387 switch (to_access) {
5388 case NFS4_SHARE_ACCESS_READ:
5389 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5390 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5391 break;
5392 case NFS4_SHARE_ACCESS_WRITE:
5393 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5394 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5395 break;
5396 case NFS4_SHARE_ACCESS_BOTH:
5397 break;
5398 default:
063b0fb9 5399 WARN_ON_ONCE(1);
1da177e4
LT
5400 }
5401}
5402
b37ad28b 5403__be32
ca364317 5404nfsd4_open_downgrade(struct svc_rqst *rqstp,
eb69853d 5405 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
1da177e4 5406{
eb69853d 5407 struct nfsd4_open_downgrade *od = &u->open_downgrade;
b37ad28b 5408 __be32 status;
dcef0413 5409 struct nfs4_ol_stateid *stp;
3320fef1 5410 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1da177e4 5411
a6a9f18f
AV
5412 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5413 cstate->current_fh.fh_dentry);
1da177e4 5414
c30e92df 5415 /* We don't yet support WANT bits: */
2c8bd7e0
BH
5416 if (od->od_deleg_want)
5417 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5418 od->od_deleg_want);
1da177e4 5419
c0a5d93e 5420 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
3320fef1 5421 &od->od_stateid, &stp, nn);
9072d5c6 5422 if (status)
1da177e4 5423 goto out;
1da177e4 5424 status = nfserr_inval;
82c5ff1b 5425 if (!test_access(od->od_share_access, stp)) {
c11c591f 5426 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
1da177e4 5427 stp->st_access_bmap, od->od_share_access);
0667b1e9 5428 goto put_stateid;
1da177e4 5429 }
ce0fc43c 5430 if (!test_deny(od->od_share_deny, stp)) {
c11c591f 5431 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
1da177e4 5432 stp->st_deny_bmap, od->od_share_deny);
0667b1e9 5433 goto put_stateid;
1da177e4 5434 }
6409a5a6 5435 nfs4_stateid_downgrade(stp, od->od_share_access);
ce0fc43c 5436 reset_union_bmap_deny(od->od_share_deny, stp);
9767feb2 5437 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
1da177e4 5438 status = nfs_ok;
0667b1e9 5439put_stateid:
feb9dad5 5440 mutex_unlock(&stp->st_mutex);
0667b1e9 5441 nfs4_put_stid(&stp->st_stid);
1da177e4 5442out:
9411b1d4 5443 nfsd4_bump_seqid(cstate, status);
1da177e4
LT
5444 return status;
5445}
5446
f7a4d872
BF
5447static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5448{
acf9295b 5449 struct nfs4_client *clp = s->st_stid.sc_client;
e8568739 5450 bool unhashed;
d83017f9 5451 LIST_HEAD(reaplist);
acf9295b 5452
2c41beb0 5453 spin_lock(&clp->cl_lock);
e8568739 5454 unhashed = unhash_open_stateid(s, &reaplist);
acf9295b 5455
d83017f9 5456 if (clp->cl_minorversion) {
e8568739
JL
5457 if (unhashed)
5458 put_ol_stateid_locked(s, &reaplist);
d83017f9
JL
5459 spin_unlock(&clp->cl_lock);
5460 free_ol_stateid_reaplist(&reaplist);
5461 } else {
5462 spin_unlock(&clp->cl_lock);
5463 free_ol_stateid_reaplist(&reaplist);
e8568739
JL
5464 if (unhashed)
5465 move_to_close_lru(s, clp->net);
d83017f9 5466 }
38c387b5
BF
5467}
5468
1da177e4
LT
5469/*
5470 * nfs4_unlock_state() called after encode
5471 */
b37ad28b 5472__be32
ca364317 5473nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 5474 union nfsd4_op_u *u)
1da177e4 5475{
eb69853d 5476 struct nfsd4_close *close = &u->close;
b37ad28b 5477 __be32 status;
dcef0413 5478 struct nfs4_ol_stateid *stp;
3320fef1
SK
5479 struct net *net = SVC_NET(rqstp);
5480 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1da177e4 5481
a6a9f18f
AV
5482 dprintk("NFSD: nfsd4_close on file %pd\n",
5483 cstate->current_fh.fh_dentry);
1da177e4 5484
f7a4d872
BF
5485 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5486 &close->cl_stateid,
5487 NFS4_OPEN_STID|NFS4_CLOSED_STID,
3320fef1 5488 &stp, nn);
9411b1d4 5489 nfsd4_bump_seqid(cstate, status);
9072d5c6 5490 if (status)
1da177e4 5491 goto out;
db77ab54
TM
5492
5493 stp->st_stid.sc_type = NFS4_CLOSED_STID;
9767feb2 5494 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
1da177e4 5495
f7a4d872 5496 nfsd4_close_open_stateid(stp);
db77ab54 5497 mutex_unlock(&stp->st_mutex);
8a0b589d 5498
5f71ff51
TM
5499 /* See RFC5661 section 18.2.4 */
5500 if (stp->st_stid.sc_client->cl_minorversion)
5501 memcpy(&close->cl_stateid, &close_stateid,
5502 sizeof(close->cl_stateid));
5503
8a0b589d
TM
5504 /* put reference from nfs4_preprocess_seqid_op */
5505 nfs4_put_stid(&stp->st_stid);
1da177e4 5506out:
1da177e4
LT
5507 return status;
5508}
5509
b37ad28b 5510__be32
ca364317 5511nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 5512 union nfsd4_op_u *u)
1da177e4 5513{
eb69853d 5514 struct nfsd4_delegreturn *dr = &u->delegreturn;
203a8c8e
BF
5515 struct nfs4_delegation *dp;
5516 stateid_t *stateid = &dr->dr_stateid;
38c2f4b1 5517 struct nfs4_stid *s;
b37ad28b 5518 __be32 status;
3320fef1 5519 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1da177e4 5520
ca364317 5521 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
203a8c8e 5522 return status;
1da177e4 5523
2dd6e458 5524 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
38c2f4b1 5525 if (status)
203a8c8e 5526 goto out;
38c2f4b1 5527 dp = delegstateid(s);
d5477a8d 5528 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
203a8c8e 5529 if (status)
fd911011 5530 goto put_stateid;
203a8c8e 5531
3bd64a5b 5532 destroy_delegation(dp);
fd911011
TM
5533put_stateid:
5534 nfs4_put_stid(&dp->dl_stid);
1da177e4
LT
5535out:
5536 return status;
5537}
5538
87df4de8
BH
5539static inline u64
5540end_offset(u64 start, u64 len)
5541{
5542 u64 end;
5543
5544 end = start + len;
5545 return end >= start ? end: NFS4_MAX_UINT64;
5546}
5547
5548/* last octet in a range */
5549static inline u64
5550last_byte_offset(u64 start, u64 len)
5551{
5552 u64 end;
5553
063b0fb9 5554 WARN_ON_ONCE(!len);
87df4de8
BH
5555 end = start + len;
5556 return end > start ? end - 1: NFS4_MAX_UINT64;
5557}
5558
1da177e4
LT
5559/*
5560 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5561 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5562 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
5563 * locking, this prevents us from being completely protocol-compliant. The
5564 * real solution to this problem is to start using unsigned file offsets in
5565 * the VFS, but this is a very deep change!
5566 */
5567static inline void
5568nfs4_transform_lock_offset(struct file_lock *lock)
5569{
5570 if (lock->fl_start < 0)
5571 lock->fl_start = OFFSET_MAX;
5572 if (lock->fl_end < 0)
5573 lock->fl_end = OFFSET_MAX;
5574}
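/*
 * Illustrative example (hypothetical request, not part of the original
 * file): a LOCK starting at offset 2^63 arrives as an unsigned 64-bit
 * value, but once it is stored in the signed loff_t fields of struct
 * file_lock it shows up as a negative fl_start/fl_end.  The transform
 * above clamps both to OFFSET_MAX, so the VFS sees the largest range it
 * can express rather than a nonsensical negative one -- exactly the
 * limitation the TODO comment above describes.
 */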
5575
cae80b30
JL
5576static fl_owner_t
5577nfsd4_fl_get_owner(fl_owner_t owner)
aef9583b 5578{
cae80b30
JL
5579 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5580
5581 nfs4_get_stateowner(&lo->lo_owner);
5582 return owner;
aef9583b
KM
5583}
5584
cae80b30
JL
5585static void
5586nfsd4_fl_put_owner(fl_owner_t owner)
aef9583b 5587{
cae80b30 5588 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
aef9583b 5589
cae80b30 5590 if (lo)
aef9583b 5591 nfs4_put_stateowner(&lo->lo_owner);
aef9583b
KM
5592}
5593
76d348fa
JL
5594static void
5595nfsd4_lm_notify(struct file_lock *fl)
5596{
5597 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
5598 struct net *net = lo->lo_owner.so_client->net;
5599 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5600 struct nfsd4_blocked_lock *nbl = container_of(fl,
5601 struct nfsd4_blocked_lock, nbl_lock);
5602 bool queue = false;
5603
7919d0a2 5604 /* An empty list means that something else is going to be using it */
0cc11a61 5605 spin_lock(&nn->blocked_locks_lock);
76d348fa
JL
5606 if (!list_empty(&nbl->nbl_list)) {
5607 list_del_init(&nbl->nbl_list);
7919d0a2 5608 list_del_init(&nbl->nbl_lru);
76d348fa
JL
5609 queue = true;
5610 }
0cc11a61 5611 spin_unlock(&nn->blocked_locks_lock);
76d348fa
JL
5612
5613 if (queue)
5614 nfsd4_run_cb(&nbl->nbl_cb);
5615}
5616
7b021967 5617static const struct lock_manager_operations nfsd_posix_mng_ops = {
76d348fa 5618 .lm_notify = nfsd4_lm_notify,
aef9583b
KM
5619 .lm_get_owner = nfsd4_fl_get_owner,
5620 .lm_put_owner = nfsd4_fl_put_owner,
d5b9026a 5621};
1da177e4
LT
5622
5623static inline void
5624nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5625{
fe0750e5 5626 struct nfs4_lockowner *lo;
1da177e4 5627
d5b9026a 5628 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
fe0750e5
BF
5629 lo = (struct nfs4_lockowner *) fl->fl_owner;
5630 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5631 lo->lo_owner.so_owner.len, GFP_KERNEL);
7c13f344
BF
5632 if (!deny->ld_owner.data)
5633 /* We just don't care that much */
5634 goto nevermind;
fe0750e5
BF
5635 deny->ld_owner.len = lo->lo_owner.so_owner.len;
5636 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
d5b9026a 5637 } else {
7c13f344
BF
5638nevermind:
5639 deny->ld_owner.len = 0;
5640 deny->ld_owner.data = NULL;
d5b9026a
N
5641 deny->ld_clientid.cl_boot = 0;
5642 deny->ld_clientid.cl_id = 0;
1da177e4
LT
5643 }
5644 deny->ld_start = fl->fl_start;
87df4de8
BH
5645 deny->ld_length = NFS4_MAX_UINT64;
5646 if (fl->fl_end != NFS4_MAX_UINT64)
1da177e4
LT
5647 deny->ld_length = fl->fl_end - fl->fl_start + 1;
5648 deny->ld_type = NFS4_READ_LT;
5649 if (fl->fl_type != F_RDLCK)
5650 deny->ld_type = NFS4_WRITE_LT;
5651}
5652
fe0750e5 5653static struct nfs4_lockowner *
c8623999 5654find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
1da177e4 5655{
d4f0489f 5656 unsigned int strhashval = ownerstr_hashval(owner);
b3c32bcd 5657 struct nfs4_stateowner *so;
1da177e4 5658
0a880a28
TM
5659 lockdep_assert_held(&clp->cl_lock);
5660
d4f0489f
TM
5661 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5662 so_strhash) {
b3c32bcd
TM
5663 if (so->so_is_open_owner)
5664 continue;
b5971afa
KM
5665 if (same_owner_str(so, owner))
5666 return lockowner(nfs4_get_stateowner(so));
1da177e4
LT
5667 }
5668 return NULL;
5669}
5670
c58c6610 5671static struct nfs4_lockowner *
c8623999 5672find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
c58c6610
TM
5673{
5674 struct nfs4_lockowner *lo;
5675
d4f0489f 5676 spin_lock(&clp->cl_lock);
c8623999 5677 lo = find_lockowner_str_locked(clp, owner);
d4f0489f 5678 spin_unlock(&clp->cl_lock);
c58c6610
TM
5679 return lo;
5680}
5681
8f4b54c5
JL
5682static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5683{
c58c6610 5684 unhash_lockowner_locked(lockowner(sop));
8f4b54c5
JL
5685}
5686
6b180f0b
JL
5687static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5688{
5689 struct nfs4_lockowner *lo = lockowner(sop);
5690
5691 kmem_cache_free(lockowner_slab, lo);
5692}
5693
5694static const struct nfs4_stateowner_operations lockowner_ops = {
8f4b54c5
JL
5695 .so_unhash = nfs4_unhash_lockowner,
5696 .so_free = nfs4_free_lockowner,
6b180f0b
JL
5697};
5698
1da177e4
LT
5699/*
5700 * Alloc a lock owner structure.
5701 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
25985edc 5702 * occurred.
1da177e4 5703 *
16bfdaaf 5704 * strhashval = ownerstr_hashval
1da177e4 5705 */
fe0750e5 5706static struct nfs4_lockowner *
c58c6610
TM
5707alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5708 struct nfs4_ol_stateid *open_stp,
5709 struct nfsd4_lock *lock)
5710{
c58c6610 5711 struct nfs4_lockowner *lo, *ret;
1da177e4 5712
fe0750e5
BF
5713 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5714 if (!lo)
1da177e4 5715 return NULL;
76d348fa 5716 INIT_LIST_HEAD(&lo->lo_blocked);
fe0750e5
BF
5717 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5718 lo->lo_owner.so_is_open_owner = 0;
5db1c03f 5719 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6b180f0b 5720 lo->lo_owner.so_ops = &lockowner_ops;
d4f0489f 5721 spin_lock(&clp->cl_lock);
c8623999 5722 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
c58c6610
TM
5723 if (ret == NULL) {
5724 list_add(&lo->lo_owner.so_strhash,
d4f0489f 5725 &clp->cl_ownerstr_hashtbl[strhashval]);
c58c6610
TM
5726 ret = lo;
5727 } else
d50ffded
KM
5728 nfs4_free_stateowner(&lo->lo_owner);
5729
d4f0489f 5730 spin_unlock(&clp->cl_lock);
340f0ba1 5731 return ret;
1da177e4
LT
5732}
5733
356a95ec
JL
5734static void
5735init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5736 struct nfs4_file *fp, struct inode *inode,
5737 struct nfs4_ol_stateid *open_stp)
1da177e4 5738{
d3b313a4 5739 struct nfs4_client *clp = lo->lo_owner.so_client;
1da177e4 5740
356a95ec
JL
5741 lockdep_assert_held(&clp->cl_lock);
5742
3d0fabd5 5743 atomic_inc(&stp->st_stid.sc_count);
3abdb607 5744 stp->st_stid.sc_type = NFS4_LOCK_STID;
b5971afa 5745 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
13cd2184 5746 get_nfs4_file(fp);
11b9164a 5747 stp->st_stid.sc_file = fp;
0997b173 5748 stp->st_access_bmap = 0;
1da177e4 5749 stp->st_deny_bmap = open_stp->st_deny_bmap;
4c4cd222 5750 stp->st_openstp = open_stp;
feb9dad5 5751 mutex_init(&stp->st_mutex);
3c87b9b7 5752 list_add(&stp->st_locks, &open_stp->st_locks);
1c755dc1 5753 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
1d31a253
TM
5754 spin_lock(&fp->fi_lock);
5755 list_add(&stp->st_perfile, &fp->fi_stateids);
5756 spin_unlock(&fp->fi_lock);
1da177e4
LT
5757}
5758
c53530da
JL
5759static struct nfs4_ol_stateid *
5760find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5761{
5762 struct nfs4_ol_stateid *lst;
356a95ec
JL
5763 struct nfs4_client *clp = lo->lo_owner.so_client;
5764
5765 lockdep_assert_held(&clp->cl_lock);
c53530da
JL
5766
5767 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
3d0fabd5
TM
5768 if (lst->st_stid.sc_file == fp) {
5769 atomic_inc(&lst->st_stid.sc_count);
c53530da 5770 return lst;
3d0fabd5 5771 }
c53530da
JL
5772 }
5773 return NULL;
5774}
5775
356a95ec
JL
5776static struct nfs4_ol_stateid *
5777find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5778 struct inode *inode, struct nfs4_ol_stateid *ost,
5779 bool *new)
5780{
5781 struct nfs4_stid *ns = NULL;
5782 struct nfs4_ol_stateid *lst;
5783 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5784 struct nfs4_client *clp = oo->oo_owner.so_client;
5785
5786 spin_lock(&clp->cl_lock);
5787 lst = find_lock_stateid(lo, fi);
5788 if (lst == NULL) {
5789 spin_unlock(&clp->cl_lock);
d19fb70d 5790 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
356a95ec
JL
5791 if (ns == NULL)
5792 return NULL;
5793
5794 spin_lock(&clp->cl_lock);
5795 lst = find_lock_stateid(lo, fi);
5796 if (likely(!lst)) {
5797 lst = openlockstateid(ns);
5798 init_lock_stateid(lst, lo, fi, inode, ost);
5799 ns = NULL;
5800 *new = true;
5801 }
5802 }
5803 spin_unlock(&clp->cl_lock);
5804 if (ns)
5805 nfs4_put_stid(ns);
5806 return lst;
5807}
c53530da 5808
fd39ca9a 5809static int
1da177e4
LT
5810check_lock_length(u64 offset, u64 length)
5811{
e7969315
KM
5812 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5813 (length > ~offset)));
1da177e4
LT
5814}
5815
dcef0413 5816static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
0997b173 5817{
11b9164a 5818 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
0997b173 5819
7214e860
JL
5820 lockdep_assert_held(&fp->fi_lock);
5821
82c5ff1b 5822 if (test_access(access, lock_stp))
0997b173 5823 return;
12659651 5824 __nfs4_file_get_access(fp, access);
82c5ff1b 5825 set_access(access, lock_stp);
0997b173
BF
5826}
5827
356a95ec
JL
5828static __be32
5829lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5830 struct nfs4_ol_stateid *ost,
5831 struct nfsd4_lock *lock,
dd257933 5832 struct nfs4_ol_stateid **plst, bool *new)
64a284d0 5833{
5db1c03f 5834 __be32 status;
11b9164a 5835 struct nfs4_file *fi = ost->st_stid.sc_file;
64a284d0
BF
5836 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5837 struct nfs4_client *cl = oo->oo_owner.so_client;
2b0143b5 5838 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
64a284d0 5839 struct nfs4_lockowner *lo;
dd257933 5840 struct nfs4_ol_stateid *lst;
64a284d0 5841 unsigned int strhashval;
dd257933 5842 bool hashed;
64a284d0 5843
c8623999 5844 lo = find_lockowner_str(cl, &lock->lk_new_owner);
c53530da 5845 if (!lo) {
76f6c9e1 5846 strhashval = ownerstr_hashval(&lock->lk_new_owner);
c53530da
JL
5847 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5848 if (lo == NULL)
5849 return nfserr_jukebox;
5850 } else {
5851 /* with an existing lockowner, seqids must be the same */
5db1c03f 5852 status = nfserr_bad_seqid;
c53530da
JL
5853 if (!cstate->minorversion &&
5854 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5db1c03f 5855 goto out;
64a284d0 5856 }
c53530da 5857
dd257933
JL
5858retry:
5859 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5860 if (lst == NULL) {
5db1c03f
JL
5861 status = nfserr_jukebox;
5862 goto out;
64a284d0 5863 }
dd257933
JL
5864
5865 mutex_lock(&lst->st_mutex);
5866
5867 /* See if it's still hashed to avoid race with FREE_STATEID */
5868 spin_lock(&cl->cl_lock);
5869 hashed = !list_empty(&lst->st_perfile);
5870 spin_unlock(&cl->cl_lock);
5871
5872 if (!hashed) {
5873 mutex_unlock(&lst->st_mutex);
5874 nfs4_put_stid(&lst->st_stid);
5875 goto retry;
5876 }
5db1c03f 5877 status = nfs_ok;
dd257933 5878 *plst = lst;
5db1c03f
JL
5879out:
5880 nfs4_put_stateowner(&lo->lo_owner);
5881 return status;
64a284d0
BF
5882}
5883
1da177e4
LT
5884/*
5885 * LOCK operation
5886 */
b37ad28b 5887__be32
ca364317 5888nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 5889 union nfsd4_op_u *u)
1da177e4 5890{
eb69853d 5891 struct nfsd4_lock *lock = &u->lock;
fe0750e5
BF
5892 struct nfs4_openowner *open_sop = NULL;
5893 struct nfs4_lockowner *lock_sop = NULL;
3d0fabd5 5894 struct nfs4_ol_stateid *lock_stp = NULL;
0667b1e9 5895 struct nfs4_ol_stateid *open_stp = NULL;
7214e860 5896 struct nfs4_file *fp;
7d947842 5897 struct file *filp = NULL;
76d348fa 5898 struct nfsd4_blocked_lock *nbl = NULL;
21179d81
JL
5899 struct file_lock *file_lock = NULL;
5900 struct file_lock *conflock = NULL;
b37ad28b 5901 __be32 status = 0;
b34f27aa 5902 int lkflg;
b8dd7b9a 5903 int err;
5db1c03f 5904 bool new = false;
76d348fa
JL
5905 unsigned char fl_type;
5906 unsigned int fl_flags = FL_POSIX;
3320fef1
SK
5907 struct net *net = SVC_NET(rqstp);
5908 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1da177e4
LT
5909
5910 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5911 (long long) lock->lk_offset,
5912 (long long) lock->lk_length);
5913
1da177e4
LT
5914 if (check_lock_length(lock->lk_offset, lock->lk_length))
5915 return nfserr_inval;
5916
ca364317 5917 if ((status = fh_verify(rqstp, &cstate->current_fh,
8837abca 5918 S_IFREG, NFSD_MAY_LOCK))) {
a6f6ef2f
AA
5919 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5920 return status;
5921 }
5922
1da177e4 5923 if (lock->lk_is_new) {
684e5638
BF
5924 if (nfsd4_has_session(cstate))
5925 /* See rfc 5661 18.10.3: given clientid is ignored: */
76f6c9e1 5926 memcpy(&lock->lk_new_clientid,
684e5638
BF
5927 &cstate->session->se_client->cl_clientid,
5928 sizeof(clientid_t));
5929
1da177e4 5930 status = nfserr_stale_clientid;
2c142baa 5931 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
1da177e4 5932 goto out;
1da177e4 5933
1da177e4 5934 /* validate and update open stateid and open seqid */
c0a5d93e 5935 status = nfs4_preprocess_confirmed_seqid_op(cstate,
1da177e4
LT
5936 lock->lk_new_open_seqid,
5937 &lock->lk_new_open_stateid,
3320fef1 5938 &open_stp, nn);
37515177 5939 if (status)
1da177e4 5940 goto out;
feb9dad5 5941 mutex_unlock(&open_stp->st_mutex);
fe0750e5 5942 open_sop = openowner(open_stp->st_stateowner);
b34f27aa 5943 status = nfserr_bad_stateid;
684e5638 5944 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
76f6c9e1 5945 &lock->lk_new_clientid))
b34f27aa 5946 goto out;
64a284d0 5947 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5db1c03f 5948 &lock_stp, &new);
3d0fabd5 5949 } else {
dd453dfd 5950 status = nfs4_preprocess_seqid_op(cstate,
fe0750e5
BF
5951 lock->lk_old_lock_seqid,
5952 &lock->lk_old_lock_stateid,
3320fef1 5953 NFS4_LOCK_STID, &lock_stp, nn);
3d0fabd5 5954 }
e1aaa891
BF
5955 if (status)
5956 goto out;
64a284d0 5957 lock_sop = lockowner(lock_stp->st_stateowner);
1da177e4 5958
b34f27aa
BF
5959 lkflg = setlkflg(lock->lk_type);
5960 status = nfs4_check_openmode(lock_stp, lkflg);
5961 if (status)
5962 goto out;
5963
0dd395dc 5964 status = nfserr_grace;
3320fef1 5965 if (locks_in_grace(net) && !lock->lk_reclaim)
0dd395dc
N
5966 goto out;
5967 status = nfserr_no_grace;
3320fef1 5968 if (!locks_in_grace(net) && lock->lk_reclaim)
0dd395dc
N
5969 goto out;
5970
11b9164a 5971 fp = lock_stp->st_stid.sc_file;
1da177e4 5972 switch (lock->lk_type) {
1da177e4 5973 case NFS4_READW_LT:
76d348fa
JL
5974 if (nfsd4_has_session(cstate))
5975 fl_flags |= FL_SLEEP;
5976 /* Fallthrough */
5977 case NFS4_READ_LT:
7214e860
JL
5978 spin_lock(&fp->fi_lock);
5979 filp = find_readable_file_locked(fp);
0997b173
BF
5980 if (filp)
5981 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7214e860 5982 spin_unlock(&fp->fi_lock);
76d348fa 5983 fl_type = F_RDLCK;
529d7b2a 5984 break;
1da177e4 5985 case NFS4_WRITEW_LT:
76d348fa
JL
5986 if (nfsd4_has_session(cstate))
5987 fl_flags |= FL_SLEEP;
5988 /* Fallthrough */
5989 case NFS4_WRITE_LT:
7214e860
JL
5990 spin_lock(&fp->fi_lock);
5991 filp = find_writeable_file_locked(fp);
0997b173
BF
5992 if (filp)
5993 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7214e860 5994 spin_unlock(&fp->fi_lock);
76d348fa 5995 fl_type = F_WRLCK;
529d7b2a 5996 break;
1da177e4
LT
5997 default:
5998 status = nfserr_inval;
5999 goto out;
6000 }
76d348fa 6001
f9d7562f
BF
6002 if (!filp) {
6003 status = nfserr_openmode;
6004 goto out;
6005 }
aef9583b 6006
76d348fa
JL
6007 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6008 if (!nbl) {
6009 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6010 status = nfserr_jukebox;
6011 goto out;
6012 }
6013
6014 file_lock = &nbl->nbl_lock;
6015 file_lock->fl_type = fl_type;
aef9583b 6016 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
21179d81
JL
6017 file_lock->fl_pid = current->tgid;
6018 file_lock->fl_file = filp;
76d348fa 6019 file_lock->fl_flags = fl_flags;
21179d81
JL
6020 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6021 file_lock->fl_start = lock->lk_offset;
6022 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6023 nfs4_transform_lock_offset(file_lock);
6024
6025 conflock = locks_alloc_lock();
6026 if (!conflock) {
6027 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6028 status = nfserr_jukebox;
6029 goto out;
6030 }
1da177e4 6031
76d348fa 6032 if (fl_flags & FL_SLEEP) {
7919d0a2 6033 nbl->nbl_time = jiffies;
0cc11a61 6034 spin_lock(&nn->blocked_locks_lock);
76d348fa 6035 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
7919d0a2 6036 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
0cc11a61 6037 spin_unlock(&nn->blocked_locks_lock);
76d348fa
JL
6038 }
6039
21179d81 6040 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
76d348fa 6041 switch (err) {
1da177e4 6042 case 0: /* success! */
9767feb2 6043 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
b8dd7b9a 6044 status = 0;
eb76b3fd 6045 break;
76d348fa
JL
6046 case FILE_LOCK_DEFERRED:
6047 nbl = NULL;
6048 /* Fallthrough */
6049 case -EAGAIN: /* conflock holds conflicting lock */
eb76b3fd
AA
6050 status = nfserr_denied;
6051 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
21179d81 6052 nfs4_set_lock_denied(conflock, &lock->lk_denied);
eb76b3fd 6053 break;
76d348fa 6054 case -EDEADLK:
1da177e4 6055 status = nfserr_deadlock;
eb76b3fd 6056 break;
3e772463 6057 default:
fd85b817 6058 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
3e772463 6059 status = nfserrno(err);
eb76b3fd 6060 break;
1da177e4 6061 }
1da177e4 6062out:
76d348fa
JL
6063 if (nbl) {
6064 /* dequeue it if we queued it before */
6065 if (fl_flags & FL_SLEEP) {
0cc11a61 6066 spin_lock(&nn->blocked_locks_lock);
76d348fa 6067 list_del_init(&nbl->nbl_list);
7919d0a2 6068 list_del_init(&nbl->nbl_lru);
0cc11a61 6069 spin_unlock(&nn->blocked_locks_lock);
76d348fa
JL
6070 }
6071 free_blocked_lock(nbl);
6072 }
de18643d
TM
6073 if (filp)
6074 fput(filp);
5db1c03f
JL
6075 if (lock_stp) {
6076 /* Bump seqid manually if the 4.0 replay owner is openowner */
6077 if (cstate->replay_owner &&
6078 cstate->replay_owner != &lock_sop->lo_owner &&
6079 seqid_mutating_err(ntohl(status)))
6080 lock_sop->lo_owner.so_seqid++;
6081
feb9dad5 6082 mutex_unlock(&lock_stp->st_mutex);
35a92fe8 6083
5db1c03f
JL
6084 /*
6085 * If this is a new, never-before-used stateid, and we are
6086 * returning an error, then just go ahead and release it.
6087 */
6088 if (status && new)
6089 release_lock_stateid(lock_stp);
6090
3d0fabd5 6091 nfs4_put_stid(&lock_stp->st_stid);
5db1c03f 6092 }
0667b1e9
TM
6093 if (open_stp)
6094 nfs4_put_stid(&open_stp->st_stid);
9411b1d4 6095 nfsd4_bump_seqid(cstate, status);
21179d81
JL
6096 if (conflock)
6097 locks_free_lock(conflock);
1da177e4
LT
6098 return status;
6099}
6100
55ef1274
BF
6101/*
6102 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6103 * so we do a temporary open here just to get an open file to pass to
6104 * vfs_test_lock. (Arguably, test_lock should be done with an
6105 * inode operation.)
6106 */
04da6e9d 6107static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
55ef1274
BF
6108{
6109 struct file *file;
04da6e9d
AV
6110 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
6111 if (!err) {
6112 err = nfserrno(vfs_test_lock(file, lock));
fd891454 6113 fput(file);
04da6e9d 6114 }
55ef1274
BF
6115 return err;
6116}
6117
1da177e4
LT
6118/*
6119 * LOCKT operation
6120 */
b37ad28b 6121__be32
ca364317 6122nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 6123 union nfsd4_op_u *u)
1da177e4 6124{
eb69853d 6125 struct nfsd4_lockt *lockt = &u->lockt;
21179d81 6126 struct file_lock *file_lock = NULL;
5db1c03f 6127 struct nfs4_lockowner *lo = NULL;
b37ad28b 6128 __be32 status;
7f2210fa 6129 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1da177e4 6130
5ccb0066 6131 if (locks_in_grace(SVC_NET(rqstp)))
1da177e4
LT
6132 return nfserr_grace;
6133
6134 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6135 return nfserr_inval;
6136
9b2ef62b 6137 if (!nfsd4_has_session(cstate)) {
4b24ca7d 6138 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
9b2ef62b
BF
6139 if (status)
6140 goto out;
6141 }
1da177e4 6142
75c096f7 6143 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
1da177e4 6144 goto out;
1da177e4 6145
21179d81
JL
6146 file_lock = locks_alloc_lock();
6147 if (!file_lock) {
6148 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6149 status = nfserr_jukebox;
6150 goto out;
6151 }
6cd90662 6152
1da177e4
LT
6153 switch (lockt->lt_type) {
6154 case NFS4_READ_LT:
6155 case NFS4_READW_LT:
21179d81 6156 file_lock->fl_type = F_RDLCK;
1da177e4
LT
6157 break;
6158 case NFS4_WRITE_LT:
6159 case NFS4_WRITEW_LT:
21179d81 6160 file_lock->fl_type = F_WRLCK;
1da177e4
LT
6161 break;
6162 default:
2fdada03 6163 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
1da177e4
LT
6164 status = nfserr_inval;
6165 goto out;
6166 }
6167
c8623999 6168 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
fe0750e5 6169 if (lo)
21179d81
JL
6170 file_lock->fl_owner = (fl_owner_t)lo;
6171 file_lock->fl_pid = current->tgid;
6172 file_lock->fl_flags = FL_POSIX;
1da177e4 6173
21179d81
JL
6174 file_lock->fl_start = lockt->lt_offset;
6175 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
1da177e4 6176
21179d81 6177 nfs4_transform_lock_offset(file_lock);
1da177e4 6178
21179d81 6179 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
04da6e9d 6180 if (status)
fd85b817 6181 goto out;
04da6e9d 6182
21179d81 6183 if (file_lock->fl_type != F_UNLCK) {
1da177e4 6184 status = nfserr_denied;
21179d81 6185 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
1da177e4
LT
6186 }
6187out:
5db1c03f
JL
6188 if (lo)
6189 nfs4_put_stateowner(&lo->lo_owner);
21179d81
JL
6190 if (file_lock)
6191 locks_free_lock(file_lock);
1da177e4
LT
6192 return status;
6193}
6194
b37ad28b 6195__be32
ca364317 6196nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
eb69853d 6197 union nfsd4_op_u *u)
1da177e4 6198{
eb69853d 6199 struct nfsd4_locku *locku = &u->locku;
dcef0413 6200 struct nfs4_ol_stateid *stp;
1da177e4 6201 struct file *filp = NULL;
21179d81 6202 struct file_lock *file_lock = NULL;
b37ad28b 6203 __be32 status;
b8dd7b9a 6204 int err;
3320fef1
SK
6205 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6206
1da177e4
LT
6207 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6208 (long long) locku->lu_offset,
6209 (long long) locku->lu_length);
6210
6211 if (check_lock_length(locku->lu_offset, locku->lu_length))
6212 return nfserr_inval;
6213
9072d5c6 6214 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
3320fef1
SK
6215 &locku->lu_stateid, NFS4_LOCK_STID,
6216 &stp, nn);
9072d5c6 6217 if (status)
1da177e4 6218 goto out;
11b9164a 6219 filp = find_any_file(stp->st_stid.sc_file);
f9d7562f
BF
6220 if (!filp) {
6221 status = nfserr_lock_range;
858cc573 6222 goto put_stateid;
f9d7562f 6223 }
21179d81
JL
6224 file_lock = locks_alloc_lock();
6225 if (!file_lock) {
6226 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6227 status = nfserr_jukebox;
de18643d 6228 goto fput;
21179d81 6229 }
6cd90662 6230
21179d81 6231 file_lock->fl_type = F_UNLCK;
aef9583b 6232 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
21179d81
JL
6233 file_lock->fl_pid = current->tgid;
6234 file_lock->fl_file = filp;
6235 file_lock->fl_flags = FL_POSIX;
6236 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6237 file_lock->fl_start = locku->lu_offset;
6238
6239 file_lock->fl_end = last_byte_offset(locku->lu_offset,
6240 locku->lu_length);
6241 nfs4_transform_lock_offset(file_lock);
1da177e4 6242
21179d81 6243 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
b8dd7b9a 6244 if (err) {
fd85b817 6245 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
1da177e4
LT
6246 goto out_nfserr;
6247 }
9767feb2 6248 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
de18643d
TM
6249fput:
6250 fput(filp);
858cc573 6251put_stateid:
feb9dad5 6252 mutex_unlock(&stp->st_mutex);
858cc573 6253 nfs4_put_stid(&stp->st_stid);
1da177e4 6254out:
9411b1d4 6255 nfsd4_bump_seqid(cstate, status);
21179d81
JL
6256 if (file_lock)
6257 locks_free_lock(file_lock);
1da177e4
LT
6258 return status;
6259
6260out_nfserr:
b8dd7b9a 6261 status = nfserrno(err);
de18643d 6262 goto fput;
1da177e4
LT
6263}
6264
6265/*
6266 * returns
f9c00c3a
JL
6267 * true: locks held by lockowner
6268 * false: no locks held by lockowner
1da177e4 6269 */
f9c00c3a
JL
6270static bool
6271check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
1da177e4 6272{
bd61e0a9 6273 struct file_lock *fl;
f9c00c3a
JL
6274 int status = false;
6275 struct file *filp = find_any_file(fp);
6276 struct inode *inode;
bd61e0a9 6277 struct file_lock_context *flctx;
f9c00c3a
JL
6278
6279 if (!filp) {
6280 /* Any valid lock stateid should have some sort of access */
6281 WARN_ON_ONCE(1);
6282 return status;
6283 }
6284
6285 inode = file_inode(filp);
bd61e0a9
JL
6286 flctx = inode->i_flctx;
6287
6288 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6109c850 6289 spin_lock(&flctx->flc_lock);
bd61e0a9
JL
6290 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6291 if (fl->fl_owner == (fl_owner_t)lowner) {
6292 status = true;
6293 break;
6294 }
796dadfd 6295 }
6109c850 6296 spin_unlock(&flctx->flc_lock);
1da177e4 6297 }
f9c00c3a 6298 fput(filp);
1da177e4
LT
6299 return status;
6300}
6301
b37ad28b 6302__be32
b591480b
BF
6303nfsd4_release_lockowner(struct svc_rqst *rqstp,
6304 struct nfsd4_compound_state *cstate,
eb69853d 6305 union nfsd4_op_u *u)
1da177e4 6306{
eb69853d 6307 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
1da177e4 6308 clientid_t *clid = &rlockowner->rl_clientid;
882e9d25
JL
6309 struct nfs4_stateowner *sop;
6310 struct nfs4_lockowner *lo = NULL;
dcef0413 6311 struct nfs4_ol_stateid *stp;
1da177e4 6312 struct xdr_netobj *owner = &rlockowner->rl_owner;
d4f0489f 6313 unsigned int hashval = ownerstr_hashval(owner);
b37ad28b 6314 __be32 status;
7f2210fa 6315 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
c58c6610 6316 struct nfs4_client *clp;
88584818 6317 LIST_HEAD (reaplist);
1da177e4
LT
6318
6319 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6320 clid->cl_boot, clid->cl_id);
6321
4b24ca7d 6322 status = lookup_clientid(clid, cstate, nn);
9b2ef62b 6323 if (status)
51f5e783 6324 return status;
9b2ef62b 6325
d4f0489f 6326 clp = cstate->clp;
fd44907c 6327 /* Find the matching lock stateowner */
d4f0489f 6328 spin_lock(&clp->cl_lock);
882e9d25 6329 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
d4f0489f 6330 so_strhash) {
fd44907c 6331
882e9d25
JL
6332 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6333 continue;
fd44907c 6334
882e9d25
JL
6335 /* see if there are still any locks associated with it */
6336 lo = lockowner(sop);
6337 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6338 if (check_for_locks(stp->st_stid.sc_file, lo)) {
6339 status = nfserr_locks_held;
6340 spin_unlock(&clp->cl_lock);
51f5e783 6341 return status;
882e9d25 6342 }
5adfd885 6343 }
882e9d25 6344
b5971afa 6345 nfs4_get_stateowner(sop);
882e9d25 6346 break;
1da177e4 6347 }
88584818
CL
6348 if (!lo) {
6349 spin_unlock(&clp->cl_lock);
6350 return status;
6351 }
6352
6353 unhash_lockowner_locked(lo);
6354 while (!list_empty(&lo->lo_owner.so_stateids)) {
6355 stp = list_first_entry(&lo->lo_owner.so_stateids,
6356 struct nfs4_ol_stateid,
6357 st_perstateowner);
6358 WARN_ON(!unhash_lock_stateid(stp));
6359 put_ol_stateid_locked(stp, &reaplist);
6360 }
c58c6610 6361 spin_unlock(&clp->cl_lock);
88584818 6362 free_ol_stateid_reaplist(&reaplist);
797bfd05 6363 remove_blocked_locks(lo);
88584818
CL
6364 nfs4_put_stateowner(&lo->lo_owner);
6365
1da177e4
LT
6366 return status;
6367}
6368
6369static inline struct nfs4_client_reclaim *
a55370a3 6370alloc_reclaim(void)
1da177e4 6371{
a55370a3 6372 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
1da177e4
LT
6373}
6374
0ce0c2b5 6375bool
52e19c09 6376nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
c7b9a459 6377{
0ce0c2b5 6378 struct nfs4_client_reclaim *crp;
c7b9a459 6379
52e19c09 6380 crp = nfsd4_find_reclaim_client(name, nn);
0ce0c2b5 6381 return (crp && crp->cr_clp);
c7b9a459
N
6382}
6383
1da177e4
LT
6384/*
6385 * failure => all reset bets are off, nfserr_no_grace...
6386 */
772a9bbb 6387struct nfs4_client_reclaim *
52e19c09 6388nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
1da177e4
LT
6389{
6390 unsigned int strhashval;
772a9bbb 6391 struct nfs4_client_reclaim *crp;
1da177e4 6392
a55370a3
N
6393 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6394 crp = alloc_reclaim();
772a9bbb
JL
6395 if (crp) {
6396 strhashval = clientstr_hashval(name);
6397 INIT_LIST_HEAD(&crp->cr_strhash);
52e19c09 6398 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
772a9bbb 6399 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
0ce0c2b5 6400 crp->cr_clp = NULL;
52e19c09 6401 nn->reclaim_str_hashtbl_size++;
772a9bbb
JL
6402 }
6403 return crp;
1da177e4
LT
6404}
6405
ce30e539 6406void
52e19c09 6407nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
ce30e539
JL
6408{
6409 list_del(&crp->cr_strhash);
6410 kfree(crp);
52e19c09 6411 nn->reclaim_str_hashtbl_size--;
ce30e539
JL
6412}
6413
2a4317c5 6414void
52e19c09 6415nfs4_release_reclaim(struct nfsd_net *nn)
1da177e4
LT
6416{
6417 struct nfs4_client_reclaim *crp = NULL;
6418 int i;
6419
1da177e4 6420 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
52e19c09
SK
6421 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6422 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
1da177e4 6423 struct nfs4_client_reclaim, cr_strhash);
52e19c09 6424 nfs4_remove_reclaim_record(crp, nn);
1da177e4
LT
6425 }
6426 }
063b0fb9 6427 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
1da177e4
LT
6428}
6429
6430/*
6431 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
2a4317c5 6432struct nfs4_client_reclaim *
52e19c09 6433nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
1da177e4
LT
6434{
6435 unsigned int strhashval;
1da177e4
LT
6436 struct nfs4_client_reclaim *crp = NULL;
6437
278c931c 6438 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
1da177e4 6439
278c931c 6440 strhashval = clientstr_hashval(recdir);
52e19c09 6441 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
278c931c 6442 if (same_name(crp->cr_recdir, recdir)) {
1da177e4
LT
6443 return crp;
6444 }
6445 }
6446 return NULL;
6447}
6448
6449/*
6450* Called from OPEN. Look for clientid in reclaim list.
6451*/
b37ad28b 6452__be32
0fe492db
TM
6453nfs4_check_open_reclaim(clientid_t *clid,
6454 struct nfsd4_compound_state *cstate,
6455 struct nfsd_net *nn)
1da177e4 6456{
0fe492db 6457 __be32 status;
a52d726b
JL
6458
6459 /* find clientid in conf_id_hashtbl */
0fe492db
TM
6460 status = lookup_clientid(clid, cstate, nn);
6461 if (status)
a52d726b
JL
6462 return nfserr_reclaim_bad;
6463
3b3e7b72
JL
6464 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6465 return nfserr_no_grace;
6466
0fe492db
TM
6467 if (nfsd4_client_record_check(cstate->clp))
6468 return nfserr_reclaim_bad;
6469
6470 return nfs_ok;
1da177e4
LT
6471}
6472
65178db4 6473#ifdef CONFIG_NFSD_FAULT_INJECTION
016200c3
JL
6474static inline void
6475put_client(struct nfs4_client *clp)
6476{
6477 atomic_dec(&clp->cl_refcount);
6478}
6479
285abdee
JL
6480static struct nfs4_client *
6481nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6482{
6483 struct nfs4_client *clp;
6484 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6485 nfsd_net_id);
6486
6487 if (!nfsd_netns_ready(nn))
6488 return NULL;
6489
6490 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6491 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6492 return clp;
6493 }
6494 return NULL;
6495}
6496
7ec0e36f 6497u64
285abdee 6498nfsd_inject_print_clients(void)
7ec0e36f
JL
6499{
6500 struct nfs4_client *clp;
6501 u64 count = 0;
6502 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6503 nfsd_net_id);
6504 char buf[INET6_ADDRSTRLEN];
6505
6506 if (!nfsd_netns_ready(nn))
6507 return 0;
6508
6509 spin_lock(&nn->client_lock);
6510 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6511 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6512 pr_info("NFS Client: %s\n", buf);
6513 ++count;
6514 }
6515 spin_unlock(&nn->client_lock);
6516
6517 return count;
6518}
65178db4 6519
a0926d15 6520u64
285abdee 6521nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
a0926d15
JL
6522{
6523 u64 count = 0;
6524 struct nfs4_client *clp;
6525 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6526 nfsd_net_id);
6527
6528 if (!nfsd_netns_ready(nn))
6529 return count;
6530
6531 spin_lock(&nn->client_lock);
6532 clp = nfsd_find_client(addr, addr_size);
6533 if (clp) {
6534 if (mark_client_expired_locked(clp) == nfs_ok)
6535 ++count;
6536 else
6537 clp = NULL;
6538 }
6539 spin_unlock(&nn->client_lock);
6540
6541 if (clp)
6542 expire_client(clp);
6543
6544 return count;
6545}
6546
69fc9edf 6547u64
285abdee 6548nfsd_inject_forget_clients(u64 max)
69fc9edf
JL
6549{
6550 u64 count = 0;
6551 struct nfs4_client *clp, *next;
6552 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6553 nfsd_net_id);
6554 LIST_HEAD(reaplist);
6555
6556 if (!nfsd_netns_ready(nn))
6557 return count;
6558
6559 spin_lock(&nn->client_lock);
6560 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6561 if (mark_client_expired_locked(clp) == nfs_ok) {
6562 list_add(&clp->cl_lru, &reaplist);
6563 if (max != 0 && ++count >= max)
6564 break;
6565 }
6566 }
6567 spin_unlock(&nn->client_lock);
6568
6569 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6570 expire_client(clp);
6571
6572 return count;
6573}
6574
184c1847
BS
6575static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6576 const char *type)
6577{
6578 char buf[INET6_ADDRSTRLEN];
0a5c33e2 6579 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
184c1847
BS
6580 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6581}
6582
016200c3
JL
6583static void
6584nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6585 struct list_head *collect)
6586{
6587 struct nfs4_client *clp = lst->st_stid.sc_client;
6588 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6589 nfsd_net_id);
6590
6591 if (!collect)
6592 return;
6593
6594 lockdep_assert_held(&nn->client_lock);
6595 atomic_inc(&clp->cl_refcount);
6596 list_add(&lst->st_locks, collect);
6597}
6598
3c87b9b7 6599static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
3738d50e 6600 struct list_head *collect,
e8568739 6601 bool (*func)(struct nfs4_ol_stateid *))
fc29171f
BS
6602{
6603 struct nfs4_openowner *oop;
fc29171f 6604 struct nfs4_ol_stateid *stp, *st_next;
3c87b9b7 6605 struct nfs4_ol_stateid *lst, *lst_next;
fc29171f
BS
6606 u64 count = 0;
6607
016200c3 6608 spin_lock(&clp->cl_lock);
fc29171f 6609 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
3c87b9b7
TM
6610 list_for_each_entry_safe(stp, st_next,
6611 &oop->oo_owner.so_stateids, st_perstateowner) {
6612 list_for_each_entry_safe(lst, lst_next,
6613 &stp->st_locks, st_locks) {
3738d50e 6614 if (func) {
e8568739
JL
6615 if (func(lst))
6616 nfsd_inject_add_lock_to_list(lst,
6617 collect);
3738d50e 6618 }
016200c3
JL
6619 ++count;
6620 /*
6621 * Despite the fact that these functions deal
6622 * with 64-bit integers for "count", we must
6623 * ensure that it doesn't blow up the
6624 * clp->cl_refcount. Throw a warning if we
6625 * start to approach INT_MAX here.
6626 */
6627 WARN_ON_ONCE(count == (INT_MAX / 2));
6628 if (count == max)
6629 goto out;
fc29171f
BS
6630 }
6631 }
6632 }
016200c3
JL
6633out:
6634 spin_unlock(&clp->cl_lock);
fc29171f
BS
6635
6636 return count;
6637}
6638
016200c3
JL
6639static u64
6640nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6641 u64 max)
fc29171f 6642{
016200c3 6643 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
fc29171f
BS
6644}
6645
016200c3
JL
6646static u64
6647nfsd_print_client_locks(struct nfs4_client *clp)
184c1847 6648{
016200c3 6649 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
184c1847
BS
6650 nfsd_print_count(clp, count, "locked files");
6651 return count;
6652}
6653
016200c3 6654u64
285abdee 6655nfsd_inject_print_locks(void)
016200c3
JL
6656{
6657 struct nfs4_client *clp;
6658 u64 count = 0;
6659 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6660 nfsd_net_id);
6661
6662 if (!nfsd_netns_ready(nn))
6663 return 0;
6664
6665 spin_lock(&nn->client_lock);
6666 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6667 count += nfsd_print_client_locks(clp);
6668 spin_unlock(&nn->client_lock);
6669
6670 return count;
6671}
6672
6673static void
6674nfsd_reap_locks(struct list_head *reaplist)
6675{
6676 struct nfs4_client *clp;
6677 struct nfs4_ol_stateid *stp, *next;
6678
6679 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6680 list_del_init(&stp->st_locks);
6681 clp = stp->st_stid.sc_client;
6682 nfs4_put_stid(&stp->st_stid);
6683 put_client(clp);
6684 }
6685}
6686
6687u64
285abdee 6688nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
016200c3
JL
6689{
6690 unsigned int count = 0;
6691 struct nfs4_client *clp;
6692 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6693 nfsd_net_id);
6694 LIST_HEAD(reaplist);
6695
6696 if (!nfsd_netns_ready(nn))
6697 return count;
6698
6699 spin_lock(&nn->client_lock);
6700 clp = nfsd_find_client(addr, addr_size);
6701 if (clp)
6702 count = nfsd_collect_client_locks(clp, &reaplist, 0);
6703 spin_unlock(&nn->client_lock);
6704 nfsd_reap_locks(&reaplist);
6705 return count;
6706}
6707
6708u64
285abdee 6709nfsd_inject_forget_locks(u64 max)
016200c3
JL
6710{
6711 u64 count = 0;
6712 struct nfs4_client *clp;
6713 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6714 nfsd_net_id);
6715 LIST_HEAD(reaplist);
6716
6717 if (!nfsd_netns_ready(nn))
6718 return count;
6719
6720 spin_lock(&nn->client_lock);
6721 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6722 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6723 if (max != 0 && count >= max)
6724 break;
6725 }
6726 spin_unlock(&nn->client_lock);
6727 nfsd_reap_locks(&reaplist);
6728 return count;
6729}
6730
82e05efa
JL
6731static u64
6732nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6733 struct list_head *collect,
6734 void (*func)(struct nfs4_openowner *))
4dbdbda8
BS
6735{
6736 struct nfs4_openowner *oop, *next;
82e05efa
JL
6737 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6738 nfsd_net_id);
4dbdbda8
BS
6739 u64 count = 0;
6740
82e05efa
JL
6741 lockdep_assert_held(&nn->client_lock);
6742
6743 spin_lock(&clp->cl_lock);
4dbdbda8 6744 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
82e05efa 6745 if (func) {
4dbdbda8 6746 func(oop);
82e05efa
JL
6747 if (collect) {
6748 atomic_inc(&clp->cl_refcount);
6749 list_add(&oop->oo_perclient, collect);
6750 }
6751 }
6752 ++count;
6753 /*
 6754 * These functions use a 64-bit "count", but each
 6755 * entry collected also takes a reference on
 6756 * clp->cl_refcount, which is only int-sized and must
 6757 * not overflow. Throw a warning near INT_MAX here.
6758 */
6759 WARN_ON_ONCE(count == (INT_MAX / 2));
6760 if (count == max)
4dbdbda8
BS
6761 break;
6762 }
82e05efa
JL
6763 spin_unlock(&clp->cl_lock);
6764
6765 return count;
6766}
6767
6768static u64
6769nfsd_print_client_openowners(struct nfs4_client *clp)
6770{
6771 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6772
6773 nfsd_print_count(clp, count, "openowners");
6774 return count;
6775}
6776
6777static u64
6778nfsd_collect_client_openowners(struct nfs4_client *clp,
6779 struct list_head *collect, u64 max)
6780{
6781 return nfsd_foreach_client_openowner(clp, max, collect,
6782 unhash_openowner_locked);
6783}
6784
6785u64
285abdee 6786nfsd_inject_print_openowners(void)
82e05efa
JL
6787{
6788 struct nfs4_client *clp;
6789 u64 count = 0;
6790 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6791 nfsd_net_id);
6792
6793 if (!nfsd_netns_ready(nn))
6794 return 0;
6795
6796 spin_lock(&nn->client_lock);
6797 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6798 count += nfsd_print_client_openowners(clp);
6799 spin_unlock(&nn->client_lock);
4dbdbda8
BS
6800
6801 return count;
6802}
6803
82e05efa
JL
6804static void
6805nfsd_reap_openowners(struct list_head *reaplist)
6806{
6807 struct nfs4_client *clp;
6808 struct nfs4_openowner *oop, *next;
6809
6810 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6811 list_del_init(&oop->oo_perclient);
6812 clp = oop->oo_owner.so_client;
6813 release_openowner(oop);
6814 put_client(clp);
6815 }
6816}
6817
6818u64
285abdee
JL
6819nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6820 size_t addr_size)
4dbdbda8 6821{
82e05efa
JL
6822 unsigned int count = 0;
6823 struct nfs4_client *clp;
6824 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6825 nfsd_net_id);
6826 LIST_HEAD(reaplist);
6827
6828 if (!nfsd_netns_ready(nn))
6829 return count;
6830
6831 spin_lock(&nn->client_lock);
6832 clp = nfsd_find_client(addr, addr_size);
6833 if (clp)
6834 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6835 spin_unlock(&nn->client_lock);
6836 nfsd_reap_openowners(&reaplist);
6837 return count;
4dbdbda8
BS
6838}
6839
82e05efa 6840u64
285abdee 6841nfsd_inject_forget_openowners(u64 max)
184c1847 6842{
82e05efa
JL
6843 u64 count = 0;
6844 struct nfs4_client *clp;
6845 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6846 nfsd_net_id);
6847 LIST_HEAD(reaplist);
6848
6849 if (!nfsd_netns_ready(nn))
6850 return count;
6851
6852 spin_lock(&nn->client_lock);
6853 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6854 count += nfsd_collect_client_openowners(clp, &reaplist,
6855 max - count);
6856 if (max != 0 && count >= max)
6857 break;
6858 }
6859 spin_unlock(&nn->client_lock);
6860 nfsd_reap_openowners(&reaplist);
184c1847
BS
6861 return count;
6862}
6863
269de30f
BS
6864static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6865 struct list_head *victims)
6866{
6867 struct nfs4_delegation *dp, *next;
98d5c7c5
JL
6868 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6869 nfsd_net_id);
269de30f
BS
6870 u64 count = 0;
6871
98d5c7c5
JL
6872 lockdep_assert_held(&nn->client_lock);
6873
6874 spin_lock(&state_lock);
269de30f 6875 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
dff1399f
JL
6876 if (victims) {
6877 /*
6878 * It's not safe to mess with delegations that have a
6879 * non-zero dl_time. They might have already been broken
6880 * and could be processed by the laundromat outside of
6881 * the state_lock. Just leave them be.
6882 */
6883 if (dp->dl_time != 0)
6884 continue;
6885
98d5c7c5 6886 atomic_inc(&clp->cl_refcount);
3fcbbd24 6887 WARN_ON(!unhash_delegation_locked(dp));
42690676 6888 list_add(&dp->dl_recall_lru, victims);
dff1399f 6889 }
98d5c7c5
JL
6890 ++count;
6891 /*
 6892 * These functions use a 64-bit "count", but each
 6893 * entry collected also takes a reference on
 6894 * clp->cl_refcount, which is only int-sized and must
 6895 * not overflow. Throw a warning near INT_MAX here.
6896 */
6897 WARN_ON_ONCE(count == (INT_MAX / 2));
6898 if (count == max)
269de30f
BS
6899 break;
6900 }
98d5c7c5 6901 spin_unlock(&state_lock);
269de30f
BS
6902 return count;
6903}
6904
98d5c7c5
JL
6905static u64
6906nfsd_print_client_delegations(struct nfs4_client *clp)
269de30f 6907{
98d5c7c5 6908 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
269de30f 6909
98d5c7c5
JL
6910 nfsd_print_count(clp, count, "delegations");
6911 return count;
6912}
6913
6914u64
285abdee 6915nfsd_inject_print_delegations(void)
98d5c7c5
JL
6916{
6917 struct nfs4_client *clp;
6918 u64 count = 0;
6919 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6920 nfsd_net_id);
6921
6922 if (!nfsd_netns_ready(nn))
6923 return 0;
269de30f 6924
98d5c7c5
JL
6925 spin_lock(&nn->client_lock);
6926 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6927 count += nfsd_print_client_delegations(clp);
6928 spin_unlock(&nn->client_lock);
6929
6930 return count;
6931}
6932
6933static void
6934nfsd_forget_delegations(struct list_head *reaplist)
6935{
6936 struct nfs4_client *clp;
6937 struct nfs4_delegation *dp, *next;
6938
6939 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
2d4a532d 6940 list_del_init(&dp->dl_recall_lru);
98d5c7c5 6941 clp = dp->dl_stid.sc_client;
3bd64a5b 6942 revoke_delegation(dp);
98d5c7c5 6943 put_client(clp);
2d4a532d 6944 }
98d5c7c5
JL
6945}
6946
6947u64
285abdee
JL
6948nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6949 size_t addr_size)
98d5c7c5
JL
6950{
6951 u64 count = 0;
6952 struct nfs4_client *clp;
6953 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6954 nfsd_net_id);
6955 LIST_HEAD(reaplist);
6956
6957 if (!nfsd_netns_ready(nn))
6958 return count;
6959
6960 spin_lock(&nn->client_lock);
6961 clp = nfsd_find_client(addr, addr_size);
6962 if (clp)
6963 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6964 spin_unlock(&nn->client_lock);
6965
6966 nfsd_forget_delegations(&reaplist);
6967 return count;
6968}
269de30f 6969
98d5c7c5 6970u64
285abdee 6971nfsd_inject_forget_delegations(u64 max)
98d5c7c5
JL
6972{
6973 u64 count = 0;
6974 struct nfs4_client *clp;
6975 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6976 nfsd_net_id);
6977 LIST_HEAD(reaplist);
6978
6979 if (!nfsd_netns_ready(nn))
6980 return count;
6981
6982 spin_lock(&nn->client_lock);
6983 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6984 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6985 if (max != 0 && count >= max)
6986 break;
6987 }
6988 spin_unlock(&nn->client_lock);
6989 nfsd_forget_delegations(&reaplist);
269de30f
BS
6990 return count;
6991}
6992
98d5c7c5
JL
6993static void
6994nfsd_recall_delegations(struct list_head *reaplist)
269de30f 6995{
98d5c7c5
JL
6996 struct nfs4_client *clp;
6997 struct nfs4_delegation *dp, *next;
269de30f 6998
98d5c7c5 6999 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
dff1399f 7000 list_del_init(&dp->dl_recall_lru);
98d5c7c5
JL
7001 clp = dp->dl_stid.sc_client;
7002 /*
 7003 * We skipped all entries that had a non-zero dl_time when
 7004 * collecting, so it's safe to reset dl_time to 0 here. If a
 7005 * delegation break comes in now, then it won't make any
 7006 * difference since we're recalling it either way.
7007 */
7008 spin_lock(&state_lock);
dff1399f 7009 dp->dl_time = 0;
98d5c7c5 7010 spin_unlock(&state_lock);
269de30f 7011 nfsd_break_one_deleg(dp);
98d5c7c5 7012 put_client(clp);
dff1399f 7013 }
98d5c7c5 7014}
269de30f 7015
98d5c7c5 7016u64
285abdee 7017nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
98d5c7c5
JL
7018 size_t addr_size)
7019{
7020 u64 count = 0;
7021 struct nfs4_client *clp;
7022 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7023 nfsd_net_id);
7024 LIST_HEAD(reaplist);
7025
7026 if (!nfsd_netns_ready(nn))
7027 return count;
7028
7029 spin_lock(&nn->client_lock);
7030 clp = nfsd_find_client(addr, addr_size);
7031 if (clp)
7032 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7033 spin_unlock(&nn->client_lock);
7034
7035 nfsd_recall_delegations(&reaplist);
269de30f
BS
7036 return count;
7037}
7038
98d5c7c5 7039u64
285abdee 7040nfsd_inject_recall_delegations(u64 max)
184c1847
BS
7041{
7042 u64 count = 0;
98d5c7c5
JL
7043 struct nfs4_client *clp, *next;
7044 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7045 nfsd_net_id);
7046 LIST_HEAD(reaplist);
184c1847 7047
98d5c7c5
JL
7048 if (!nfsd_netns_ready(nn))
7049 return count;
184c1847 7050
98d5c7c5
JL
7051 spin_lock(&nn->client_lock);
7052 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7053 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7054 if (max != 0 && ++count >= max)
7055 break;
7056 }
7057 spin_unlock(&nn->client_lock);
7058 nfsd_recall_delegations(&reaplist);
184c1847
BS
7059 return count;
7060}
65178db4
BS
7061#endif /* CONFIG_NFSD_FAULT_INJECTION */
7062
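
These nfsd_inject_* entry points are only built with CONFIG_NFSD_FAULT_INJECTION and are intended to be driven from userspace through debugfs control files. The sketch below shows the rough idea only; the /sys/kernel/debug/nfsd/forget_delegations path and the write-a-count semantics are assumptions for the illustration, not something defined in this file.

#include <stdio.h>

int main(void)
{
	/* Assumed debugfs control file: writing a count asks nfsd to forget
	 * up to that many delegations (0 would mean "no limit"). */
	FILE *f = fopen("/sys/kernel/debug/nfsd/forget_delegations", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%u\n", 10);
	fclose(f);
	return 0;
}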
c2f1a551
MS
7063/*
7064 * Since the lifetime of a delegation isn't limited to that of an open, a
7065 * client may quite reasonably hang on to a delegation as long as it has
7066 * the inode cached. This becomes an obvious problem the first time a
7067 * client's inode cache approaches the size of the server's total memory.
7068 *
7069 * For now we avoid this problem by imposing a hard limit on the number
7070 * of delegations, which varies according to the server's memory size.
7071 */
7072static void
7073set_max_delegations(void)
7074{
7075 /*
7076 * Allow at most 4 delegations per megabyte of RAM. Quick
7077 * estimates suggest that in the worst case (where every delegation
7078 * is for a different inode), a delegation could take about 1.5K,
7079 * giving a worst case usage of about 6% of memory.
7080 */
7081 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7082}
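
To make the shift in set_max_delegations() concrete: 20 - PAGE_SHIFT converts a page count into megabytes, and subtracting 2 more multiplies the result by 4, giving the "4 delegations per megabyte" limit described above. A standalone sketch of the arithmetic (the page size and free-page count below are made-up inputs, not values from this file):

#include <stdio.h>

int main(void)
{
	const int page_shift = 12;		/* assume 4 KiB pages */
	unsigned long free_pages = 262144;	/* assume ~1 GiB of free buffer pages */
	unsigned long max_delegations = free_pages >> (20 - 2 - page_shift);

	/* 262144 pages * 4 KiB = 1024 MiB; 4 delegations per MiB -> 4096 */
	printf("max_delegations = %lu\n", max_delegations);
	return 0;
}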
7083
d85ed443 7084static int nfs4_state_create_net(struct net *net)
8daae4dc
SK
7085{
7086 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7087 int i;
7088
7089 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7090 CLIENT_HASH_SIZE, GFP_KERNEL);
7091 if (!nn->conf_id_hashtbl)
382a62e7 7092 goto err;
0a7ec377
SK
7093 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7094 CLIENT_HASH_SIZE, GFP_KERNEL);
7095 if (!nn->unconf_id_hashtbl)
7096 goto err_unconf_id;
1872de0e
SK
7097 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
7098 SESSION_HASH_SIZE, GFP_KERNEL);
7099 if (!nn->sessionid_hashtbl)
7100 goto err_sessionid;
8daae4dc 7101
382a62e7 7102 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8daae4dc 7103 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
0a7ec377 7104 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
382a62e7 7105 }
1872de0e
SK
7106 for (i = 0; i < SESSION_HASH_SIZE; i++)
7107 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
382a62e7 7108 nn->conf_name_tree = RB_ROOT;
a99454aa 7109 nn->unconf_name_tree = RB_ROOT;
02cfbaa6
VA
7110 nn->boot_time = get_seconds();
7111 nn->grace_ended = false;
7112 nn->nfsd4_manager.block_opens = true;
7113 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
5ed58bb2 7114 INIT_LIST_HEAD(&nn->client_lru);
73758fed 7115 INIT_LIST_HEAD(&nn->close_lru);
e8c69d17 7116 INIT_LIST_HEAD(&nn->del_recall_lru);
c9a49628 7117 spin_lock_init(&nn->client_lock);
8daae4dc 7118
0cc11a61
JL
7119 spin_lock_init(&nn->blocked_locks_lock);
7120 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7121
09121281 7122 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
d85ed443 7123 get_net(net);
09121281 7124
8daae4dc 7125 return 0;
382a62e7 7126
1872de0e 7127err_sessionid:
9b531137 7128 kfree(nn->unconf_id_hashtbl);
0a7ec377
SK
7129err_unconf_id:
7130 kfree(nn->conf_id_hashtbl);
382a62e7
SK
7131err:
7132 return -ENOMEM;
8daae4dc
SK
7133}
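
nfs4_state_create_net() uses the usual kernel error-unwind idiom: each allocation that fails jumps to a label that frees only what was successfully allocated before it, in reverse order. A reduced userspace sketch of the same shape (the three buffers stand in for the conf/unconf/sessionid hash tables; the names are invented for the example):

#include <stdlib.h>

struct state {
	void *conf, *unconf, *sessions;
};

static int state_create(struct state *s)
{
	s->conf = malloc(64);
	if (!s->conf)
		goto err;
	s->unconf = malloc(64);
	if (!s->unconf)
		goto err_unconf;
	s->sessions = malloc(64);
	if (!s->sessions)
		goto err_sessions;
	return 0;

err_sessions:
	free(s->unconf);
err_unconf:
	free(s->conf);
err:
	return -1;	/* the kernel version returns -ENOMEM here */
}

int main(void)
{
	struct state s;

	return state_create(&s) ? 1 : 0;
}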
7134
7135static void
4dce0ac9 7136nfs4_state_destroy_net(struct net *net)
8daae4dc
SK
7137{
7138 int i;
7139 struct nfs4_client *clp = NULL;
7140 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7141
7142 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7143 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7144 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7145 destroy_client(clp);
7146 }
7147 }
a99454aa 7148
797bfd05
JL
7149 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7150
2b905635
KM
7151 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7152 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7153 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7154 destroy_client(clp);
7155 }
a99454aa
SK
7156 }
7157
1872de0e 7158 kfree(nn->sessionid_hashtbl);
0a7ec377 7159 kfree(nn->unconf_id_hashtbl);
8daae4dc 7160 kfree(nn->conf_id_hashtbl);
4dce0ac9 7161 put_net(net);
8daae4dc
SK
7162}
7163
f252bc68 7164int
d85ed443 7165nfs4_state_start_net(struct net *net)
ac4d8ff2 7166{
5e1533c7 7167 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
b5a1a81e
BF
7168 int ret;
7169
d85ed443 7170 ret = nfs4_state_create_net(net);
8daae4dc
SK
7171 if (ret)
7172 return ret;
d4318acd
JL
7173 locks_start_grace(net, &nn->nfsd4_manager);
7174 nfsd4_client_tracking_init(net);
d85ed443 7175 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
5284b44e
SK
7176 nn->nfsd4_grace, net);
7177 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
d85ed443
SK
7178 return 0;
7179}
7180
7181/* initialization to perform when the nfsd service is started: */
7182
7183int
7184nfs4_state_start(void)
7185{
7186 int ret;
7187
b5a1a81e 7188 ret = set_callback_cred();
d85ed443 7189 if (ret)
f7d1ddbe
KM
7190 return ret;
7191
51a54568 7192 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
a6d6b781
JL
7193 if (laundry_wq == NULL) {
7194 ret = -ENOMEM;
f7d1ddbe 7195 goto out_cleanup_cred;
a6d6b781 7196 }
b5a1a81e
BF
7197 ret = nfsd4_create_callback_queue();
7198 if (ret)
7199 goto out_free_laundry;
09121281 7200
c2f1a551 7201 set_max_delegations();
b5a1a81e 7202 return 0;
d85ed443 7203
b5a1a81e
BF
7204out_free_laundry:
7205 destroy_workqueue(laundry_wq);
f7d1ddbe
KM
7206out_cleanup_cred:
7207 cleanup_callback_cred();
b5a1a81e 7208 return ret;
1da177e4
LT
7209}
7210
f252bc68 7211void
4dce0ac9 7212nfs4_state_shutdown_net(struct net *net)
1da177e4 7213{
1da177e4 7214 struct nfs4_delegation *dp = NULL;
1da177e4 7215 struct list_head *pos, *next, reaplist;
4dce0ac9 7216 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1da177e4 7217
4dce0ac9
SK
7218 cancel_delayed_work_sync(&nn->laundromat_work);
7219 locks_end_grace(&nn->nfsd4_manager);
ac55fdc4 7220
1da177e4 7221 INIT_LIST_HEAD(&reaplist);
cdc97505 7222 spin_lock(&state_lock);
e8c69d17 7223 list_for_each_safe(pos, next, &nn->del_recall_lru) {
1da177e4 7224 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3fcbbd24 7225 WARN_ON(!unhash_delegation_locked(dp));
42690676 7226 list_add(&dp->dl_recall_lru, &reaplist);
1da177e4 7227 }
cdc97505 7228 spin_unlock(&state_lock);
1da177e4
LT
7229 list_for_each_safe(pos, next, &reaplist) {
7230 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
42690676 7231 list_del_init(&dp->dl_recall_lru);
8287f009 7232 put_clnt_odstate(dp->dl_clnt_odstate);
afbda402 7233 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
6011695d 7234 nfs4_put_stid(&dp->dl_stid);
1da177e4
LT
7235 }
7236
3320fef1 7237 nfsd4_client_tracking_exit(net);
4dce0ac9 7238 nfs4_state_destroy_net(net);
1da177e4
LT
7239}
7240
7241void
7242nfs4_state_shutdown(void)
7243{
5e8d5c29 7244 destroy_workqueue(laundry_wq);
c3935e30 7245 nfsd4_destroy_callback_queue();
f7d1ddbe 7246 cleanup_callback_cred();
1da177e4 7247}
8b70484c
TM
7248
7249static void
7250get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7251{
37c593c5
TM
7252 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7253 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
8b70484c
TM
7254}
7255
7256static void
7257put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7258{
37c593c5
TM
7259 if (cstate->minorversion) {
7260 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7261 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7262 }
7263}
7264
7265void
7266clear_current_stateid(struct nfsd4_compound_state *cstate)
7267{
7268 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
8b70484c
TM
7269}
7270
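
get_stateid() and put_stateid() above implement the "current stateid" convenience for NFSv4.1 compounds: after an operation that produces a stateid, put_stateid() remembers it in the compound state, and a later operation whose argument is the special current-stateid value has it substituted by get_stateid(). A simplified, self-contained userspace sketch of that flow follows; the types, the sentinel value and the helper names are stand-ins, not the real nfsd structures.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct stateid {
	unsigned int generation;
	unsigned char opaque[12];
};

/* Stand-in for the special "use the current stateid" value a client may send. */
static const struct stateid current_sentinel = { .generation = 1 };

struct compound_state {
	bool has_current;
	struct stateid current_stateid;
};

/* Analogue of put_stateid(): remember the stateid an operation produced. */
static void save_current(struct compound_state *cs, const struct stateid *sid)
{
	cs->current_stateid = *sid;
	cs->has_current = true;
}

/* Analogue of get_stateid(): replace the sentinel with the remembered value. */
static void fill_current(struct compound_state *cs, struct stateid *sid)
{
	if (cs->has_current &&
	    memcmp(sid, &current_sentinel, sizeof(*sid)) == 0)
		*sid = cs->current_stateid;
}

int main(void)
{
	struct compound_state cs = { 0 };
	struct stateid open_result = { .generation = 7 };
	struct stateid read_arg = current_sentinel;	/* client asked for "current" */

	save_current(&cs, &open_result);	/* e.g. after OPEN */
	fill_current(&cs, &read_arg);		/* e.g. before READ */
	printf("READ will use generation %u\n", read_arg.generation);
	return 0;
}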
62cd4a59
TM
7271/*
7272 * functions to set current state id
7273 */
9428fe1a 7274void
b60e9859
CH
7275nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7276 union nfsd4_op_u *u)
9428fe1a 7277{
b60e9859 7278 put_stateid(cstate, &u->open_downgrade.od_stateid);
9428fe1a
TM
7279}
7280
8b70484c 7281void
b60e9859
CH
7282nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7283 union nfsd4_op_u *u)
8b70484c 7284{
b60e9859 7285 put_stateid(cstate, &u->open.op_stateid);
8b70484c
TM
7286}
7287
62cd4a59 7288void
b60e9859
CH
7289nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7290 union nfsd4_op_u *u)
62cd4a59 7291{
b60e9859 7292 put_stateid(cstate, &u->close.cl_stateid);
62cd4a59
TM
7293}
7294
7295void
b60e9859
CH
7296nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7297 union nfsd4_op_u *u)
62cd4a59 7298{
b60e9859 7299 put_stateid(cstate, &u->lock.lk_resp_stateid);
62cd4a59
TM
7300}
7301
7302/*
7303 * functions to consume current state id
7304 */
1e97b519 7305
9428fe1a 7306void
57832e7b
CH
7307nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7308 union nfsd4_op_u *u)
9428fe1a 7309{
57832e7b 7310 get_stateid(cstate, &u->open_downgrade.od_stateid);
9428fe1a
TM
7311}
7312
7313void
57832e7b
CH
7314nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7315 union nfsd4_op_u *u)
9428fe1a 7316{
57832e7b 7317 get_stateid(cstate, &u->delegreturn.dr_stateid);
9428fe1a
TM
7318}
7319
1e97b519 7320void
57832e7b
CH
7321nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7322 union nfsd4_op_u *u)
1e97b519 7323{
57832e7b 7324 get_stateid(cstate, &u->free_stateid.fr_stateid);
1e97b519
TM
7325}
7326
7327void
57832e7b
CH
7328nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7329 union nfsd4_op_u *u)
1e97b519 7330{
57832e7b 7331 get_stateid(cstate, &u->setattr.sa_stateid);
1e97b519
TM
7332}
7333
8b70484c 7334void
57832e7b
CH
7335nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7336 union nfsd4_op_u *u)
8b70484c 7337{
57832e7b 7338 get_stateid(cstate, &u->close.cl_stateid);
8b70484c
TM
7339}
7340
7341void
57832e7b
CH
7342nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7343 union nfsd4_op_u *u)
8b70484c 7344{
57832e7b 7345 get_stateid(cstate, &u->locku.lu_stateid);
8b70484c 7346}
30813e27
TM
7347
7348void
57832e7b
CH
7349nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7350 union nfsd4_op_u *u)
30813e27 7351{
57832e7b 7352 get_stateid(cstate, &u->read.rd_stateid);
30813e27
TM
7353}
7354
7355void
57832e7b
CH
7356nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7357 union nfsd4_op_u *u)
30813e27 7358{
57832e7b 7359 get_stateid(cstate, &u->write.wr_stateid);
30813e27 7360}