NFSv4: don't reprocess cached open CLAIM_PREVIOUS
1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/nfs_idmap.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4session.h"
67 #include "fscache.h"
68
69 #define NFSDBG_FACILITY NFSDBG_PROC
70
71 #define NFS4_POLL_RETRY_MIN (HZ/10)
72 #define NFS4_POLL_RETRY_MAX (15*HZ)
73
74 struct nfs4_opendata;
75 static int _nfs4_proc_open(struct nfs4_opendata *data);
76 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
77 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
78 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
79 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
80 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
81 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
82 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
83 struct nfs_fattr *fattr, struct iattr *sattr,
84 struct nfs4_state *state);
85 #ifdef CONFIG_NFS_V4_1
86 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
87 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
88 #endif
89 /* Prevent leaks of NFSv4 errors into userland */
90 static int nfs4_map_errors(int err)
91 {
92 if (err >= -1000)
93 return err;
94 switch (err) {
95 case -NFS4ERR_RESOURCE:
96 case -NFS4ERR_LAYOUTTRYLATER:
97 case -NFS4ERR_RECALLCONFLICT:
98 return -EREMOTEIO;
99 case -NFS4ERR_WRONGSEC:
100 return -EPERM;
101 case -NFS4ERR_BADOWNER:
102 case -NFS4ERR_BADNAME:
103 return -EINVAL;
104 case -NFS4ERR_SHARE_DENIED:
105 return -EACCES;
106 case -NFS4ERR_MINOR_VERS_MISMATCH:
107 return -EPROTONOSUPPORT;
108 case -NFS4ERR_ACCESS:
109 return -EACCES;
110 case -NFS4ERR_FILE_OPEN:
111 return -EBUSY;
112 default:
113 dprintk("%s could not handle NFSv4 error %d\n",
114 __func__, -err);
115 break;
116 }
117 return -EIO;
118 }
119
120 /*
121 * This is our standard bitmap for GETATTR requests.
122 */
123 const u32 nfs4_fattr_bitmap[3] = {
124 FATTR4_WORD0_TYPE
125 | FATTR4_WORD0_CHANGE
126 | FATTR4_WORD0_SIZE
127 | FATTR4_WORD0_FSID
128 | FATTR4_WORD0_FILEID,
129 FATTR4_WORD1_MODE
130 | FATTR4_WORD1_NUMLINKS
131 | FATTR4_WORD1_OWNER
132 | FATTR4_WORD1_OWNER_GROUP
133 | FATTR4_WORD1_RAWDEV
134 | FATTR4_WORD1_SPACE_USED
135 | FATTR4_WORD1_TIME_ACCESS
136 | FATTR4_WORD1_TIME_METADATA
137 | FATTR4_WORD1_TIME_MODIFY
138 };
139
140 static const u32 nfs4_pnfs_open_bitmap[3] = {
141 FATTR4_WORD0_TYPE
142 | FATTR4_WORD0_CHANGE
143 | FATTR4_WORD0_SIZE
144 | FATTR4_WORD0_FSID
145 | FATTR4_WORD0_FILEID,
146 FATTR4_WORD1_MODE
147 | FATTR4_WORD1_NUMLINKS
148 | FATTR4_WORD1_OWNER
149 | FATTR4_WORD1_OWNER_GROUP
150 | FATTR4_WORD1_RAWDEV
151 | FATTR4_WORD1_SPACE_USED
152 | FATTR4_WORD1_TIME_ACCESS
153 | FATTR4_WORD1_TIME_METADATA
154 | FATTR4_WORD1_TIME_MODIFY,
155 FATTR4_WORD2_MDSTHRESHOLD
156 };
157
158 static const u32 nfs4_open_noattr_bitmap[3] = {
159 FATTR4_WORD0_TYPE
160 | FATTR4_WORD0_CHANGE
161 | FATTR4_WORD0_FILEID,
162 };
163
164 const u32 nfs4_statfs_bitmap[2] = {
165 FATTR4_WORD0_FILES_AVAIL
166 | FATTR4_WORD0_FILES_FREE
167 | FATTR4_WORD0_FILES_TOTAL,
168 FATTR4_WORD1_SPACE_AVAIL
169 | FATTR4_WORD1_SPACE_FREE
170 | FATTR4_WORD1_SPACE_TOTAL
171 };
172
173 const u32 nfs4_pathconf_bitmap[2] = {
174 FATTR4_WORD0_MAXLINK
175 | FATTR4_WORD0_MAXNAME,
176 0
177 };
178
179 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
180 | FATTR4_WORD0_MAXREAD
181 | FATTR4_WORD0_MAXWRITE
182 | FATTR4_WORD0_LEASE_TIME,
183 FATTR4_WORD1_TIME_DELTA
184 | FATTR4_WORD1_FS_LAYOUT_TYPES,
185 FATTR4_WORD2_LAYOUT_BLKSIZE
186 };
187
188 const u32 nfs4_fs_locations_bitmap[2] = {
189 FATTR4_WORD0_TYPE
190 | FATTR4_WORD0_CHANGE
191 | FATTR4_WORD0_SIZE
192 | FATTR4_WORD0_FSID
193 | FATTR4_WORD0_FILEID
194 | FATTR4_WORD0_FS_LOCATIONS,
195 FATTR4_WORD1_MODE
196 | FATTR4_WORD1_NUMLINKS
197 | FATTR4_WORD1_OWNER
198 | FATTR4_WORD1_OWNER_GROUP
199 | FATTR4_WORD1_RAWDEV
200 | FATTR4_WORD1_SPACE_USED
201 | FATTR4_WORD1_TIME_ACCESS
202 | FATTR4_WORD1_TIME_METADATA
203 | FATTR4_WORD1_TIME_MODIFY
204 | FATTR4_WORD1_MOUNTED_ON_FILEID
205 };
206
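/* Prepare the READDIR arguments.  Cookies 0 and 1 are never sent to the
 * server; the '.' and '..' entries they represent are faked locally below. */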
207 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
208 struct nfs4_readdir_arg *readdir)
209 {
210 __be32 *start, *p;
211
212 if (cookie > 2) {
213 readdir->cookie = cookie;
214 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
215 return;
216 }
217
218 readdir->cookie = 0;
219 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
220 if (cookie == 2)
221 return;
222
223 /*
224 * NFSv4 servers do not return entries for '.' and '..'
225 * Therefore, we fake these entries here. We let '.'
226 * have cookie 0 and '..' have cookie 1. Note that
227 * when talking to the server, we always send cookie 0
228 * instead of 1 or 2.
229 */
230 start = p = kmap_atomic(*readdir->pages);
231
232 if (cookie == 0) {
233 *p++ = xdr_one; /* next */
234 *p++ = xdr_zero; /* cookie, first word */
235 *p++ = xdr_one; /* cookie, second word */
236 *p++ = xdr_one; /* entry len */
237 memcpy(p, ".\0\0\0", 4); /* entry */
238 p++;
239 *p++ = xdr_one; /* bitmap length */
240 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
241 *p++ = htonl(8); /* attribute buffer length */
242 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
243 }
244
245 *p++ = xdr_one; /* next */
246 *p++ = xdr_zero; /* cookie, first word */
247 *p++ = xdr_two; /* cookie, second word */
248 *p++ = xdr_two; /* entry len */
249 memcpy(p, "..\0\0", 4); /* entry */
250 p++;
251 *p++ = xdr_one; /* bitmap length */
252 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
253 *p++ = htonl(8); /* attribute buffer length */
254 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
255
256 readdir->pgbase = (char *)p - (char *)start;
257 readdir->count -= readdir->pgbase;
258 kunmap_atomic(start);
259 }
260
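/* Sleep for the current retry interval, clamped to
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX], then double it for the next
 * attempt.  The sleep is killable and freezer-aware. */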
261 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
262 {
263 int res = 0;
264
265 might_sleep();
266
267 if (*timeout <= 0)
268 *timeout = NFS4_POLL_RETRY_MIN;
269 if (*timeout > NFS4_POLL_RETRY_MAX)
270 *timeout = NFS4_POLL_RETRY_MAX;
271 freezable_schedule_timeout_killable(*timeout);
272 if (fatal_signal_pending(current))
273 res = -ERESTARTSYS;
274 *timeout <<= 1;
275 return res;
276 }
277
278 /* This is the error handling routine for processes that are allowed
279 * to sleep.
280 */
281 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
282 {
283 struct nfs_client *clp = server->nfs_client;
284 struct nfs4_state *state = exception->state;
285 struct inode *inode = exception->inode;
286 int ret = errorcode;
287
288 exception->retry = 0;
289 switch(errorcode) {
290 case 0:
291 return 0;
292 case -NFS4ERR_OPENMODE:
293 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
294 nfs4_inode_return_delegation(inode);
295 exception->retry = 1;
296 return 0;
297 }
298 if (state == NULL)
299 break;
300 ret = nfs4_schedule_stateid_recovery(server, state);
301 if (ret < 0)
302 break;
303 goto wait_on_recovery;
304 case -NFS4ERR_DELEG_REVOKED:
305 case -NFS4ERR_ADMIN_REVOKED:
306 case -NFS4ERR_BAD_STATEID:
307 if (inode != NULL && nfs4_have_delegation(inode, FMODE_READ)) {
308 nfs_remove_bad_delegation(inode);
309 exception->retry = 1;
310 break;
311 }
312 if (state == NULL)
313 break;
314 ret = nfs4_schedule_stateid_recovery(server, state);
315 if (ret < 0)
316 break;
317 goto wait_on_recovery;
318 case -NFS4ERR_EXPIRED:
319 if (state != NULL) {
320 ret = nfs4_schedule_stateid_recovery(server, state);
321 if (ret < 0)
322 break;
323 }
324 case -NFS4ERR_STALE_STATEID:
325 case -NFS4ERR_STALE_CLIENTID:
326 nfs4_schedule_lease_recovery(clp);
327 goto wait_on_recovery;
328 #if defined(CONFIG_NFS_V4_1)
329 case -NFS4ERR_BADSESSION:
330 case -NFS4ERR_BADSLOT:
331 case -NFS4ERR_BAD_HIGH_SLOT:
332 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
333 case -NFS4ERR_DEADSESSION:
334 case -NFS4ERR_SEQ_FALSE_RETRY:
335 case -NFS4ERR_SEQ_MISORDERED:
336 dprintk("%s ERROR: %d Reset session\n", __func__,
337 errorcode);
338 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
339 goto wait_on_recovery;
340 #endif /* defined(CONFIG_NFS_V4_1) */
341 case -NFS4ERR_FILE_OPEN:
342 if (exception->timeout > HZ) {
343 /* We have retried a decent amount, time to
344 * fail
345 */
346 ret = -EBUSY;
347 break;
348 }
349 case -NFS4ERR_GRACE:
350 case -NFS4ERR_DELAY:
351 ret = nfs4_delay(server->client, &exception->timeout);
352 if (ret != 0)
353 break;
354 case -NFS4ERR_RETRY_UNCACHED_REP:
355 case -NFS4ERR_OLD_STATEID:
356 exception->retry = 1;
357 break;
358 case -NFS4ERR_BADOWNER:
359 /* The following works around a Linux server bug! */
360 case -NFS4ERR_BADNAME:
361 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
362 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
363 exception->retry = 1;
364 printk(KERN_WARNING "NFS: v4 server %s "
365 "does not accept raw "
366 "uid/gids. "
367 "Reenabling the idmapper.\n",
368 server->nfs_client->cl_hostname);
369 }
370 }
371 /* We failed to handle the error */
372 return nfs4_map_errors(ret);
373 wait_on_recovery:
374 ret = nfs4_wait_clnt_recover(clp);
375 if (ret == 0)
376 exception->retry = 1;
377 return ret;
378 }
379
380
381 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
382 {
383 spin_lock(&clp->cl_lock);
384 if (time_before(clp->cl_last_renewal,timestamp))
385 clp->cl_last_renewal = timestamp;
386 spin_unlock(&clp->cl_lock);
387 }
388
389 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
390 {
391 do_renew_lease(server->nfs_client, timestamp);
392 }
393
394 #if defined(CONFIG_NFS_V4_1)
395
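/* Release the session slot used by a completed request: hand it directly to
 * a waiting task if possible, otherwise return it to the slot table, in which
 * case the server may be notified of the lower highest_used_slotid. */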
396 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
397 {
398 struct nfs4_session *session;
399 struct nfs4_slot_table *tbl;
400 bool send_new_highest_used_slotid = false;
401
402 if (!res->sr_slot) {
403 /* just wake up the next guy waiting since
404 * we may not have consumed a slot after all */
405 dprintk("%s: No slot\n", __func__);
406 return;
407 }
408 tbl = res->sr_slot->table;
409 session = tbl->session;
410
411 spin_lock(&tbl->slot_tbl_lock);
412 /* Be nice to the server: try to ensure that the last transmitted
413 * value for highest_used_slotid <= target_highest_slotid
414 */
415 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
416 send_new_highest_used_slotid = true;
417
418 if (nfs41_wake_and_assign_slot(tbl, res->sr_slot)) {
419 send_new_highest_used_slotid = false;
420 goto out_unlock;
421 }
422 nfs4_free_slot(tbl, res->sr_slot);
423
424 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
425 send_new_highest_used_slotid = false;
426 out_unlock:
427 spin_unlock(&tbl->slot_tbl_lock);
428 res->sr_slot = NULL;
429 if (send_new_highest_used_slotid)
430 nfs41_server_notify_highest_slotid_update(session->clp);
431 }
432
433 static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
434 {
435 struct nfs4_session *session;
436 struct nfs4_slot *slot;
437 struct nfs_client *clp;
438 bool interrupted = false;
439 int ret = 1;
440
441 /* don't increment the sequence number if the task wasn't sent */
442 if (!RPC_WAS_SENT(task))
443 goto out;
444
445 slot = res->sr_slot;
446 session = slot->table->session;
447
448 if (slot->interrupted) {
449 slot->interrupted = 0;
450 interrupted = true;
451 }
452
453 /* Check the SEQUENCE operation status */
454 switch (res->sr_status) {
455 case 0:
456 /* Update the slot's sequence and clientid lease timer */
457 ++slot->seq_nr;
458 clp = session->clp;
459 do_renew_lease(clp, res->sr_timestamp);
460 /* Check sequence flags */
461 if (res->sr_status_flags != 0)
462 nfs4_schedule_lease_recovery(clp);
463 nfs41_update_target_slotid(slot->table, slot, res);
464 break;
465 case 1:
466 /*
467 * sr_status remains 1 if an RPC level error occurred.
468 * The server may or may not have processed the sequence
469 * operation.
470 * Mark the slot as having hosted an interrupted RPC call.
471 */
472 slot->interrupted = 1;
473 goto out;
474 case -NFS4ERR_DELAY:
475 /* The server detected a resend of the RPC call and
476 * returned NFS4ERR_DELAY as per Section 2.10.6.2
477 * of RFC5661.
478 */
479 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
480 __func__,
481 slot->slot_nr,
482 slot->seq_nr);
483 goto out_retry;
484 case -NFS4ERR_BADSLOT:
485 /*
486 * The slot id we used was probably retired. Try again
487 * using a different slot id.
488 */
489 goto retry_nowait;
490 case -NFS4ERR_SEQ_MISORDERED:
491 /*
492 * Was the last operation on this sequence interrupted?
493 * If so, retry after bumping the sequence number.
494 */
495 if (interrupted) {
496 ++slot->seq_nr;
497 goto retry_nowait;
498 }
499 /*
500 * Could this slot have been previously retired?
501 * If so, then the server may be expecting seq_nr = 1!
502 */
503 if (slot->seq_nr != 1) {
504 slot->seq_nr = 1;
505 goto retry_nowait;
506 }
507 break;
508 case -NFS4ERR_SEQ_FALSE_RETRY:
509 ++slot->seq_nr;
510 goto retry_nowait;
511 default:
512 /* Just update the slot sequence no. */
513 ++slot->seq_nr;
514 }
515 out:
516 /* The session may be reset by one of the error handlers. */
517 dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
518 nfs41_sequence_free_slot(res);
519 return ret;
520 retry_nowait:
521 if (rpc_restart_call_prepare(task)) {
522 task->tk_status = 0;
523 ret = 0;
524 }
525 goto out;
526 out_retry:
527 if (!rpc_restart_call(task))
528 goto out;
529 rpc_delay(task, NFS4_POLL_RETRY_MAX);
530 return 0;
531 }
532
533 static int nfs4_sequence_done(struct rpc_task *task,
534 struct nfs4_sequence_res *res)
535 {
536 if (res->sr_slot == NULL)
537 return 1;
538 return nfs41_sequence_done(task, res);
539 }
540
541 static void nfs41_init_sequence(struct nfs4_sequence_args *args,
542 struct nfs4_sequence_res *res, int cache_reply)
543 {
544 args->sa_slot = NULL;
545 args->sa_cache_this = 0;
546 args->sa_privileged = 0;
547 if (cache_reply)
548 args->sa_cache_this = 1;
549 res->sr_slot = NULL;
550 }
551
552 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
553 {
554 args->sa_privileged = 1;
555 }
556
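/* Allocate a session slot for the request and fill in the SEQUENCE
 * arguments.  If the slot table is draining (and the task is not privileged)
 * or no slot is free, the task sleeps on the slot table wait queue and
 * -EAGAIN is returned. */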
557 int nfs41_setup_sequence(struct nfs4_session *session,
558 struct nfs4_sequence_args *args,
559 struct nfs4_sequence_res *res,
560 struct rpc_task *task)
561 {
562 struct nfs4_slot *slot;
563 struct nfs4_slot_table *tbl;
564
565 dprintk("--> %s\n", __func__);
566 /* slot already allocated? */
567 if (res->sr_slot != NULL)
568 goto out_success;
569
570 tbl = &session->fc_slot_table;
571
572 task->tk_timeout = 0;
573
574 spin_lock(&tbl->slot_tbl_lock);
575 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
576 !args->sa_privileged) {
577 /* The state manager will wait until the slot table is empty */
578 dprintk("%s session is draining\n", __func__);
579 goto out_sleep;
580 }
581
582 slot = nfs4_alloc_slot(tbl);
583 if (IS_ERR(slot)) {
584 /* If out of memory, try again in 1/4 second */
585 if (slot == ERR_PTR(-ENOMEM))
586 task->tk_timeout = HZ >> 2;
587 dprintk("<-- %s: no free slots\n", __func__);
588 goto out_sleep;
589 }
590 spin_unlock(&tbl->slot_tbl_lock);
591
592 args->sa_slot = slot;
593
594 dprintk("<-- %s slotid=%d seqid=%d\n", __func__,
595 slot->slot_nr, slot->seq_nr);
596
597 res->sr_slot = slot;
598 res->sr_timestamp = jiffies;
599 res->sr_status_flags = 0;
600 /*
601 * sr_status is only set in decode_sequence, and so will remain
602 * set to 1 if an rpc level failure occurs.
603 */
604 res->sr_status = 1;
605 out_success:
606 rpc_call_start(task);
607 return 0;
608 out_sleep:
609 /* Privileged tasks are queued with top priority */
610 if (args->sa_privileged)
611 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
612 NULL, RPC_PRIORITY_PRIVILEGED);
613 else
614 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
615 spin_unlock(&tbl->slot_tbl_lock);
616 return -EAGAIN;
617 }
618 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
619
620 int nfs4_setup_sequence(const struct nfs_server *server,
621 struct nfs4_sequence_args *args,
622 struct nfs4_sequence_res *res,
623 struct rpc_task *task)
624 {
625 struct nfs4_session *session = nfs4_get_session(server);
626 int ret = 0;
627
628 if (session == NULL) {
629 rpc_call_start(task);
630 goto out;
631 }
632
633 dprintk("--> %s clp %p session %p sr_slot %d\n",
634 __func__, session->clp, session, res->sr_slot ?
635 res->sr_slot->slot_nr : -1);
636
637 ret = nfs41_setup_sequence(session, args, res, task);
638 out:
639 dprintk("<-- %s status=%d\n", __func__, ret);
640 return ret;
641 }
642
643 struct nfs41_call_sync_data {
644 const struct nfs_server *seq_server;
645 struct nfs4_sequence_args *seq_args;
646 struct nfs4_sequence_res *seq_res;
647 };
648
649 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
650 {
651 struct nfs41_call_sync_data *data = calldata;
652 struct nfs4_session *session = nfs4_get_session(data->seq_server);
653
654 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
655
656 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
657 }
658
659 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
660 {
661 struct nfs41_call_sync_data *data = calldata;
662
663 nfs41_sequence_done(task, data->seq_res);
664 }
665
666 static const struct rpc_call_ops nfs41_call_sync_ops = {
667 .rpc_call_prepare = nfs41_call_sync_prepare,
668 .rpc_call_done = nfs41_call_sync_done,
669 };
670
671 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
672 struct nfs_server *server,
673 struct rpc_message *msg,
674 struct nfs4_sequence_args *args,
675 struct nfs4_sequence_res *res)
676 {
677 int ret;
678 struct rpc_task *task;
679 struct nfs41_call_sync_data data = {
680 .seq_server = server,
681 .seq_args = args,
682 .seq_res = res,
683 };
684 struct rpc_task_setup task_setup = {
685 .rpc_client = clnt,
686 .rpc_message = msg,
687 .callback_ops = &nfs41_call_sync_ops,
688 .callback_data = &data
689 };
690
691 task = rpc_run_task(&task_setup);
692 if (IS_ERR(task))
693 ret = PTR_ERR(task);
694 else {
695 ret = task->tk_status;
696 rpc_put_task(task);
697 }
698 return ret;
699 }
700
701 #else
702 static
703 void nfs41_init_sequence(struct nfs4_sequence_args *args,
704 struct nfs4_sequence_res *res, int cache_reply)
705 {
706 }
707
708 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
709 {
710 }
711
712
713 static int nfs4_sequence_done(struct rpc_task *task,
714 struct nfs4_sequence_res *res)
715 {
716 return 1;
717 }
718 #endif /* CONFIG_NFS_V4_1 */
719
720 static
721 int _nfs4_call_sync(struct rpc_clnt *clnt,
722 struct nfs_server *server,
723 struct rpc_message *msg,
724 struct nfs4_sequence_args *args,
725 struct nfs4_sequence_res *res)
726 {
727 return rpc_call_sync(clnt, msg, 0);
728 }
729
730 static
731 int nfs4_call_sync(struct rpc_clnt *clnt,
732 struct nfs_server *server,
733 struct rpc_message *msg,
734 struct nfs4_sequence_args *args,
735 struct nfs4_sequence_res *res,
736 int cache_reply)
737 {
738 nfs41_init_sequence(args, res, cache_reply);
739 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
740 args, res);
741 }
742
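/* Apply the change_info4 returned by a directory-modifying operation:
 * invalidate cached attributes and readdir data, update i_version, and force
 * dentry revalidation if the change was not applied atomically. */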
743 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
744 {
745 struct nfs_inode *nfsi = NFS_I(dir);
746
747 spin_lock(&dir->i_lock);
748 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
749 if (!cinfo->atomic || cinfo->before != dir->i_version)
750 nfs_force_lookup_revalidate(dir);
751 dir->i_version = cinfo->after;
752 nfs_fscache_invalidate(dir);
753 spin_unlock(&dir->i_lock);
754 }
755
756 struct nfs4_opendata {
757 struct kref kref;
758 struct nfs_openargs o_arg;
759 struct nfs_openres o_res;
760 struct nfs_open_confirmargs c_arg;
761 struct nfs_open_confirmres c_res;
762 struct nfs4_string owner_name;
763 struct nfs4_string group_name;
764 struct nfs_fattr f_attr;
765 struct dentry *dir;
766 struct dentry *dentry;
767 struct nfs4_state_owner *owner;
768 struct nfs4_state *state;
769 struct iattr attrs;
770 unsigned long timestamp;
771 unsigned int rpc_done : 1;
772 unsigned int is_recover : 1;
773 int rpc_status;
774 int cancelled;
775 };
776
777 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
778 int err, struct nfs4_exception *exception)
779 {
780 if (err != -EINVAL)
781 return false;
782 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
783 return false;
784 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
785 exception->retry = 1;
786 return true;
787 }
788
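/* Map the NFSv4.1 filehandle-based open claim types to their NFSv4.0
 * equivalents when the server does not support atomic open v1. */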
789 static enum open_claim_type4
790 nfs4_map_atomic_open_claim(struct nfs_server *server,
791 enum open_claim_type4 claim)
792 {
793 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
794 return claim;
795 switch (claim) {
796 default:
797 return claim;
798 case NFS4_OPEN_CLAIM_FH:
799 return NFS4_OPEN_CLAIM_NULL;
800 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
801 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
802 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
803 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
804 }
805 }
806
807 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
808 {
809 p->o_res.f_attr = &p->f_attr;
810 p->o_res.seqid = p->o_arg.seqid;
811 p->c_res.seqid = p->c_arg.seqid;
812 p->o_res.server = p->o_arg.server;
813 p->o_res.access_request = p->o_arg.access;
814 nfs_fattr_init(&p->f_attr);
815 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
816 }
817
818 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
819 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
820 const struct iattr *attrs,
821 enum open_claim_type4 claim,
822 gfp_t gfp_mask)
823 {
824 struct dentry *parent = dget_parent(dentry);
825 struct inode *dir = parent->d_inode;
826 struct nfs_server *server = NFS_SERVER(dir);
827 struct nfs4_opendata *p;
828
829 p = kzalloc(sizeof(*p), gfp_mask);
830 if (p == NULL)
831 goto err;
832 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
833 if (p->o_arg.seqid == NULL)
834 goto err_free;
835 nfs_sb_active(dentry->d_sb);
836 p->dentry = dget(dentry);
837 p->dir = parent;
838 p->owner = sp;
839 atomic_inc(&sp->so_count);
840 p->o_arg.open_flags = flags;
841 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
842 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
843 * will return permission denied for all bits until close */
844 if (!(flags & O_EXCL)) {
845 /* ask server to check for all possible rights as results
846 * are cached */
847 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
848 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
849 }
850 p->o_arg.clientid = server->nfs_client->cl_clientid;
851 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
852 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
853 p->o_arg.name = &dentry->d_name;
854 p->o_arg.server = server;
855 p->o_arg.bitmask = server->attr_bitmask;
856 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
857 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
858 switch (p->o_arg.claim) {
859 case NFS4_OPEN_CLAIM_NULL:
860 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
861 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
862 p->o_arg.fh = NFS_FH(dir);
863 break;
864 case NFS4_OPEN_CLAIM_PREVIOUS:
865 case NFS4_OPEN_CLAIM_FH:
866 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
867 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
868 p->o_arg.fh = NFS_FH(dentry->d_inode);
869 }
870 if (attrs != NULL && attrs->ia_valid != 0) {
871 __be32 verf[2];
872
873 p->o_arg.u.attrs = &p->attrs;
874 memcpy(&p->attrs, attrs, sizeof(p->attrs));
875
876 verf[0] = jiffies;
877 verf[1] = current->pid;
878 memcpy(p->o_arg.u.verifier.data, verf,
879 sizeof(p->o_arg.u.verifier.data));
880 }
881 p->c_arg.fh = &p->o_res.fh;
882 p->c_arg.stateid = &p->o_res.stateid;
883 p->c_arg.seqid = p->o_arg.seqid;
884 nfs4_init_opendata_res(p);
885 kref_init(&p->kref);
886 return p;
887 err_free:
888 kfree(p);
889 err:
890 dput(parent);
891 return NULL;
892 }
893
894 static void nfs4_opendata_free(struct kref *kref)
895 {
896 struct nfs4_opendata *p = container_of(kref,
897 struct nfs4_opendata, kref);
898 struct super_block *sb = p->dentry->d_sb;
899
900 nfs_free_seqid(p->o_arg.seqid);
901 if (p->state != NULL)
902 nfs4_put_open_state(p->state);
903 nfs4_put_state_owner(p->owner);
904 dput(p->dir);
905 dput(p->dentry);
906 nfs_sb_deactive(sb);
907 nfs_fattr_free_names(&p->f_attr);
908 kfree(p);
909 }
910
911 static void nfs4_opendata_put(struct nfs4_opendata *p)
912 {
913 if (p != NULL)
914 kref_put(&p->kref, nfs4_opendata_free);
915 }
916
917 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
918 {
919 int ret;
920
921 ret = rpc_wait_for_completion_task(task);
922 return ret;
923 }
924
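/* Can this open be satisfied by an open state we already hold?
 * O_EXCL and O_TRUNC always force a new OPEN call to the server. */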
925 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
926 {
927 int ret = 0;
928
929 if (open_mode & (O_EXCL|O_TRUNC))
930 goto out;
931 switch (mode & (FMODE_READ|FMODE_WRITE)) {
932 case FMODE_READ:
933 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
934 && state->n_rdonly != 0;
935 break;
936 case FMODE_WRITE:
937 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
938 && state->n_wronly != 0;
939 break;
940 case FMODE_READ|FMODE_WRITE:
941 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
942 && state->n_rdwr != 0;
943 }
944 out:
945 return ret;
946 }
947
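/* A delegation may back this open only if it covers the requested mode and
 * is not being reclaimed or returned; mark it referenced if it qualifies. */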
948 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
949 {
950 if (delegation == NULL)
951 return 0;
952 if ((delegation->type & fmode) != fmode)
953 return 0;
954 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
955 return 0;
956 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
957 return 0;
958 nfs_mark_delegation_referenced(delegation);
959 return 1;
960 }
961
962 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
963 {
964 switch (fmode) {
965 case FMODE_WRITE:
966 state->n_wronly++;
967 break;
968 case FMODE_READ:
969 state->n_rdonly++;
970 break;
971 case FMODE_READ|FMODE_WRITE:
972 state->n_rdwr++;
973 }
974 nfs4_state_set_mode_locked(state, state->state | fmode);
975 }
976
977 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
978 {
979 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
980 nfs4_stateid_copy(&state->stateid, stateid);
981 nfs4_stateid_copy(&state->open_stateid, stateid);
982 set_bit(NFS_OPEN_STATE, &state->flags);
983 switch (fmode) {
984 case FMODE_READ:
985 set_bit(NFS_O_RDONLY_STATE, &state->flags);
986 break;
987 case FMODE_WRITE:
988 set_bit(NFS_O_WRONLY_STATE, &state->flags);
989 break;
990 case FMODE_READ|FMODE_WRITE:
991 set_bit(NFS_O_RDWR_STATE, &state->flags);
992 }
993 }
994
995 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
996 {
997 write_seqlock(&state->seqlock);
998 nfs_set_open_stateid_locked(state, stateid, fmode);
999 write_sequnlock(&state->seqlock);
1000 }
1001
1002 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1003 {
1004 /*
1005 * Protect the call to nfs4_state_set_mode_locked and
1006 * serialise the stateid update
1007 */
1008 write_seqlock(&state->seqlock);
1009 if (deleg_stateid != NULL) {
1010 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1011 set_bit(NFS_DELEGATED_STATE, &state->flags);
1012 }
1013 if (open_stateid != NULL)
1014 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1015 write_sequnlock(&state->seqlock);
1016 spin_lock(&state->owner->so_lock);
1017 update_open_stateflags(state, fmode);
1018 spin_unlock(&state->owner->so_lock);
1019 }
1020
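/* Record the stateid returned by the server, together with the delegation
 * stateid if we still hold a matching delegation.
 * Returns 1 if the nfs4_state was updated, 0 otherwise. */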
1021 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1022 {
1023 struct nfs_inode *nfsi = NFS_I(state->inode);
1024 struct nfs_delegation *deleg_cur;
1025 int ret = 0;
1026
1027 fmode &= (FMODE_READ|FMODE_WRITE);
1028
1029 rcu_read_lock();
1030 deleg_cur = rcu_dereference(nfsi->delegation);
1031 if (deleg_cur == NULL)
1032 goto no_delegation;
1033
1034 spin_lock(&deleg_cur->lock);
1035 if (nfsi->delegation != deleg_cur ||
1036 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1037 (deleg_cur->type & fmode) != fmode)
1038 goto no_delegation_unlock;
1039
1040 if (delegation == NULL)
1041 delegation = &deleg_cur->stateid;
1042 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1043 goto no_delegation_unlock;
1044
1045 nfs_mark_delegation_referenced(deleg_cur);
1046 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1047 ret = 1;
1048 no_delegation_unlock:
1049 spin_unlock(&deleg_cur->lock);
1050 no_delegation:
1051 rcu_read_unlock();
1052
1053 if (!ret && open_stateid != NULL) {
1054 __update_open_stateid(state, open_stateid, NULL, fmode);
1055 ret = 1;
1056 }
1057
1058 return ret;
1059 }
1060
1061
1062 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1063 {
1064 struct nfs_delegation *delegation;
1065
1066 rcu_read_lock();
1067 delegation = rcu_dereference(NFS_I(inode)->delegation);
1068 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1069 rcu_read_unlock();
1070 return;
1071 }
1072 rcu_read_unlock();
1073 nfs4_inode_return_delegation(inode);
1074 }
1075
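/* Try to satisfy the open from state we already hold: a compatible cached
 * open, or a delegation covering the requested mode.  Returns
 * ERR_PTR(-EAGAIN) if an OPEN call to the server is still needed. */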
1076 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1077 {
1078 struct nfs4_state *state = opendata->state;
1079 struct nfs_inode *nfsi = NFS_I(state->inode);
1080 struct nfs_delegation *delegation;
1081 int open_mode = opendata->o_arg.open_flags;
1082 fmode_t fmode = opendata->o_arg.fmode;
1083 nfs4_stateid stateid;
1084 int ret = -EAGAIN;
1085
1086 for (;;) {
1087 if (can_open_cached(state, fmode, open_mode)) {
1088 spin_lock(&state->owner->so_lock);
1089 if (can_open_cached(state, fmode, open_mode)) {
1090 update_open_stateflags(state, fmode);
1091 spin_unlock(&state->owner->so_lock);
1092 goto out_return_state;
1093 }
1094 spin_unlock(&state->owner->so_lock);
1095 }
1096 rcu_read_lock();
1097 delegation = rcu_dereference(nfsi->delegation);
1098 if (!can_open_delegated(delegation, fmode)) {
1099 rcu_read_unlock();
1100 break;
1101 }
1102 /* Save the delegation */
1103 nfs4_stateid_copy(&stateid, &delegation->stateid);
1104 rcu_read_unlock();
1105 nfs_release_seqid(opendata->o_arg.seqid);
1106 if (!opendata->is_recover) {
1107 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1108 if (ret != 0)
1109 goto out;
1110 }
1111 ret = -EAGAIN;
1112
1113 /* Try to update the stateid using the delegation */
1114 if (update_open_stateid(state, NULL, &stateid, fmode))
1115 goto out_return_state;
1116 }
1117 out:
1118 return ERR_PTR(ret);
1119 out_return_state:
1120 atomic_inc(&state->count);
1121 return state;
1122 }
1123
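/* Handle a delegation handed out on OPEN: warn if the server returned one
 * for CLAIM_DELEGATE_CUR, otherwise record it as either a new or a
 * reclaimed delegation. */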
1124 static void
1125 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1126 {
1127 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1128 struct nfs_delegation *delegation;
1129 int delegation_flags = 0;
1130
1131 rcu_read_lock();
1132 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1133 if (delegation)
1134 delegation_flags = delegation->flags;
1135 rcu_read_unlock();
1136 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1137 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1138 "returning a delegation for "
1139 "OPEN(CLAIM_DELEGATE_CUR)\n",
1140 clp->cl_hostname);
1141 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1142 nfs_inode_set_delegation(state->inode,
1143 data->owner->so_cred,
1144 &data->o_res);
1145 else
1146 nfs_inode_reclaim_delegation(state->inode,
1147 data->owner->so_cred,
1148 &data->o_res);
1149 }
1150
1151 /*
1152 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1153 * and update the nfs4_state.
1154 */
1155 static struct nfs4_state *
1156 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1157 {
1158 struct inode *inode = data->state->inode;
1159 struct nfs4_state *state = data->state;
1160 int ret;
1161
1162 if (!data->rpc_done) {
1163 if (data->rpc_status) {
1164 ret = data->rpc_status;
1165 goto err;
1166 }
1167 /* cached opens have already been processed */
1168 goto update;
1169 }
1170
1171 ret = -ENOMEM;
1172 state = nfs4_get_open_state(inode, data->owner);
1173 if (state == NULL)
1174 goto err;
1175
1176 ret = nfs_refresh_inode(inode, &data->f_attr);
1177 if (ret)
1178 goto err;
1179
1180 if (data->o_res.delegation_type != 0)
1181 nfs4_opendata_check_deleg(data, state);
1182 update:
1183 update_open_stateid(state, &data->o_res.stateid, NULL,
1184 data->o_arg.fmode);
1185
1186 return state;
1187 err:
1188 return ERR_PTR(ret);
1189
1190 }
1191
1192 static struct nfs4_state *
1193 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1194 {
1195 struct inode *inode;
1196 struct nfs4_state *state = NULL;
1197 int ret;
1198
1199 if (!data->rpc_done) {
1200 state = nfs4_try_open_cached(data);
1201 goto out;
1202 }
1203
1204 ret = -EAGAIN;
1205 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1206 goto err;
1207 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1208 ret = PTR_ERR(inode);
1209 if (IS_ERR(inode))
1210 goto err;
1211 ret = -ENOMEM;
1212 state = nfs4_get_open_state(inode, data->owner);
1213 if (state == NULL)
1214 goto err_put_inode;
1215 if (data->o_res.delegation_type != 0)
1216 nfs4_opendata_check_deleg(data, state);
1217 update_open_stateid(state, &data->o_res.stateid, NULL,
1218 data->o_arg.fmode);
1219 iput(inode);
1220 out:
1221 nfs_release_seqid(data->o_arg.seqid);
1222 return state;
1223 err_put_inode:
1224 iput(inode);
1225 err:
1226 return ERR_PTR(ret);
1227 }
1228
1229 static struct nfs4_state *
1230 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1231 {
1232 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1233 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1234 return _nfs4_opendata_to_nfs4_state(data);
1235 }
1236
1237 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1238 {
1239 struct nfs_inode *nfsi = NFS_I(state->inode);
1240 struct nfs_open_context *ctx;
1241
1242 spin_lock(&state->inode->i_lock);
1243 list_for_each_entry(ctx, &nfsi->open_files, list) {
1244 if (ctx->state != state)
1245 continue;
1246 get_nfs_open_context(ctx);
1247 spin_unlock(&state->inode->i_lock);
1248 return ctx;
1249 }
1250 spin_unlock(&state->inode->i_lock);
1251 return ERR_PTR(-ENOENT);
1252 }
1253
1254 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1255 struct nfs4_state *state, enum open_claim_type4 claim)
1256 {
1257 struct nfs4_opendata *opendata;
1258
1259 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1260 NULL, claim, GFP_NOFS);
1261 if (opendata == NULL)
1262 return ERR_PTR(-ENOMEM);
1263 opendata->state = state;
1264 atomic_inc(&state->count);
1265 return opendata;
1266 }
1267
1268 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1269 {
1270 struct nfs4_state *newstate;
1271 int ret;
1272
1273 opendata->o_arg.open_flags = 0;
1274 opendata->o_arg.fmode = fmode;
1275 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1276 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1277 nfs4_init_opendata_res(opendata);
1278 ret = _nfs4_recover_proc_open(opendata);
1279 if (ret != 0)
1280 return ret;
1281 newstate = nfs4_opendata_to_nfs4_state(opendata);
1282 if (IS_ERR(newstate))
1283 return PTR_ERR(newstate);
1284 nfs4_close_state(newstate, fmode);
1285 *res = newstate;
1286 return 0;
1287 }
1288
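/* Re-establish every open mode (read/write, write-only, read-only) that this
 * state still has outstanding, then resynchronize the stateid if no
 * delegation is held. */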
1289 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1290 {
1291 struct nfs4_state *newstate;
1292 int ret;
1293
1294 /* memory barrier prior to reading state->n_* */
1295 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1296 clear_bit(NFS_OPEN_STATE, &state->flags);
1297 smp_rmb();
1298 if (state->n_rdwr != 0) {
1299 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1300 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1301 if (ret != 0)
1302 return ret;
1303 if (newstate != state)
1304 return -ESTALE;
1305 }
1306 if (state->n_wronly != 0) {
1307 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1308 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1309 if (ret != 0)
1310 return ret;
1311 if (newstate != state)
1312 return -ESTALE;
1313 }
1314 if (state->n_rdonly != 0) {
1315 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1316 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1317 if (ret != 0)
1318 return ret;
1319 if (newstate != state)
1320 return -ESTALE;
1321 }
1322 /*
1323 * We may have performed cached opens for all three recoveries.
1324 * Check if we need to update the current stateid.
1325 */
1326 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1327 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1328 write_seqlock(&state->seqlock);
1329 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1330 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1331 write_sequnlock(&state->seqlock);
1332 }
1333 return 0;
1334 }
1335
1336 /*
1337 * OPEN_RECLAIM:
1338 * reclaim state on the server after a reboot.
1339 */
1340 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1341 {
1342 struct nfs_delegation *delegation;
1343 struct nfs4_opendata *opendata;
1344 fmode_t delegation_type = 0;
1345 int status;
1346
1347 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1348 NFS4_OPEN_CLAIM_PREVIOUS);
1349 if (IS_ERR(opendata))
1350 return PTR_ERR(opendata);
1351 rcu_read_lock();
1352 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1353 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1354 delegation_type = delegation->type;
1355 rcu_read_unlock();
1356 opendata->o_arg.u.delegation_type = delegation_type;
1357 status = nfs4_open_recover(opendata, state);
1358 nfs4_opendata_put(opendata);
1359 return status;
1360 }
1361
1362 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1363 {
1364 struct nfs_server *server = NFS_SERVER(state->inode);
1365 struct nfs4_exception exception = { };
1366 int err;
1367 do {
1368 err = _nfs4_do_open_reclaim(ctx, state);
1369 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1370 continue;
1371 if (err != -NFS4ERR_DELAY)
1372 break;
1373 nfs4_handle_exception(server, err, &exception);
1374 } while (exception.retry);
1375 return err;
1376 }
1377
1378 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1379 {
1380 struct nfs_open_context *ctx;
1381 int ret;
1382
1383 ctx = nfs4_state_find_open_context(state);
1384 if (IS_ERR(ctx))
1385 return -EAGAIN;
1386 ret = nfs4_do_open_reclaim(ctx, state);
1387 put_nfs_open_context(ctx);
1388 return ret;
1389 }
1390
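/* Decide how a failed delegation recall should be handled: schedule session,
 * lease or stateid recovery as appropriate and report whether the caller
 * should retry (-EAGAIN). */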
1391 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1392 {
1393 switch (err) {
1394 default:
1395 printk(KERN_ERR "NFS: %s: unhandled error "
1396 "%d.\n", __func__, err);
1397 case 0:
1398 case -ENOENT:
1399 case -ESTALE:
1400 break;
1401 case -NFS4ERR_BADSESSION:
1402 case -NFS4ERR_BADSLOT:
1403 case -NFS4ERR_BAD_HIGH_SLOT:
1404 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1405 case -NFS4ERR_DEADSESSION:
1406 set_bit(NFS_DELEGATED_STATE, &state->flags);
1407 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1408 return -EAGAIN;
1409 case -NFS4ERR_STALE_CLIENTID:
1410 case -NFS4ERR_STALE_STATEID:
1411 set_bit(NFS_DELEGATED_STATE, &state->flags);
1412 case -NFS4ERR_EXPIRED:
1413 /* Don't recall a delegation if it was lost */
1414 nfs4_schedule_lease_recovery(server->nfs_client);
1415 return -EAGAIN;
1416 case -NFS4ERR_DELEG_REVOKED:
1417 case -NFS4ERR_ADMIN_REVOKED:
1418 case -NFS4ERR_BAD_STATEID:
1419 case -NFS4ERR_OPENMODE:
1420 nfs_inode_find_state_and_recover(state->inode,
1421 stateid);
1422 nfs4_schedule_stateid_recovery(server, state);
1423 return 0;
1424 case -NFS4ERR_DELAY:
1425 case -NFS4ERR_GRACE:
1426 set_bit(NFS_DELEGATED_STATE, &state->flags);
1427 ssleep(1);
1428 return -EAGAIN;
1429 case -ENOMEM:
1430 case -NFS4ERR_DENIED:
1431 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1432 return 0;
1433 }
1434 return err;
1435 }
1436
1437 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1438 {
1439 struct nfs_server *server = NFS_SERVER(state->inode);
1440 struct nfs4_opendata *opendata;
1441 int err;
1442
1443 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1444 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1445 if (IS_ERR(opendata))
1446 return PTR_ERR(opendata);
1447 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1448 err = nfs4_open_recover(opendata, state);
1449 nfs4_opendata_put(opendata);
1450 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1451 }
1452
1453 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1454 {
1455 struct nfs4_opendata *data = calldata;
1456
1457 data->rpc_status = task->tk_status;
1458 if (data->rpc_status == 0) {
1459 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1460 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1461 renew_lease(data->o_res.server, data->timestamp);
1462 data->rpc_done = 1;
1463 }
1464 }
1465
1466 static void nfs4_open_confirm_release(void *calldata)
1467 {
1468 struct nfs4_opendata *data = calldata;
1469 struct nfs4_state *state = NULL;
1470
1471 /* If this request hasn't been cancelled, do nothing */
1472 if (data->cancelled == 0)
1473 goto out_free;
1474 /* In case of error, no cleanup! */
1475 if (!data->rpc_done)
1476 goto out_free;
1477 state = nfs4_opendata_to_nfs4_state(data);
1478 if (!IS_ERR(state))
1479 nfs4_close_state(state, data->o_arg.fmode);
1480 out_free:
1481 nfs4_opendata_put(data);
1482 }
1483
1484 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1485 .rpc_call_done = nfs4_open_confirm_done,
1486 .rpc_release = nfs4_open_confirm_release,
1487 };
1488
1489 /*
1490 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1491 */
1492 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1493 {
1494 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1495 struct rpc_task *task;
1496 struct rpc_message msg = {
1497 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1498 .rpc_argp = &data->c_arg,
1499 .rpc_resp = &data->c_res,
1500 .rpc_cred = data->owner->so_cred,
1501 };
1502 struct rpc_task_setup task_setup_data = {
1503 .rpc_client = server->client,
1504 .rpc_message = &msg,
1505 .callback_ops = &nfs4_open_confirm_ops,
1506 .callback_data = data,
1507 .workqueue = nfsiod_workqueue,
1508 .flags = RPC_TASK_ASYNC,
1509 };
1510 int status;
1511
1512 kref_get(&data->kref);
1513 data->rpc_done = 0;
1514 data->rpc_status = 0;
1515 data->timestamp = jiffies;
1516 task = rpc_run_task(&task_setup_data);
1517 if (IS_ERR(task))
1518 return PTR_ERR(task);
1519 status = nfs4_wait_for_completion_rpc_task(task);
1520 if (status != 0) {
1521 data->cancelled = 1;
1522 smp_wmb();
1523 } else
1524 status = data->rpc_status;
1525 rpc_put_task(task);
1526 return status;
1527 }
1528
1529 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1530 {
1531 struct nfs4_opendata *data = calldata;
1532 struct nfs4_state_owner *sp = data->owner;
1533 struct nfs_client *clp = sp->so_server->nfs_client;
1534
1535 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1536 goto out_wait;
1537 /*
1538 * Check if we still need to send an OPEN call, or if we can use
1539 * a delegation instead.
1540 */
1541 if (data->state != NULL) {
1542 struct nfs_delegation *delegation;
1543
1544 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1545 goto out_no_action;
1546 rcu_read_lock();
1547 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1548 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1549 data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
1550 can_open_delegated(delegation, data->o_arg.fmode))
1551 goto unlock_no_action;
1552 rcu_read_unlock();
1553 }
1554 /* Update client id. */
1555 data->o_arg.clientid = clp->cl_clientid;
1556 switch (data->o_arg.claim) {
1557 case NFS4_OPEN_CLAIM_PREVIOUS:
1558 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1559 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1560 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
1561 case NFS4_OPEN_CLAIM_FH:
1562 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1563 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1564 }
1565 data->timestamp = jiffies;
1566 if (nfs4_setup_sequence(data->o_arg.server,
1567 &data->o_arg.seq_args,
1568 &data->o_res.seq_res,
1569 task) != 0)
1570 nfs_release_seqid(data->o_arg.seqid);
1571
1572 /* Set the create mode (note dependency on the session type) */
1573 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
1574 if (data->o_arg.open_flags & O_EXCL) {
1575 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
1576 if (nfs4_has_persistent_session(clp))
1577 data->o_arg.createmode = NFS4_CREATE_GUARDED;
1578 else if (clp->cl_mvops->minor_version > 0)
1579 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
1580 }
1581 return;
1582 unlock_no_action:
1583 rcu_read_unlock();
1584 out_no_action:
1585 task->tk_action = NULL;
1586 out_wait:
1587 nfs4_sequence_done(task, &data->o_res.seq_res);
1588 }
1589
1590 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1591 {
1592 struct nfs4_opendata *data = calldata;
1593
1594 data->rpc_status = task->tk_status;
1595
1596 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1597 return;
1598
1599 if (task->tk_status == 0) {
1600 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1601 switch (data->o_res.f_attr->mode & S_IFMT) {
1602 case S_IFREG:
1603 break;
1604 case S_IFLNK:
1605 data->rpc_status = -ELOOP;
1606 break;
1607 case S_IFDIR:
1608 data->rpc_status = -EISDIR;
1609 break;
1610 default:
1611 data->rpc_status = -ENOTDIR;
1612 }
1613 }
1614 renew_lease(data->o_res.server, data->timestamp);
1615 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1616 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1617 }
1618 data->rpc_done = 1;
1619 }
1620
1621 static void nfs4_open_release(void *calldata)
1622 {
1623 struct nfs4_opendata *data = calldata;
1624 struct nfs4_state *state = NULL;
1625
1626 /* If this request hasn't been cancelled, do nothing */
1627 if (data->cancelled == 0)
1628 goto out_free;
1629 /* In case of error, no cleanup! */
1630 if (data->rpc_status != 0 || !data->rpc_done)
1631 goto out_free;
1632 /* In case we need an open_confirm, no cleanup! */
1633 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1634 goto out_free;
1635 state = nfs4_opendata_to_nfs4_state(data);
1636 if (!IS_ERR(state))
1637 nfs4_close_state(state, data->o_arg.fmode);
1638 out_free:
1639 nfs4_opendata_put(data);
1640 }
1641
1642 static const struct rpc_call_ops nfs4_open_ops = {
1643 .rpc_call_prepare = nfs4_open_prepare,
1644 .rpc_call_done = nfs4_open_done,
1645 .rpc_release = nfs4_open_release,
1646 };
1647
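/* Send the OPEN compound asynchronously and wait for it to complete.
 * Recovery opens are marked privileged so they can proceed while the
 * session slot table is draining. */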
1648 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1649 {
1650 struct inode *dir = data->dir->d_inode;
1651 struct nfs_server *server = NFS_SERVER(dir);
1652 struct nfs_openargs *o_arg = &data->o_arg;
1653 struct nfs_openres *o_res = &data->o_res;
1654 struct rpc_task *task;
1655 struct rpc_message msg = {
1656 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1657 .rpc_argp = o_arg,
1658 .rpc_resp = o_res,
1659 .rpc_cred = data->owner->so_cred,
1660 };
1661 struct rpc_task_setup task_setup_data = {
1662 .rpc_client = server->client,
1663 .rpc_message = &msg,
1664 .callback_ops = &nfs4_open_ops,
1665 .callback_data = data,
1666 .workqueue = nfsiod_workqueue,
1667 .flags = RPC_TASK_ASYNC,
1668 };
1669 int status;
1670
1671 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1672 kref_get(&data->kref);
1673 data->rpc_done = 0;
1674 data->rpc_status = 0;
1675 data->cancelled = 0;
1676 data->is_recover = 0;
1677 if (isrecover) {
1678 nfs4_set_sequence_privileged(&o_arg->seq_args);
1679 data->is_recover = 1;
1680 }
1681 task = rpc_run_task(&task_setup_data);
1682 if (IS_ERR(task))
1683 return PTR_ERR(task);
1684 status = nfs4_wait_for_completion_rpc_task(task);
1685 if (status != 0) {
1686 data->cancelled = 1;
1687 smp_wmb();
1688 } else
1689 status = data->rpc_status;
1690 rpc_put_task(task);
1691
1692 return status;
1693 }
1694
1695 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1696 {
1697 struct inode *dir = data->dir->d_inode;
1698 struct nfs_openres *o_res = &data->o_res;
1699 int status;
1700
1701 status = nfs4_run_open_task(data, 1);
1702 if (status != 0 || !data->rpc_done)
1703 return status;
1704
1705 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1706
1707 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1708 status = _nfs4_proc_open_confirm(data);
1709 if (status != 0)
1710 return status;
1711 }
1712
1713 return status;
1714 }
1715
1716 static int nfs4_opendata_access(struct rpc_cred *cred,
1717 struct nfs4_opendata *opendata,
1718 struct nfs4_state *state, fmode_t fmode,
1719 int openflags)
1720 {
1721 struct nfs_access_entry cache;
1722 u32 mask;
1723
1724 /* access call failed or for some reason the server doesn't
1725 * support any access modes -- defer access call until later */
1726 if (opendata->o_res.access_supported == 0)
1727 return 0;
1728
1729 mask = 0;
1730 /* don't check MAY_WRITE - a newly created file may not have
1731 * write mode bits, but POSIX allows the creating process to write.
1732 * Use openflags to check for exec, because fmode won't
1733 * always have FMODE_EXEC set when the file is opened for exec. */
1734 if (openflags & __FMODE_EXEC) {
1735 /* ONLY check for exec rights */
1736 mask = MAY_EXEC;
1737 } else if (fmode & FMODE_READ)
1738 mask = MAY_READ;
1739
1740 cache.cred = cred;
1741 cache.jiffies = jiffies;
1742 nfs_access_set_mask(&cache, opendata->o_res.access_result);
1743 nfs_access_add_cache(state->inode, &cache);
1744
1745 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
1746 return 0;
1747
1748 /* even though OPEN succeeded, access is denied. Close the file */
1749 nfs4_close_state(state, fmode);
1750 return -EACCES;
1751 }
1752
1753 /*
1754 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1755 */
1756 static int _nfs4_proc_open(struct nfs4_opendata *data)
1757 {
1758 struct inode *dir = data->dir->d_inode;
1759 struct nfs_server *server = NFS_SERVER(dir);
1760 struct nfs_openargs *o_arg = &data->o_arg;
1761 struct nfs_openres *o_res = &data->o_res;
1762 int status;
1763
1764 status = nfs4_run_open_task(data, 0);
1765 if (!data->rpc_done)
1766 return status;
1767 if (status != 0) {
1768 if (status == -NFS4ERR_BADNAME &&
1769 !(o_arg->open_flags & O_CREAT))
1770 return -ENOENT;
1771 return status;
1772 }
1773
1774 nfs_fattr_map_and_free_names(server, &data->f_attr);
1775
1776 if (o_arg->open_flags & O_CREAT)
1777 update_changeattr(dir, &o_res->cinfo);
1778 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1779 server->caps &= ~NFS_CAP_POSIX_LOCK;
1780 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1781 status = _nfs4_proc_open_confirm(data);
1782 if (status != 0)
1783 return status;
1784 }
1785 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1786 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1787 return 0;
1788 }
1789
1790 static int nfs4_recover_expired_lease(struct nfs_server *server)
1791 {
1792 return nfs4_client_recover_expired_lease(server->nfs_client);
1793 }
1794
1795 /*
1796 * OPEN_EXPIRED:
1797 * reclaim state on the server after a network partition.
1798 * Assumes caller holds the appropriate lock
1799 */
1800 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1801 {
1802 struct nfs4_opendata *opendata;
1803 int ret;
1804
1805 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1806 NFS4_OPEN_CLAIM_FH);
1807 if (IS_ERR(opendata))
1808 return PTR_ERR(opendata);
1809 ret = nfs4_open_recover(opendata, state);
1810 if (ret == -ESTALE)
1811 d_drop(ctx->dentry);
1812 nfs4_opendata_put(opendata);
1813 return ret;
1814 }
1815
1816 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1817 {
1818 struct nfs_server *server = NFS_SERVER(state->inode);
1819 struct nfs4_exception exception = { };
1820 int err;
1821
1822 do {
1823 err = _nfs4_open_expired(ctx, state);
1824 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1825 continue;
1826 switch (err) {
1827 default:
1828 goto out;
1829 case -NFS4ERR_GRACE:
1830 case -NFS4ERR_DELAY:
1831 nfs4_handle_exception(server, err, &exception);
1832 err = 0;
1833 }
1834 } while (exception.retry);
1835 out:
1836 return err;
1837 }
1838
1839 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1840 {
1841 struct nfs_open_context *ctx;
1842 int ret;
1843
1844 ctx = nfs4_state_find_open_context(state);
1845 if (IS_ERR(ctx))
1846 return -EAGAIN;
1847 ret = nfs4_do_open_expired(ctx, state);
1848 put_nfs_open_context(ctx);
1849 return ret;
1850 }
1851
1852 #if defined(CONFIG_NFS_V4_1)
1853 static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
1854 {
1855 struct nfs_server *server = NFS_SERVER(state->inode);
1856 nfs4_stateid *stateid = &state->stateid;
1857 int status;
1858
1859 /* If a state reset has been done, test_stateid is unneeded */
1860 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1861 return;
1862
1863 status = nfs41_test_stateid(server, stateid);
1864 if (status != NFS_OK) {
1865 /* Free the stateid unless the server explicitly
1866 * informs us the stateid is unrecognized. */
1867 if (status != -NFS4ERR_BAD_STATEID)
1868 nfs41_free_stateid(server, stateid);
1869 nfs_remove_bad_delegation(state->inode);
1870
1871 write_seqlock(&state->seqlock);
1872 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1873 write_sequnlock(&state->seqlock);
1874 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1875 }
1876 }
1877
1878 /**
1879 * nfs41_check_open_stateid - possibly free an open stateid
1880 *
1881 * @state: NFSv4 state for an inode
1882 *
1883 * Returns NFS_OK if recovery for this stateid is now finished.
1884 * Otherwise a negative NFS4ERR value is returned.
1885 */
1886 static int nfs41_check_open_stateid(struct nfs4_state *state)
1887 {
1888 struct nfs_server *server = NFS_SERVER(state->inode);
1889 nfs4_stateid *stateid = &state->open_stateid;
1890 int status;
1891
1892 /* If a state reset has been done, test_stateid is unneeded */
1893 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
1894 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
1895 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
1896 return -NFS4ERR_BAD_STATEID;
1897
1898 status = nfs41_test_stateid(server, stateid);
1899 if (status != NFS_OK) {
1900 /* Free the stateid unless the server explicitly
1901 * informs us the stateid is unrecognized. */
1902 if (status != -NFS4ERR_BAD_STATEID)
1903 nfs41_free_stateid(server, stateid);
1904
1905 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1906 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1907 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1908 clear_bit(NFS_OPEN_STATE, &state->flags);
1909 }
1910 return status;
1911 }
1912
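/*
 * NFSv4.1 variant of open state recovery: use TEST_STATEID to check the
 * delegation and open stateids first, and fall back to a full
 * nfs4_open_expired() reclaim only if the open stateid is no longer valid.
 */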
1913 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1914 {
1915 int status;
1916
1917 nfs41_clear_delegation_stateid(state);
1918 status = nfs41_check_open_stateid(state);
1919 if (status != NFS_OK)
1920 status = nfs4_open_expired(sp, state);
1921 return status;
1922 }
1923 #endif
1924
1925 /*
1926  * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
1927 * fields corresponding to attributes that were used to store the verifier.
1928 * Make sure we clobber those fields in the later setattr call
1929 */
1930 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1931 {
1932 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1933 !(sattr->ia_valid & ATTR_ATIME_SET))
1934 sattr->ia_valid |= ATTR_ATIME;
1935
1936 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1937 !(sattr->ia_valid & ATTR_MTIME_SET))
1938 sattr->ia_valid |= ATTR_MTIME;
1939 }
1940
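/*
 * Send the OPEN compound and convert the reply into a referenced
 * nfs4_state. The so_reclaim_seqcount check detects a state recovery
 * pass that raced with this open, in which case stateid recovery is
 * scheduled for the new state.
 */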
1941 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
1942 fmode_t fmode,
1943 int flags,
1944 struct nfs4_state **res)
1945 {
1946 struct nfs4_state_owner *sp = opendata->owner;
1947 struct nfs_server *server = sp->so_server;
1948 struct nfs4_state *state;
1949 unsigned int seq;
1950 int ret;
1951
1952 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
1953
1954 ret = _nfs4_proc_open(opendata);
1955 if (ret != 0)
1956 goto out;
1957
1958 state = nfs4_opendata_to_nfs4_state(opendata);
1959 ret = PTR_ERR(state);
1960 if (IS_ERR(state))
1961 goto out;
1962 if (server->caps & NFS_CAP_POSIX_LOCK)
1963 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1964
1965 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
1966 if (ret != 0)
1967 goto out;
1968
1969 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
1970 nfs4_schedule_stateid_recovery(server, state);
1971 *res = state;
1972 out:
1973 return ret;
1974 }
1975
1976 /*
1977 * Returns a referenced nfs4_state
1978 */
1979 static int _nfs4_do_open(struct inode *dir,
1980 struct dentry *dentry,
1981 fmode_t fmode,
1982 int flags,
1983 struct iattr *sattr,
1984 struct rpc_cred *cred,
1985 struct nfs4_state **res,
1986 struct nfs4_threshold **ctx_th)
1987 {
1988 struct nfs4_state_owner *sp;
1989 struct nfs4_state *state = NULL;
1990 struct nfs_server *server = NFS_SERVER(dir);
1991 struct nfs4_opendata *opendata;
1992 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
1993 int status;
1994
1995 /* Protect against reboot recovery conflicts */
1996 status = -ENOMEM;
1997 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1998 if (sp == NULL) {
1999 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2000 goto out_err;
2001 }
2002 status = nfs4_recover_expired_lease(server);
2003 if (status != 0)
2004 goto err_put_state_owner;
2005 if (dentry->d_inode != NULL)
2006 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
2007 status = -ENOMEM;
2008 if (dentry->d_inode)
2009 claim = NFS4_OPEN_CLAIM_FH;
2010 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2011 claim, GFP_KERNEL);
2012 if (opendata == NULL)
2013 goto err_put_state_owner;
2014
2015 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2016 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2017 if (!opendata->f_attr.mdsthreshold)
2018 goto err_opendata_put;
2019 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2020 }
2021 if (dentry->d_inode != NULL)
2022 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
2023
2024 status = _nfs4_open_and_get_state(opendata, fmode, flags, &state);
2025 if (status != 0)
2026 goto err_opendata_put;
2027
2028 if ((opendata->o_arg.open_flags & O_EXCL) &&
2029 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2030 nfs4_exclusive_attrset(opendata, sattr);
2031
2032 nfs_fattr_init(opendata->o_res.f_attr);
2033 status = nfs4_do_setattr(state->inode, cred,
2034 opendata->o_res.f_attr, sattr,
2035 state);
2036 if (status == 0)
2037 nfs_setattr_update_inode(state->inode, sattr);
2038 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
2039 }
2040
2041 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
2042 *ctx_th = opendata->f_attr.mdsthreshold;
2043 else
2044 kfree(opendata->f_attr.mdsthreshold);
2045 opendata->f_attr.mdsthreshold = NULL;
2046
2047 nfs4_opendata_put(opendata);
2048 nfs4_put_state_owner(sp);
2049 *res = state;
2050 return 0;
2051 err_opendata_put:
2052 kfree(opendata->f_attr.mdsthreshold);
2053 nfs4_opendata_put(opendata);
2054 err_put_state_owner:
2055 nfs4_put_state_owner(sp);
2056 out_err:
2057 *res = NULL;
2058 return status;
2059 }
2060
2061
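/*
 * Retry wrapper around _nfs4_do_open(). BAD_SEQID and BAD_STATEID are
 * retried with fresh state (see the comments below), -EAGAIN indicates
 * a delegation was found and the open should be replayed, and all other
 * errors go through the standard nfs4_handle_exception() path.
 */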
2062 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2063 struct dentry *dentry,
2064 fmode_t fmode,
2065 int flags,
2066 struct iattr *sattr,
2067 struct rpc_cred *cred,
2068 struct nfs4_threshold **ctx_th)
2069 {
2070 struct nfs_server *server = NFS_SERVER(dir);
2071 struct nfs4_exception exception = { };
2072 struct nfs4_state *res;
2073 int status;
2074
2075 fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC;
2076 do {
2077 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
2078 &res, ctx_th);
2079 if (status == 0)
2080 break;
2081 /* NOTE: BAD_SEQID means the server and client disagree about the
2082 * book-keeping w.r.t. state-changing operations
2083 * (OPEN/CLOSE/LOCK/LOCKU...)
2084 * It is actually a sign of a bug on the client or on the server.
2085 *
2086 * If we receive a BAD_SEQID error in the particular case of
2087 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2088 * have unhashed the old state_owner for us, and that we can
2089 * therefore safely retry using a new one. We should still warn
2090 * the user though...
2091 */
2092 if (status == -NFS4ERR_BAD_SEQID) {
2093 pr_warn_ratelimited("NFS: v4 server %s "
2094 				"returned a bad sequence-id error!\n",
2095 NFS_SERVER(dir)->nfs_client->cl_hostname);
2096 exception.retry = 1;
2097 continue;
2098 }
2099 /*
2100 * BAD_STATEID on OPEN means that the server cancelled our
2101 * state before it received the OPEN_CONFIRM.
2102 * Recover by retrying the request as per the discussion
2103 * on Page 181 of RFC3530.
2104 */
2105 if (status == -NFS4ERR_BAD_STATEID) {
2106 exception.retry = 1;
2107 continue;
2108 }
2109 if (status == -EAGAIN) {
2110 /* We must have found a delegation */
2111 exception.retry = 1;
2112 continue;
2113 }
2114 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2115 continue;
2116 res = ERR_PTR(nfs4_handle_exception(server,
2117 status, &exception));
2118 } while (exception.retry);
2119 return res;
2120 }
2121
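/*
 * Issue a single SETATTR. Prefer a delegation stateid that covers the
 * required open mode; for size changes without one, use the open/lock
 * stateid; otherwise send the zero stateid. On success with valid open
 * state, the lease is renewed.
 */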
2122 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2123 struct nfs_fattr *fattr, struct iattr *sattr,
2124 struct nfs4_state *state)
2125 {
2126 struct nfs_server *server = NFS_SERVER(inode);
2127 struct nfs_setattrargs arg = {
2128 .fh = NFS_FH(inode),
2129 .iap = sattr,
2130 .server = server,
2131 .bitmask = server->attr_bitmask,
2132 };
2133 struct nfs_setattrres res = {
2134 .fattr = fattr,
2135 .server = server,
2136 };
2137 struct rpc_message msg = {
2138 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2139 .rpc_argp = &arg,
2140 .rpc_resp = &res,
2141 .rpc_cred = cred,
2142 };
2143 unsigned long timestamp = jiffies;
2144 fmode_t fmode;
2145 bool truncate;
2146 int status;
2147
2148 nfs_fattr_init(fattr);
2149
2150 /* Servers should only apply open mode checks for file size changes */
2151 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2152 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2153
2154 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2155 /* Use that stateid */
2156 } else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) {
2157 struct nfs_lockowner lockowner = {
2158 .l_owner = current->files,
2159 .l_pid = current->tgid,
2160 };
2161 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2162 &lockowner);
2163 } else
2164 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2165
2166 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2167 if (status == 0 && state != NULL)
2168 renew_lease(server, timestamp);
2169 return status;
2170 }
2171
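/*
 * Retry wrapper around _nfs4_do_setattr(). NFS4ERR_OPENMODE on a
 * SETATTR that isn't changing the file size indicates a buggy server;
 * if we hold no write-mode open state the error is converted to
 * -EBADF (or -EACCES for ATTR_OPEN) instead of being retried.
 */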
2172 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2173 struct nfs_fattr *fattr, struct iattr *sattr,
2174 struct nfs4_state *state)
2175 {
2176 struct nfs_server *server = NFS_SERVER(inode);
2177 struct nfs4_exception exception = {
2178 .state = state,
2179 .inode = inode,
2180 };
2181 int err;
2182 do {
2183 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
2184 switch (err) {
2185 case -NFS4ERR_OPENMODE:
2186 if (!(sattr->ia_valid & ATTR_SIZE)) {
2187 pr_warn_once("NFSv4: server %s is incorrectly "
2188 "applying open mode checks to "
2189 "a SETATTR that is not "
2190 "changing file size.\n",
2191 server->nfs_client->cl_hostname);
2192 }
2193 if (state && !(state->state & FMODE_WRITE)) {
2194 err = -EBADF;
2195 if (sattr->ia_valid & ATTR_OPEN)
2196 err = -EACCES;
2197 goto out;
2198 }
2199 }
2200 err = nfs4_handle_exception(server, err, &exception);
2201 } while (exception.retry);
2202 out:
2203 return err;
2204 }
2205
2206 struct nfs4_closedata {
2207 struct inode *inode;
2208 struct nfs4_state *state;
2209 struct nfs_closeargs arg;
2210 struct nfs_closeres res;
2211 struct nfs_fattr fattr;
2212 unsigned long timestamp;
2213 bool roc;
2214 u32 roc_barrier;
2215 };
2216
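/*
 * rpc_release callback for CLOSE/OPEN_DOWNGRADE: drop the pNFS
 * return-on-close reference, the open state, the seqid, the state owner
 * and the superblock reference taken in nfs4_do_close().
 */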
2217 static void nfs4_free_closedata(void *data)
2218 {
2219 struct nfs4_closedata *calldata = data;
2220 struct nfs4_state_owner *sp = calldata->state->owner;
2221 struct super_block *sb = calldata->state->inode->i_sb;
2222
2223 if (calldata->roc)
2224 pnfs_roc_release(calldata->state->inode);
2225 nfs4_put_open_state(calldata->state);
2226 nfs_free_seqid(calldata->arg.seqid);
2227 nfs4_put_state_owner(sp);
2228 nfs_sb_deactive(sb);
2229 kfree(calldata);
2230 }
2231
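/*
 * Clear the open-mode flags corresponding to the share access we have
 * just given up, under the state owner's lock.
 */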
2232 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2233 fmode_t fmode)
2234 {
2235 spin_lock(&state->owner->so_lock);
2236 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2237 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
2238 case FMODE_WRITE:
2239 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2240 break;
2241 case FMODE_READ:
2242 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2243 break;
2244 case 0:
2245 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2246 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2247 clear_bit(NFS_OPEN_STATE, &state->flags);
2248 }
2249 spin_unlock(&state->owner->so_lock);
2250 }
2251
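/*
 * rpc_call_done for CLOSE/OPEN_DOWNGRADE: on success record the new
 * open stateid, renew the lease and clear the released open-mode flags.
 * Stale/expired stateid errors are ignored when the file is being
 * closed completely; everything else goes through the async error
 * handler and may restart the call.
 */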
2252 static void nfs4_close_done(struct rpc_task *task, void *data)
2253 {
2254 struct nfs4_closedata *calldata = data;
2255 struct nfs4_state *state = calldata->state;
2256 struct nfs_server *server = NFS_SERVER(calldata->inode);
2257
2258 dprintk("%s: begin!\n", __func__);
2259 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2260 return;
2261 /* hmm. we are done with the inode, and in the process of freeing
2262 * the state_owner. we keep this around to process errors
2263 */
2264 switch (task->tk_status) {
2265 case 0:
2266 if (calldata->roc)
2267 pnfs_roc_set_barrier(state->inode,
2268 calldata->roc_barrier);
2269 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2270 renew_lease(server, calldata->timestamp);
2271 nfs4_close_clear_stateid_flags(state,
2272 calldata->arg.fmode);
2273 break;
2274 case -NFS4ERR_STALE_STATEID:
2275 case -NFS4ERR_OLD_STATEID:
2276 case -NFS4ERR_BAD_STATEID:
2277 case -NFS4ERR_EXPIRED:
2278 if (calldata->arg.fmode == 0)
2279 break;
2280 default:
2281 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2282 rpc_restart_call_prepare(task);
2283 }
2284 nfs_release_seqid(calldata->arg.seqid);
2285 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2286 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2287 }
2288
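/*
 * rpc_call_prepare for CLOSE: work out, under the state owner's lock,
 * whether any share access remains. If some does, downgrade to
 * OPEN_DOWNGRADE; if none is left, send CLOSE (optionally draining
 * pNFS return-on-close layouts first); if there is nothing for the
 * server to release, skip the RPC entirely.
 */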
2289 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2290 {
2291 struct nfs4_closedata *calldata = data;
2292 struct nfs4_state *state = calldata->state;
2293 struct inode *inode = calldata->inode;
2294 int call_close = 0;
2295
2296 dprintk("%s: begin!\n", __func__);
2297 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2298 goto out_wait;
2299
2300 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2301 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2302 spin_lock(&state->owner->so_lock);
2303 /* Calculate the change in open mode */
2304 if (state->n_rdwr == 0) {
2305 if (state->n_rdonly == 0) {
2306 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2307 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2308 calldata->arg.fmode &= ~FMODE_READ;
2309 }
2310 if (state->n_wronly == 0) {
2311 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2312 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2313 calldata->arg.fmode &= ~FMODE_WRITE;
2314 }
2315 }
2316 if (!nfs4_valid_open_stateid(state))
2317 call_close = 0;
2318 spin_unlock(&state->owner->so_lock);
2319
2320 if (!call_close) {
2321 /* Note: exit _without_ calling nfs4_close_done */
2322 goto out_no_action;
2323 }
2324
2325 if (calldata->arg.fmode == 0) {
2326 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2327 if (calldata->roc &&
2328 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) {
2329 nfs_release_seqid(calldata->arg.seqid);
2330 goto out_wait;
2331 }
2332 }
2333
2334 nfs_fattr_init(calldata->res.fattr);
2335 calldata->timestamp = jiffies;
2336 if (nfs4_setup_sequence(NFS_SERVER(inode),
2337 &calldata->arg.seq_args,
2338 &calldata->res.seq_res,
2339 task) != 0)
2340 nfs_release_seqid(calldata->arg.seqid);
2341 dprintk("%s: done!\n", __func__);
2342 return;
2343 out_no_action:
2344 task->tk_action = NULL;
2345 out_wait:
2346 nfs4_sequence_done(task, &calldata->res.seq_res);
2347 }
2348
2349 static const struct rpc_call_ops nfs4_close_ops = {
2350 .rpc_call_prepare = nfs4_close_prepare,
2351 .rpc_call_done = nfs4_close_done,
2352 .rpc_release = nfs4_free_closedata,
2353 };
2354
2355 /*
2356 * It is possible for data to be read/written from a mem-mapped file
2357 * after the sys_close call (which hits the vfs layer as a flush).
2358 * This means that we can't safely call nfsv4 close on a file until
2359 * the inode is cleared. This in turn means that we are not good
2360 * NFSv4 citizens - we do not indicate to the server to update the file's
2361 * share state even when we are done with one of the three share
2362  * stateids in the inode.
2363 *
2364 * NOTE: Caller must be holding the sp->so_owner semaphore!
2365 */
2366 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2367 {
2368 struct nfs_server *server = NFS_SERVER(state->inode);
2369 struct nfs4_closedata *calldata;
2370 struct nfs4_state_owner *sp = state->owner;
2371 struct rpc_task *task;
2372 struct rpc_message msg = {
2373 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2374 .rpc_cred = state->owner->so_cred,
2375 };
2376 struct rpc_task_setup task_setup_data = {
2377 .rpc_client = server->client,
2378 .rpc_message = &msg,
2379 .callback_ops = &nfs4_close_ops,
2380 .workqueue = nfsiod_workqueue,
2381 .flags = RPC_TASK_ASYNC,
2382 };
2383 int status = -ENOMEM;
2384
2385 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2386 if (calldata == NULL)
2387 goto out;
2388 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2389 calldata->inode = state->inode;
2390 calldata->state = state;
2391 calldata->arg.fh = NFS_FH(state->inode);
2392 calldata->arg.stateid = &state->open_stateid;
2393 /* Serialization for the sequence id */
2394 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2395 if (calldata->arg.seqid == NULL)
2396 goto out_free_calldata;
2397 calldata->arg.fmode = 0;
2398 calldata->arg.bitmask = server->cache_consistency_bitmask;
2399 calldata->res.fattr = &calldata->fattr;
2400 calldata->res.seqid = calldata->arg.seqid;
2401 calldata->res.server = server;
2402 calldata->roc = pnfs_roc(state->inode);
2403 nfs_sb_active(calldata->inode->i_sb);
2404
2405 msg.rpc_argp = &calldata->arg;
2406 msg.rpc_resp = &calldata->res;
2407 task_setup_data.callback_data = calldata;
2408 task = rpc_run_task(&task_setup_data);
2409 if (IS_ERR(task))
2410 return PTR_ERR(task);
2411 status = 0;
2412 if (wait)
2413 status = rpc_wait_for_completion_task(task);
2414 rpc_put_task(task);
2415 return status;
2416 out_free_calldata:
2417 kfree(calldata);
2418 out:
2419 nfs4_put_open_state(state);
2420 nfs4_put_state_owner(sp);
2421 return status;
2422 }
2423
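/*
 * Open helper for the VFS atomic open path: performs the NFSv4 OPEN on
 * behalf of the open context, stores the resulting state in the context
 * and returns the corresponding inode.
 */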
2424 static struct inode *
2425 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2426 {
2427 struct nfs4_state *state;
2428
2429 /* Protect against concurrent sillydeletes */
2430 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
2431 ctx->cred, &ctx->mdsthreshold);
2432 if (IS_ERR(state))
2433 return ERR_CAST(state);
2434 ctx->state = state;
2435 return igrab(state->inode);
2436 }
2437
2438 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2439 {
2440 if (ctx->state == NULL)
2441 return;
2442 if (is_sync)
2443 nfs4_close_sync(ctx->state, ctx->mode);
2444 else
2445 nfs4_close_state(ctx->state, ctx->mode);
2446 }
2447
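/*
 * Probe the server's supported attributes with a SERVER_CAPS compound
 * and translate the returned bitmask into NFS_CAP_* flags and the
 * cache-consistency bitmask used for post-op attributes.
 */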
2448 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2449 {
2450 struct nfs4_server_caps_arg args = {
2451 .fhandle = fhandle,
2452 };
2453 struct nfs4_server_caps_res res = {};
2454 struct rpc_message msg = {
2455 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2456 .rpc_argp = &args,
2457 .rpc_resp = &res,
2458 };
2459 int status;
2460
2461 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2462 if (status == 0) {
2463 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2464 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2465 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2466 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2467 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2468 NFS_CAP_CTIME|NFS_CAP_MTIME);
2469 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2470 server->caps |= NFS_CAP_ACLS;
2471 if (res.has_links != 0)
2472 server->caps |= NFS_CAP_HARDLINKS;
2473 if (res.has_symlinks != 0)
2474 server->caps |= NFS_CAP_SYMLINKS;
2475 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2476 server->caps |= NFS_CAP_FILEID;
2477 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2478 server->caps |= NFS_CAP_MODE;
2479 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2480 server->caps |= NFS_CAP_NLINK;
2481 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2482 server->caps |= NFS_CAP_OWNER;
2483 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2484 server->caps |= NFS_CAP_OWNER_GROUP;
2485 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2486 server->caps |= NFS_CAP_ATIME;
2487 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2488 server->caps |= NFS_CAP_CTIME;
2489 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2490 server->caps |= NFS_CAP_MTIME;
2491
2492 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2493 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2494 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2495 server->acl_bitmask = res.acl_bitmask;
2496 server->fh_expire_type = res.fh_expire_type;
2497 }
2498
2499 return status;
2500 }
2501
2502 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2503 {
2504 struct nfs4_exception exception = { };
2505 int err;
2506 do {
2507 err = nfs4_handle_exception(server,
2508 _nfs4_server_capabilities(server, fhandle),
2509 &exception);
2510 } while (exception.retry);
2511 return err;
2512 }
2513
2514 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2515 struct nfs_fsinfo *info)
2516 {
2517 struct nfs4_lookup_root_arg args = {
2518 .bitmask = nfs4_fattr_bitmap,
2519 };
2520 struct nfs4_lookup_res res = {
2521 .server = server,
2522 .fattr = info->fattr,
2523 .fh = fhandle,
2524 };
2525 struct rpc_message msg = {
2526 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2527 .rpc_argp = &args,
2528 .rpc_resp = &res,
2529 };
2530
2531 nfs_fattr_init(info->fattr);
2532 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2533 }
2534
2535 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2536 struct nfs_fsinfo *info)
2537 {
2538 struct nfs4_exception exception = { };
2539 int err;
2540 do {
2541 err = _nfs4_lookup_root(server, fhandle, info);
2542 switch (err) {
2543 case 0:
2544 case -NFS4ERR_WRONGSEC:
2545 goto out;
2546 default:
2547 err = nfs4_handle_exception(server, err, &exception);
2548 }
2549 } while (exception.retry);
2550 out:
2551 return err;
2552 }
2553
2554 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2555 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2556 {
2557 struct rpc_auth *auth;
2558 int ret;
2559
2560 auth = rpcauth_create(flavor, server->client);
2561 if (IS_ERR(auth)) {
2562 ret = -EACCES;
2563 goto out;
2564 }
2565 ret = nfs4_lookup_root(server, fhandle, info);
2566 out:
2567 return ret;
2568 }
2569
2570 /*
2571 * Retry pseudoroot lookup with various security flavors. We do this when:
2572 *
2573 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
2574 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
2575 *
2576 * Returns zero on success, or a negative NFS4ERR value, or a
2577 * negative errno value.
2578 */
2579 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2580 struct nfs_fsinfo *info)
2581 {
2582 /* Per 3530bis 15.33.5 */
2583 static const rpc_authflavor_t flav_array[] = {
2584 RPC_AUTH_GSS_KRB5P,
2585 RPC_AUTH_GSS_KRB5I,
2586 RPC_AUTH_GSS_KRB5,
2587 RPC_AUTH_UNIX, /* courtesy */
2588 RPC_AUTH_NULL,
2589 };
2590 int status = -EPERM;
2591 size_t i;
2592
2593 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
2594 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2595 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2596 continue;
2597 break;
2598 }
2599
2600 /*
2601 	 * -EACCES could mean that the user doesn't have correct permissions
2602 * to access the mount. It could also mean that we tried to mount
2603 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2604 * existing mount programs don't handle -EACCES very well so it should
2605 * be mapped to -EPERM instead.
2606 */
2607 if (status == -EACCES)
2608 status = -EPERM;
2609 return status;
2610 }
2611
2612 static int nfs4_do_find_root_sec(struct nfs_server *server,
2613 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
2614 {
2615 int mv = server->nfs_client->cl_minorversion;
2616 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
2617 }
2618
2619 /**
2620 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
2621 * @server: initialized nfs_server handle
2622 * @fhandle: we fill in the pseudo-fs root file handle
2623 * @info: we fill in an FSINFO struct
2624 *
2625 * Returns zero on success, or a negative errno.
2626 */
2627 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
2628 struct nfs_fsinfo *info)
2629 {
2630 int status;
2631
2632 status = nfs4_lookup_root(server, fhandle, info);
2633 if ((status == -NFS4ERR_WRONGSEC) &&
2634 !(server->flags & NFS_MOUNT_SECFLAVOUR))
2635 status = nfs4_do_find_root_sec(server, fhandle, info);
2636
2637 if (status == 0)
2638 status = nfs4_server_capabilities(server, fhandle);
2639 if (status == 0)
2640 status = nfs4_do_fsinfo(server, fhandle, info);
2641
2642 return nfs4_map_errors(status);
2643 }
2644
2645 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
2646 struct nfs_fsinfo *info)
2647 {
2648 int error;
2649 struct nfs_fattr *fattr = info->fattr;
2650
2651 error = nfs4_server_capabilities(server, mntfh);
2652 if (error < 0) {
2653 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
2654 return error;
2655 }
2656
2657 error = nfs4_proc_getattr(server, mntfh, fattr);
2658 if (error < 0) {
2659 dprintk("nfs4_get_root: getattr error = %d\n", -error);
2660 return error;
2661 }
2662
2663 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
2664 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
2665 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
2666
2667 return error;
2668 }
2669
2670 /*
2671 * Get locations and (maybe) other attributes of a referral.
2672 * Note that we'll actually follow the referral later when
2673 * we detect fsid mismatch in inode revalidation
2674 */
2675 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2676 const struct qstr *name, struct nfs_fattr *fattr,
2677 struct nfs_fh *fhandle)
2678 {
2679 int status = -ENOMEM;
2680 struct page *page = NULL;
2681 struct nfs4_fs_locations *locations = NULL;
2682
2683 page = alloc_page(GFP_KERNEL);
2684 if (page == NULL)
2685 goto out;
2686 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2687 if (locations == NULL)
2688 goto out;
2689
2690 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2691 if (status != 0)
2692 goto out;
2693 /* Make sure server returned a different fsid for the referral */
2694 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2695 dprintk("%s: server did not return a different fsid for"
2696 " a referral at %s\n", __func__, name->name);
2697 status = -EIO;
2698 goto out;
2699 }
2700 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2701 nfs_fixup_referral_attributes(&locations->fattr);
2702
2703 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2704 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2705 memset(fhandle, 0, sizeof(struct nfs_fh));
2706 out:
2707 if (page)
2708 __free_page(page);
2709 kfree(locations);
2710 return status;
2711 }
2712
2713 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2714 {
2715 struct nfs4_getattr_arg args = {
2716 .fh = fhandle,
2717 .bitmask = server->attr_bitmask,
2718 };
2719 struct nfs4_getattr_res res = {
2720 .fattr = fattr,
2721 .server = server,
2722 };
2723 struct rpc_message msg = {
2724 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2725 .rpc_argp = &args,
2726 .rpc_resp = &res,
2727 };
2728
2729 nfs_fattr_init(fattr);
2730 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2731 }
2732
2733 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2734 {
2735 struct nfs4_exception exception = { };
2736 int err;
2737 do {
2738 err = nfs4_handle_exception(server,
2739 _nfs4_proc_getattr(server, fhandle, fattr),
2740 &exception);
2741 } while (exception.retry);
2742 return err;
2743 }
2744
2745 /*
2746  * The file is not closed if it is opened due to a request to change
2747 * the size of the file. The open call will not be needed once the
2748 * VFS layer lookup-intents are implemented.
2749 *
2750 * Close is called when the inode is destroyed.
2751 * If we haven't opened the file for O_WRONLY, we
2752 * need to in the size_change case to obtain a stateid.
2753 *
2754 * Got race?
2755 * Because OPEN is always done by name in nfsv4, it is
2756 * possible that we opened a different file by the same
2757 * name. We can recognize this race condition, but we
2758 * can't do anything about it besides returning an error.
2759 *
2760 * This will be fixed with VFS changes (lookup-intent).
2761 */
2762 static int
2763 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2764 struct iattr *sattr)
2765 {
2766 struct inode *inode = dentry->d_inode;
2767 struct rpc_cred *cred = NULL;
2768 struct nfs4_state *state = NULL;
2769 int status;
2770
2771 if (pnfs_ld_layoutret_on_setattr(inode))
2772 pnfs_commit_and_return_layout(inode);
2773
2774 nfs_fattr_init(fattr);
2775
2776 /* Deal with open(O_TRUNC) */
2777 if (sattr->ia_valid & ATTR_OPEN)
2778 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2779
2780 /* Optimization: if the end result is no change, don't RPC */
2781 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
2782 return 0;
2783
2784 /* Search for an existing open(O_WRITE) file */
2785 if (sattr->ia_valid & ATTR_FILE) {
2786 struct nfs_open_context *ctx;
2787
2788 ctx = nfs_file_open_context(sattr->ia_file);
2789 if (ctx) {
2790 cred = ctx->cred;
2791 state = ctx->state;
2792 }
2793 }
2794
2795 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2796 if (status == 0)
2797 nfs_setattr_update_inode(inode, sattr);
2798 return status;
2799 }
2800
2801 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2802 const struct qstr *name, struct nfs_fh *fhandle,
2803 struct nfs_fattr *fattr)
2804 {
2805 struct nfs_server *server = NFS_SERVER(dir);
2806 int status;
2807 struct nfs4_lookup_arg args = {
2808 .bitmask = server->attr_bitmask,
2809 .dir_fh = NFS_FH(dir),
2810 .name = name,
2811 };
2812 struct nfs4_lookup_res res = {
2813 .server = server,
2814 .fattr = fattr,
2815 .fh = fhandle,
2816 };
2817 struct rpc_message msg = {
2818 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2819 .rpc_argp = &args,
2820 .rpc_resp = &res,
2821 };
2822
2823 nfs_fattr_init(fattr);
2824
2825 dprintk("NFS call lookup %s\n", name->name);
2826 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2827 dprintk("NFS reply lookup: %d\n", status);
2828 return status;
2829 }
2830
2831 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2832 {
2833 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2834 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2835 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2836 fattr->nlink = 2;
2837 }
2838
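/*
 * LOOKUP with error handling: NFS4ERR_BADNAME becomes -ENOENT,
 * NFS4ERR_MOVED triggers a referral lookup, and NFS4ERR_WRONGSEC causes
 * a retry on a client clone using a security flavor negotiated via
 * SECINFO. On success *clnt may point at that new client.
 */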
2839 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2840 struct qstr *name, struct nfs_fh *fhandle,
2841 struct nfs_fattr *fattr)
2842 {
2843 struct nfs4_exception exception = { };
2844 struct rpc_clnt *client = *clnt;
2845 int err;
2846 do {
2847 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2848 switch (err) {
2849 case -NFS4ERR_BADNAME:
2850 err = -ENOENT;
2851 goto out;
2852 case -NFS4ERR_MOVED:
2853 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2854 goto out;
2855 case -NFS4ERR_WRONGSEC:
2856 err = -EPERM;
2857 if (client != *clnt)
2858 goto out;
2859
2860 client = nfs4_create_sec_client(client, dir, name);
2861 if (IS_ERR(client))
2862 return PTR_ERR(client);
2863
2864 exception.retry = 1;
2865 break;
2866 default:
2867 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2868 }
2869 } while (exception.retry);
2870
2871 out:
2872 if (err == 0)
2873 *clnt = client;
2874 else if (client != *clnt)
2875 rpc_shutdown_client(client);
2876
2877 return err;
2878 }
2879
2880 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
2881 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2882 {
2883 int status;
2884 struct rpc_clnt *client = NFS_CLIENT(dir);
2885
2886 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2887 if (client != NFS_CLIENT(dir)) {
2888 rpc_shutdown_client(client);
2889 nfs_fixup_secinfo_attributes(fattr);
2890 }
2891 return status;
2892 }
2893
2894 struct rpc_clnt *
2895 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2896 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2897 {
2898 int status;
2899 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2900
2901 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2902 if (status < 0) {
2903 rpc_shutdown_client(client);
2904 return ERR_PTR(status);
2905 }
2906 return client;
2907 }
2908
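/*
 * Map the generic MAY_* permission mask onto NFS4_ACCESS_* bits (which
 * differ for directories and regular files), issue an ACCESS call, and
 * refresh the inode attributes from the reply.
 */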
2909 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2910 {
2911 struct nfs_server *server = NFS_SERVER(inode);
2912 struct nfs4_accessargs args = {
2913 .fh = NFS_FH(inode),
2914 .bitmask = server->cache_consistency_bitmask,
2915 };
2916 struct nfs4_accessres res = {
2917 .server = server,
2918 };
2919 struct rpc_message msg = {
2920 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2921 .rpc_argp = &args,
2922 .rpc_resp = &res,
2923 .rpc_cred = entry->cred,
2924 };
2925 int mode = entry->mask;
2926 int status;
2927
2928 /*
2929 * Determine which access bits we want to ask for...
2930 */
2931 if (mode & MAY_READ)
2932 args.access |= NFS4_ACCESS_READ;
2933 if (S_ISDIR(inode->i_mode)) {
2934 if (mode & MAY_WRITE)
2935 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2936 if (mode & MAY_EXEC)
2937 args.access |= NFS4_ACCESS_LOOKUP;
2938 } else {
2939 if (mode & MAY_WRITE)
2940 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2941 if (mode & MAY_EXEC)
2942 args.access |= NFS4_ACCESS_EXECUTE;
2943 }
2944
2945 res.fattr = nfs_alloc_fattr();
2946 if (res.fattr == NULL)
2947 return -ENOMEM;
2948
2949 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2950 if (!status) {
2951 nfs_access_set_mask(entry, res.access);
2952 nfs_refresh_inode(inode, res.fattr);
2953 }
2954 nfs_free_fattr(res.fattr);
2955 return status;
2956 }
2957
2958 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2959 {
2960 struct nfs4_exception exception = { };
2961 int err;
2962 do {
2963 err = nfs4_handle_exception(NFS_SERVER(inode),
2964 _nfs4_proc_access(inode, entry),
2965 &exception);
2966 } while (exception.retry);
2967 return err;
2968 }
2969
2970 /*
2971 * TODO: For the time being, we don't try to get any attributes
2972 * along with any of the zero-copy operations READ, READDIR,
2973 * READLINK, WRITE.
2974 *
2975 * In the case of the first three, we want to put the GETATTR
2976 * after the read-type operation -- this is because it is hard
2977 * to predict the length of a GETATTR response in v4, and thus
2978 * align the READ data correctly. This means that the GETATTR
2979 * may end up partially falling into the page cache, and we should
2980 * shift it into the 'tail' of the xdr_buf before processing.
2981 * To do this efficiently, we need to know the total length
2982 * of data received, which doesn't seem to be available outside
2983 * of the RPC layer.
2984 *
2985 * In the case of WRITE, we also want to put the GETATTR after
2986 * the operation -- in this case because we want to make sure
2987 * we get the post-operation mtime and size.
2988 *
2989 * Both of these changes to the XDR layer would in fact be quite
2990 * minor, but I decided to leave them for a subsequent patch.
2991 */
2992 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2993 unsigned int pgbase, unsigned int pglen)
2994 {
2995 struct nfs4_readlink args = {
2996 .fh = NFS_FH(inode),
2997 .pgbase = pgbase,
2998 .pglen = pglen,
2999 .pages = &page,
3000 };
3001 struct nfs4_readlink_res res;
3002 struct rpc_message msg = {
3003 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3004 .rpc_argp = &args,
3005 .rpc_resp = &res,
3006 };
3007
3008 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3009 }
3010
3011 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3012 unsigned int pgbase, unsigned int pglen)
3013 {
3014 struct nfs4_exception exception = { };
3015 int err;
3016 do {
3017 err = nfs4_handle_exception(NFS_SERVER(inode),
3018 _nfs4_proc_readlink(inode, page, pgbase, pglen),
3019 &exception);
3020 } while (exception.retry);
3021 return err;
3022 }
3023
3024 /*
3025 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3026 */
3027 static int
3028 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3029 int flags)
3030 {
3031 struct nfs_open_context *ctx;
3032 struct nfs4_state *state;
3033 int status = 0;
3034
3035 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3036 if (IS_ERR(ctx))
3037 return PTR_ERR(ctx);
3038
3039 sattr->ia_mode &= ~current_umask();
3040 state = nfs4_do_open(dir, dentry, ctx->mode,
3041 flags, sattr, ctx->cred,
3042 &ctx->mdsthreshold);
3043 d_drop(dentry);
3044 if (IS_ERR(state)) {
3045 status = PTR_ERR(state);
3046 goto out;
3047 }
3048 d_add(dentry, igrab(state->inode));
3049 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
3050 ctx->state = state;
3051 out:
3052 put_nfs_open_context(ctx);
3053 return status;
3054 }
3055
3056 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3057 {
3058 struct nfs_server *server = NFS_SERVER(dir);
3059 struct nfs_removeargs args = {
3060 .fh = NFS_FH(dir),
3061 .name = *name,
3062 };
3063 struct nfs_removeres res = {
3064 .server = server,
3065 };
3066 struct rpc_message msg = {
3067 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3068 .rpc_argp = &args,
3069 .rpc_resp = &res,
3070 };
3071 int status;
3072
3073 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3074 if (status == 0)
3075 update_changeattr(dir, &res.cinfo);
3076 return status;
3077 }
3078
3079 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3080 {
3081 struct nfs4_exception exception = { };
3082 int err;
3083 do {
3084 err = nfs4_handle_exception(NFS_SERVER(dir),
3085 _nfs4_proc_remove(dir, name),
3086 &exception);
3087 } while (exception.retry);
3088 return err;
3089 }
3090
3091 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3092 {
3093 struct nfs_server *server = NFS_SERVER(dir);
3094 struct nfs_removeargs *args = msg->rpc_argp;
3095 struct nfs_removeres *res = msg->rpc_resp;
3096
3097 res->server = server;
3098 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3099 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
3100 }
3101
3102 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3103 {
3104 nfs4_setup_sequence(NFS_SERVER(data->dir),
3105 &data->args.seq_args,
3106 &data->res.seq_res,
3107 task);
3108 }
3109
3110 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3111 {
3112 struct nfs_removeres *res = task->tk_msg.rpc_resp;
3113
3114 if (!nfs4_sequence_done(task, &res->seq_res))
3115 return 0;
3116 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
3117 return 0;
3118 update_changeattr(dir, &res->cinfo);
3119 return 1;
3120 }
3121
3122 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3123 {
3124 struct nfs_server *server = NFS_SERVER(dir);
3125 struct nfs_renameargs *arg = msg->rpc_argp;
3126 struct nfs_renameres *res = msg->rpc_resp;
3127
3128 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3129 res->server = server;
3130 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
3131 }
3132
3133 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3134 {
3135 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3136 &data->args.seq_args,
3137 &data->res.seq_res,
3138 task);
3139 }
3140
3141 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3142 struct inode *new_dir)
3143 {
3144 struct nfs_renameres *res = task->tk_msg.rpc_resp;
3145
3146 if (!nfs4_sequence_done(task, &res->seq_res))
3147 return 0;
3148 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
3149 return 0;
3150
3151 update_changeattr(old_dir, &res->old_cinfo);
3152 update_changeattr(new_dir, &res->new_cinfo);
3153 return 1;
3154 }
3155
3156 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3157 struct inode *new_dir, struct qstr *new_name)
3158 {
3159 struct nfs_server *server = NFS_SERVER(old_dir);
3160 struct nfs_renameargs arg = {
3161 .old_dir = NFS_FH(old_dir),
3162 .new_dir = NFS_FH(new_dir),
3163 .old_name = old_name,
3164 .new_name = new_name,
3165 };
3166 struct nfs_renameres res = {
3167 .server = server,
3168 };
3169 struct rpc_message msg = {
3170 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
3171 .rpc_argp = &arg,
3172 .rpc_resp = &res,
3173 };
3174 int status = -ENOMEM;
3175
3176 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3177 if (!status) {
3178 update_changeattr(old_dir, &res.old_cinfo);
3179 update_changeattr(new_dir, &res.new_cinfo);
3180 }
3181 return status;
3182 }
3183
3184 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3185 struct inode *new_dir, struct qstr *new_name)
3186 {
3187 struct nfs4_exception exception = { };
3188 int err;
3189 do {
3190 err = nfs4_handle_exception(NFS_SERVER(old_dir),
3191 _nfs4_proc_rename(old_dir, old_name,
3192 new_dir, new_name),
3193 &exception);
3194 } while (exception.retry);
3195 return err;
3196 }
3197
3198 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3199 {
3200 struct nfs_server *server = NFS_SERVER(inode);
3201 struct nfs4_link_arg arg = {
3202 .fh = NFS_FH(inode),
3203 .dir_fh = NFS_FH(dir),
3204 .name = name,
3205 .bitmask = server->attr_bitmask,
3206 };
3207 struct nfs4_link_res res = {
3208 .server = server,
3209 };
3210 struct rpc_message msg = {
3211 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3212 .rpc_argp = &arg,
3213 .rpc_resp = &res,
3214 };
3215 int status = -ENOMEM;
3216
3217 res.fattr = nfs_alloc_fattr();
3218 if (res.fattr == NULL)
3219 goto out;
3220
3221 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3222 if (!status) {
3223 update_changeattr(dir, &res.cinfo);
3224 nfs_post_op_update_inode(inode, res.fattr);
3225 }
3226 out:
3227 nfs_free_fattr(res.fattr);
3228 return status;
3229 }
3230
3231 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3232 {
3233 struct nfs4_exception exception = { };
3234 int err;
3235 do {
3236 err = nfs4_handle_exception(NFS_SERVER(inode),
3237 _nfs4_proc_link(inode, dir, name),
3238 &exception);
3239 } while (exception.retry);
3240 return err;
3241 }
3242
3243 struct nfs4_createdata {
3244 struct rpc_message msg;
3245 struct nfs4_create_arg arg;
3246 struct nfs4_create_res res;
3247 struct nfs_fh fh;
3248 struct nfs_fattr fattr;
3249 };
3250
3251 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3252 struct qstr *name, struct iattr *sattr, u32 ftype)
3253 {
3254 struct nfs4_createdata *data;
3255
3256 data = kzalloc(sizeof(*data), GFP_KERNEL);
3257 if (data != NULL) {
3258 struct nfs_server *server = NFS_SERVER(dir);
3259
3260 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3261 data->msg.rpc_argp = &data->arg;
3262 data->msg.rpc_resp = &data->res;
3263 data->arg.dir_fh = NFS_FH(dir);
3264 data->arg.server = server;
3265 data->arg.name = name;
3266 data->arg.attrs = sattr;
3267 data->arg.ftype = ftype;
3268 data->arg.bitmask = server->attr_bitmask;
3269 data->res.server = server;
3270 data->res.fh = &data->fh;
3271 data->res.fattr = &data->fattr;
3272 nfs_fattr_init(data->res.fattr);
3273 }
3274 return data;
3275 }
3276
3277 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3278 {
3279 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3280 &data->arg.seq_args, &data->res.seq_res, 1);
3281 if (status == 0) {
3282 update_changeattr(dir, &data->res.dir_cinfo);
3283 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3284 }
3285 return status;
3286 }
3287
3288 static void nfs4_free_createdata(struct nfs4_createdata *data)
3289 {
3290 kfree(data);
3291 }
3292
3293 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3294 struct page *page, unsigned int len, struct iattr *sattr)
3295 {
3296 struct nfs4_createdata *data;
3297 int status = -ENAMETOOLONG;
3298
3299 if (len > NFS4_MAXPATHLEN)
3300 goto out;
3301
3302 status = -ENOMEM;
3303 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3304 if (data == NULL)
3305 goto out;
3306
3307 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3308 data->arg.u.symlink.pages = &page;
3309 data->arg.u.symlink.len = len;
3310
3311 status = nfs4_do_create(dir, dentry, data);
3312
3313 nfs4_free_createdata(data);
3314 out:
3315 return status;
3316 }
3317
3318 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3319 struct page *page, unsigned int len, struct iattr *sattr)
3320 {
3321 struct nfs4_exception exception = { };
3322 int err;
3323 do {
3324 err = nfs4_handle_exception(NFS_SERVER(dir),
3325 _nfs4_proc_symlink(dir, dentry, page,
3326 len, sattr),
3327 &exception);
3328 } while (exception.retry);
3329 return err;
3330 }
3331
3332 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3333 struct iattr *sattr)
3334 {
3335 struct nfs4_createdata *data;
3336 int status = -ENOMEM;
3337
3338 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3339 if (data == NULL)
3340 goto out;
3341
3342 status = nfs4_do_create(dir, dentry, data);
3343
3344 nfs4_free_createdata(data);
3345 out:
3346 return status;
3347 }
3348
3349 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3350 struct iattr *sattr)
3351 {
3352 struct nfs4_exception exception = { };
3353 int err;
3354
3355 sattr->ia_mode &= ~current_umask();
3356 do {
3357 err = nfs4_handle_exception(NFS_SERVER(dir),
3358 _nfs4_proc_mkdir(dir, dentry, sattr),
3359 &exception);
3360 } while (exception.retry);
3361 return err;
3362 }
3363
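/*
 * READDIR: the cookie verifier returned by the server is cached in the
 * directory's cookieverf for use with subsequent cookies, and the cached
 * atime is invalidated since READDIR changes it on the server.
 */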
3364 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3365 u64 cookie, struct page **pages, unsigned int count, int plus)
3366 {
3367 struct inode *dir = dentry->d_inode;
3368 struct nfs4_readdir_arg args = {
3369 .fh = NFS_FH(dir),
3370 .pages = pages,
3371 .pgbase = 0,
3372 .count = count,
3373 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3374 .plus = plus,
3375 };
3376 struct nfs4_readdir_res res;
3377 struct rpc_message msg = {
3378 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3379 .rpc_argp = &args,
3380 .rpc_resp = &res,
3381 .rpc_cred = cred,
3382 };
3383 int status;
3384
3385 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3386 dentry->d_parent->d_name.name,
3387 dentry->d_name.name,
3388 (unsigned long long)cookie);
3389 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3390 res.pgbase = args.pgbase;
3391 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3392 if (status >= 0) {
3393 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3394 status += args.pgbase;
3395 }
3396
3397 nfs_invalidate_atime(dir);
3398
3399 dprintk("%s: returns %d\n", __func__, status);
3400 return status;
3401 }
3402
3403 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3404 u64 cookie, struct page **pages, unsigned int count, int plus)
3405 {
3406 struct nfs4_exception exception = { };
3407 int err;
3408 do {
3409 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3410 _nfs4_proc_readdir(dentry, cred, cookie,
3411 pages, count, plus),
3412 &exception);
3413 } while (exception.retry);
3414 return err;
3415 }
3416
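/*
 * MKNOD via NFSv4 CREATE: map the S_IF* mode to the matching NF4* type
 * and, for block and character devices, fill in the major/minor device
 * numbers.
 */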
3417 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3418 struct iattr *sattr, dev_t rdev)
3419 {
3420 struct nfs4_createdata *data;
3421 int mode = sattr->ia_mode;
3422 int status = -ENOMEM;
3423
3424 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3425 if (data == NULL)
3426 goto out;
3427
3428 if (S_ISFIFO(mode))
3429 data->arg.ftype = NF4FIFO;
3430 else if (S_ISBLK(mode)) {
3431 data->arg.ftype = NF4BLK;
3432 data->arg.u.device.specdata1 = MAJOR(rdev);
3433 data->arg.u.device.specdata2 = MINOR(rdev);
3434 }
3435 else if (S_ISCHR(mode)) {
3436 data->arg.ftype = NF4CHR;
3437 data->arg.u.device.specdata1 = MAJOR(rdev);
3438 data->arg.u.device.specdata2 = MINOR(rdev);
3439 } else if (!S_ISSOCK(mode)) {
3440 status = -EINVAL;
3441 goto out_free;
3442 }
3443
3444 status = nfs4_do_create(dir, dentry, data);
3445 out_free:
3446 nfs4_free_createdata(data);
3447 out:
3448 return status;
3449 }
3450
3451 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3452 struct iattr *sattr, dev_t rdev)
3453 {
3454 struct nfs4_exception exception = { };
3455 int err;
3456
3457 sattr->ia_mode &= ~current_umask();
3458 do {
3459 err = nfs4_handle_exception(NFS_SERVER(dir),
3460 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3461 &exception);
3462 } while (exception.retry);
3463 return err;
3464 }
3465
3466 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3467 struct nfs_fsstat *fsstat)
3468 {
3469 struct nfs4_statfs_arg args = {
3470 .fh = fhandle,
3471 .bitmask = server->attr_bitmask,
3472 };
3473 struct nfs4_statfs_res res = {
3474 .fsstat = fsstat,
3475 };
3476 struct rpc_message msg = {
3477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3478 .rpc_argp = &args,
3479 .rpc_resp = &res,
3480 };
3481
3482 nfs_fattr_init(fsstat->fattr);
3483 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3484 }
3485
3486 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3487 {
3488 struct nfs4_exception exception = { };
3489 int err;
3490 do {
3491 err = nfs4_handle_exception(server,
3492 _nfs4_proc_statfs(server, fhandle, fsstat),
3493 &exception);
3494 } while (exception.retry);
3495 return err;
3496 }
3497
3498 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3499 struct nfs_fsinfo *fsinfo)
3500 {
3501 struct nfs4_fsinfo_arg args = {
3502 .fh = fhandle,
3503 .bitmask = server->attr_bitmask,
3504 };
3505 struct nfs4_fsinfo_res res = {
3506 .fsinfo = fsinfo,
3507 };
3508 struct rpc_message msg = {
3509 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3510 .rpc_argp = &args,
3511 .rpc_resp = &res,
3512 };
3513
3514 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3515 }
3516
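/*
 * FSINFO with retry handling. The reply also carries the server's lease
 * time, so update cl_lease_time and cl_last_renewal on success.
 */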
3517 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3518 {
3519 struct nfs4_exception exception = { };
3520 unsigned long now = jiffies;
3521 int err;
3522
3523 do {
3524 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
3525 if (err == 0) {
3526 struct nfs_client *clp = server->nfs_client;
3527
3528 spin_lock(&clp->cl_lock);
3529 clp->cl_lease_time = fsinfo->lease_time * HZ;
3530 clp->cl_last_renewal = now;
3531 spin_unlock(&clp->cl_lock);
3532 break;
3533 }
3534 err = nfs4_handle_exception(server, err, &exception);
3535 } while (exception.retry);
3536 return err;
3537 }
3538
3539 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3540 {
3541 int error;
3542
3543 nfs_fattr_init(fsinfo->fattr);
3544 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
3545 if (error == 0) {
3546 /* block layout checks this! */
3547 server->pnfs_blksize = fsinfo->blksize;
3548 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
3549 }
3550
3551 return error;
3552 }
3553
3554 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3555 struct nfs_pathconf *pathconf)
3556 {
3557 struct nfs4_pathconf_arg args = {
3558 .fh = fhandle,
3559 .bitmask = server->attr_bitmask,
3560 };
3561 struct nfs4_pathconf_res res = {
3562 .pathconf = pathconf,
3563 };
3564 struct rpc_message msg = {
3565 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3566 .rpc_argp = &args,
3567 .rpc_resp = &res,
3568 };
3569
3570 /* None of the pathconf attributes are mandatory to implement */
3571 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3572 memset(pathconf, 0, sizeof(*pathconf));
3573 return 0;
3574 }
3575
3576 nfs_fattr_init(pathconf->fattr);
3577 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3578 }
3579
3580 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3581 struct nfs_pathconf *pathconf)
3582 {
3583 struct nfs4_exception exception = { };
3584 int err;
3585
3586 do {
3587 err = nfs4_handle_exception(server,
3588 _nfs4_proc_pathconf(server, fhandle, pathconf),
3589 &exception);
3590 } while (exception.retry);
3591 return err;
3592 }
3593
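/**
 * nfs4_set_rw_stateid - select the stateid to use for a READ or WRITE
 * @stateid: target nfs4_stateid to fill in
 * @ctx: open context for the I/O
 * @l_ctx: lock context for the I/O, may be NULL
 * @fmode: FMODE_READ or FMODE_WRITE
 *
 * Picks the most appropriate stateid (delegation, lock or open stateid)
 * via nfs4_select_rw_stateid().
 */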
3594 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
3595 const struct nfs_open_context *ctx,
3596 const struct nfs_lock_context *l_ctx,
3597 fmode_t fmode)
3598 {
3599 const struct nfs_lockowner *lockowner = NULL;
3600
3601 if (l_ctx != NULL)
3602 lockowner = &l_ctx->lockowner;
3603 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
3604 }
3605 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
3606
3607 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
3608 const struct nfs_open_context *ctx,
3609 const struct nfs_lock_context *l_ctx,
3610 fmode_t fmode)
3611 {
3612 nfs4_stateid current_stateid;
3613
3614 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
3615 return false;
3616 return nfs4_stateid_match(stateid, &current_stateid);
3617 }
3618
3619 static bool nfs4_error_stateid_expired(int err)
3620 {
3621 switch (err) {
3622 case -NFS4ERR_DELEG_REVOKED:
3623 case -NFS4ERR_ADMIN_REVOKED:
3624 case -NFS4ERR_BAD_STATEID:
3625 case -NFS4ERR_STALE_STATEID:
3626 case -NFS4ERR_OLD_STATEID:
3627 case -NFS4ERR_OPENMODE:
3628 case -NFS4ERR_EXPIRED:
3629 return true;
3630 }
3631 return false;
3632 }
3633
3634 void __nfs4_read_done_cb(struct nfs_read_data *data)
3635 {
3636 nfs_invalidate_atime(data->header->inode);
3637 }
3638
3639 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3640 {
3641 struct nfs_server *server = NFS_SERVER(data->header->inode);
3642
3643 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3644 rpc_restart_call_prepare(task);
3645 return -EAGAIN;
3646 }
3647
3648 __nfs4_read_done_cb(data);
3649 if (task->tk_status > 0)
3650 renew_lease(server, data->timestamp);
3651 return 0;
3652 }
3653
3654 static bool nfs4_read_stateid_changed(struct rpc_task *task,
3655 struct nfs_readargs *args)
3656 {
3657
3658 if (!nfs4_error_stateid_expired(task->tk_status) ||
3659 nfs4_stateid_is_current(&args->stateid,
3660 args->context,
3661 args->lock_context,
3662 FMODE_READ))
3663 return false;
3664 rpc_restart_call_prepare(task);
3665 return true;
3666 }
3667
3668 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3669 {
3670
3671 dprintk("--> %s\n", __func__);
3672
3673 if (!nfs4_sequence_done(task, &data->res.seq_res))
3674 return -EAGAIN;
3675 if (nfs4_read_stateid_changed(task, &data->args))
3676 return -EAGAIN;
3677 return data->read_done_cb ? data->read_done_cb(task, data) :
3678 nfs4_read_done_cb(task, data);
3679 }
3680
3681 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3682 {
3683 data->timestamp = jiffies;
3684 data->read_done_cb = nfs4_read_done_cb;
3685 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3686 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3687 }
3688
3689 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3690 {
3691 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3692 &data->args.seq_args,
3693 &data->res.seq_res,
3694 task))
3695 return;
3696 nfs4_set_rw_stateid(&data->args.stateid, data->args.context,
3697 data->args.lock_context, FMODE_READ);
3698 }
3699
3700 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3701 {
3702 struct inode *inode = data->header->inode;
3703
3704 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3705 rpc_restart_call_prepare(task);
3706 return -EAGAIN;
3707 }
3708 if (task->tk_status >= 0) {
3709 renew_lease(NFS_SERVER(inode), data->timestamp);
3710 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3711 }
3712 return 0;
3713 }
3714
3715 static bool nfs4_write_stateid_changed(struct rpc_task *task,
3716 struct nfs_writeargs *args)
3717 {
3718
3719 if (!nfs4_error_stateid_expired(task->tk_status) ||
3720 nfs4_stateid_is_current(&args->stateid,
3721 args->context,
3722 args->lock_context,
3723 FMODE_WRITE))
3724 return false;
3725 rpc_restart_call_prepare(task);
3726 return true;
3727 }
3728
3729 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3730 {
3731 if (!nfs4_sequence_done(task, &data->res.seq_res))
3732 return -EAGAIN;
3733 if (nfs4_write_stateid_changed(task, &data->args))
3734 return -EAGAIN;
3735 return data->write_done_cb ? data->write_done_cb(task, data) :
3736 nfs4_write_done_cb(task, data);
3737 }
3738
3739 static
3740 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
3741 {
3742 const struct nfs_pgio_header *hdr = data->header;
3743
3744 /* Don't request attributes for pNFS or O_DIRECT writes */
3745 if (data->ds_clp != NULL || hdr->dreq != NULL)
3746 return false;
3747 /* Otherwise, request attributes if and only if we don't hold
3748 * a delegation
3749 */
3750 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
3751 }
3752
3753 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3754 {
3755 struct nfs_server *server = NFS_SERVER(data->header->inode);
3756
3757 if (!nfs4_write_need_cache_consistency_data(data)) {
3758 data->args.bitmask = NULL;
3759 data->res.fattr = NULL;
3760 } else
3761 data->args.bitmask = server->cache_consistency_bitmask;
3762
3763 if (!data->write_done_cb)
3764 data->write_done_cb = nfs4_write_done_cb;
3765 data->res.server = server;
3766 data->timestamp = jiffies;
3767
3768 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
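/* Final argument 1: ask the session to cache the reply of the non-idempotent WRITE */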
3769 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3770 }
3771
3772 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3773 {
3774 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3775 &data->args.seq_args,
3776 &data->res.seq_res,
3777 task))
3778 return;
3779 nfs4_set_rw_stateid(&data->args.stateid, data->args.context,
3780 data->args.lock_context, FMODE_WRITE);
3781 }
3782
3783 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
3784 {
3785 nfs4_setup_sequence(NFS_SERVER(data->inode),
3786 &data->args.seq_args,
3787 &data->res.seq_res,
3788 task);
3789 }
3790
3791 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
3792 {
3793 struct inode *inode = data->inode;
3794
3795 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3796 rpc_restart_call_prepare(task);
3797 return -EAGAIN;
3798 }
3799 return 0;
3800 }
3801
3802 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
3803 {
3804 if (!nfs4_sequence_done(task, &data->res.seq_res))
3805 return -EAGAIN;
3806 return data->commit_done_cb(task, data);
3807 }
3808
3809 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
3810 {
3811 struct nfs_server *server = NFS_SERVER(data->inode);
3812
3813 if (data->commit_done_cb == NULL)
3814 data->commit_done_cb = nfs4_commit_done_cb;
3815 data->res.server = server;
3816 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3817 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3818 }
3819
3820 struct nfs4_renewdata {
3821 struct nfs_client *client;
3822 unsigned long timestamp;
3823 };
3824
3825 /*
3826 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3827 * standalone procedure for queueing an asynchronous RENEW.
3828 */
3829 static void nfs4_renew_release(void *calldata)
3830 {
3831 struct nfs4_renewdata *data = calldata;
3832 struct nfs_client *clp = data->client;
3833
3834 if (atomic_read(&clp->cl_count) > 1)
3835 nfs4_schedule_state_renewal(clp);
3836 nfs_put_client(clp);
3837 kfree(data);
3838 }
3839
3840 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3841 {
3842 struct nfs4_renewdata *data = calldata;
3843 struct nfs_client *clp = data->client;
3844 unsigned long timestamp = data->timestamp;
3845
3846 if (task->tk_status < 0) {
3847 /* Unless we're shutting down, schedule state recovery! */
3848 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3849 return;
3850 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3851 nfs4_schedule_lease_recovery(clp);
3852 return;
3853 }
3854 nfs4_schedule_path_down_recovery(clp);
3855 }
3856 do_renew_lease(clp, timestamp);
3857 }
3858
3859 static const struct rpc_call_ops nfs4_renew_ops = {
3860 .rpc_call_done = nfs4_renew_done,
3861 .rpc_release = nfs4_renew_release,
3862 };
3863
3864 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3865 {
3866 struct rpc_message msg = {
3867 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3868 .rpc_argp = clp,
3869 .rpc_cred = cred,
3870 };
3871 struct nfs4_renewdata *data;
3872
3873 if (renew_flags == 0)
3874 return 0;
3875 if (!atomic_inc_not_zero(&clp->cl_count))
3876 return -EIO;
3877 data = kmalloc(sizeof(*data), GFP_NOFS);
3878 if (data == NULL)
3879 return -ENOMEM;
3880 data->client = clp;
3881 data->timestamp = jiffies;
3882 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
3883 &nfs4_renew_ops, data);
3884 }
3885
3886 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3887 {
3888 struct rpc_message msg = {
3889 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3890 .rpc_argp = clp,
3891 .rpc_cred = cred,
3892 };
3893 unsigned long now = jiffies;
3894 int status;
3895
3896 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
3897 if (status < 0)
3898 return status;
3899 do_renew_lease(clp, now);
3900 return 0;
3901 }
3902
3903 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3904 {
3905 return (server->caps & NFS_CAP_ACLS)
3906 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3907 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3908 }
3909
3910 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
3911 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes of
3912 * page pointers on the stack.
3913 */
3914 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
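/* e.g. with XATTR_SIZE_MAX == 65536 and 4K pages this works out to 16 pages */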
3915
3916 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3917 struct page **pages, unsigned int *pgbase)
3918 {
3919 struct page *newpage, **spages;
3920 int rc = 0;
3921 size_t len;
3922 spages = pages;
3923
3924 do {
3925 len = min_t(size_t, PAGE_SIZE, buflen);
3926 newpage = alloc_page(GFP_KERNEL);
3927
3928 if (newpage == NULL)
3929 goto unwind;
3930 memcpy(page_address(newpage), buf, len);
3931 buf += len;
3932 buflen -= len;
3933 *pages++ = newpage;
3934 rc++;
3935 } while (buflen != 0);
3936
3937 return rc;
3938
3939 unwind:
3940 for (; rc > 0; rc--)
3941 __free_page(spages[rc-1]);
3942 return -ENOMEM;
3943 }
3944
3945 struct nfs4_cached_acl {
3946 int cached;
3947 size_t len;
3948 char data[0];
3949 };
3950
3951 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3952 {
3953 struct nfs_inode *nfsi = NFS_I(inode);
3954
3955 spin_lock(&inode->i_lock);
3956 kfree(nfsi->nfs4_acl);
3957 nfsi->nfs4_acl = acl;
3958 spin_unlock(&inode->i_lock);
3959 }
3960
3961 static void nfs4_zap_acl_attr(struct inode *inode)
3962 {
3963 nfs4_set_cached_acl(inode, NULL);
3964 }
3965
3966 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3967 {
3968 struct nfs_inode *nfsi = NFS_I(inode);
3969 struct nfs4_cached_acl *acl;
3970 int ret = -ENOENT;
3971
3972 spin_lock(&inode->i_lock);
3973 acl = nfsi->nfs4_acl;
3974 if (acl == NULL)
3975 goto out;
3976 if (buf == NULL) /* user is just asking for length */
3977 goto out_len;
3978 if (acl->cached == 0)
3979 goto out;
3980 ret = -ERANGE; /* see getxattr(2) man page */
3981 if (acl->len > buflen)
3982 goto out;
3983 memcpy(buf, acl->data, acl->len);
3984 out_len:
3985 ret = acl->len;
3986 out:
3987 spin_unlock(&inode->i_lock);
3988 return ret;
3989 }
3990
3991 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3992 {
3993 struct nfs4_cached_acl *acl;
3994 size_t buflen = sizeof(*acl) + acl_len;
3995
3996 if (buflen <= PAGE_SIZE) {
3997 acl = kmalloc(buflen, GFP_KERNEL);
3998 if (acl == NULL)
3999 goto out;
4000 acl->cached = 1;
4001 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4002 } else {
4003 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4004 if (acl == NULL)
4005 goto out;
4006 acl->cached = 0;
4007 }
4008 acl->len = acl_len;
4009 out:
4010 nfs4_set_cached_acl(inode, acl);
4011 }
4012
4013 /*
4014 * The getxattr API returns the required buffer length when called with a
4015 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4016 * the required buf. On a NULL buf, we still ask the server for up to a
4017 * page of data, guessing that the ACL request can be serviced by a page.
4018 * If so, we cache up to a page of ACL data, and the 2nd call to getxattr
4019 * is serviced from the cache. If not, we throw away the page and cache
4020 * only the required length. The next getxattr call will then produce
4021 * another round trip to the server, this time with an input buf of the required size.
4022 */
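/*
 * Illustrative userspace sequence (not part of this file) for the two-call
 * pattern described above, using the standard getxattr(2) interface:
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	void *buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 */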
4023 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4024 {
4025 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4026 struct nfs_getaclargs args = {
4027 .fh = NFS_FH(inode),
4028 .acl_pages = pages,
4029 .acl_len = buflen,
4030 };
4031 struct nfs_getaclres res = {
4032 .acl_len = buflen,
4033 };
4034 struct rpc_message msg = {
4035 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4036 .rpc_argp = &args,
4037 .rpc_resp = &res,
4038 };
4039 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4040 int ret = -ENOMEM, i;
4041
4042 /* As long as we're doing a round trip to the server anyway,
4043 * let's be prepared for a page of acl data. */
4044 if (npages == 0)
4045 npages = 1;
4046 if (npages > ARRAY_SIZE(pages))
4047 return -ERANGE;
4048
4049 for (i = 0; i < npages; i++) {
4050 pages[i] = alloc_page(GFP_KERNEL);
4051 if (!pages[i])
4052 goto out_free;
4053 }
4054
4055 /* for decoding across pages */
4056 res.acl_scratch = alloc_page(GFP_KERNEL);
4057 if (!res.acl_scratch)
4058 goto out_free;
4059
4060 args.acl_len = npages * PAGE_SIZE;
4061 args.acl_pgbase = 0;
4062
4063 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4064 __func__, buf, buflen, npages, args.acl_len);
4065 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4066 &msg, &args.seq_args, &res.seq_res, 0);
4067 if (ret)
4068 goto out_free;
4069
4070 /* Handle the case where the passed-in buffer is too short */
4071 if (res.acl_flags & NFS4_ACL_TRUNC) {
4072 /* Did the user only issue a request for the acl length? */
4073 if (buf == NULL)
4074 goto out_ok;
4075 ret = -ERANGE;
4076 goto out_free;
4077 }
4078 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4079 if (buf) {
4080 if (res.acl_len > buflen) {
4081 ret = -ERANGE;
4082 goto out_free;
4083 }
4084 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4085 }
4086 out_ok:
4087 ret = res.acl_len;
4088 out_free:
4089 for (i = 0; i < npages; i++)
4090 if (pages[i])
4091 __free_page(pages[i]);
4092 if (res.acl_scratch)
4093 __free_page(res.acl_scratch);
4094 return ret;
4095 }
4096
4097 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4098 {
4099 struct nfs4_exception exception = { };
4100 ssize_t ret;
4101 do {
4102 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4103 if (ret >= 0)
4104 break;
4105 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4106 } while (exception.retry);
4107 return ret;
4108 }
4109
4110 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4111 {
4112 struct nfs_server *server = NFS_SERVER(inode);
4113 int ret;
4114
4115 if (!nfs4_server_supports_acls(server))
4116 return -EOPNOTSUPP;
4117 ret = nfs_revalidate_inode(server, inode);
4118 if (ret < 0)
4119 return ret;
4120 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4121 nfs_zap_acl_cache(inode);
4122 ret = nfs4_read_cached_acl(inode, buf, buflen);
4123 if (ret != -ENOENT)
4124 /* -ENOENT is returned if there is no ACL or if there is an ACL
4125 * but no cached acl data, just the acl length */
4126 return ret;
4127 return nfs4_get_acl_uncached(inode, buf, buflen);
4128 }
4129
4130 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4131 {
4132 struct nfs_server *server = NFS_SERVER(inode);
4133 struct page *pages[NFS4ACL_MAXPAGES];
4134 struct nfs_setaclargs arg = {
4135 .fh = NFS_FH(inode),
4136 .acl_pages = pages,
4137 .acl_len = buflen,
4138 };
4139 struct nfs_setaclres res;
4140 struct rpc_message msg = {
4141 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4142 .rpc_argp = &arg,
4143 .rpc_resp = &res,
4144 };
4145 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4146 int ret, i;
4147
4148 if (!nfs4_server_supports_acls(server))
4149 return -EOPNOTSUPP;
4150 if (npages > ARRAY_SIZE(pages))
4151 return -ERANGE;
4152 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4153 if (i < 0)
4154 return i;
4155 nfs4_inode_return_delegation(inode);
4156 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4157
4158 /*
4159 * Free each page after tx, so the only ref left is
4160 * held by the network stack
4161 */
4162 for (; i > 0; i--)
4163 put_page(pages[i-1]);
4164
4165 /*
4166 * An ACL update can result in an inode attribute update,
4167 * so mark the attribute cache invalid.
4168 */
4169 spin_lock(&inode->i_lock);
4170 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4171 spin_unlock(&inode->i_lock);
4172 nfs_access_zap_cache(inode);
4173 nfs_zap_acl_cache(inode);
4174 return ret;
4175 }
4176
4177 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4178 {
4179 struct nfs4_exception exception = { };
4180 int err;
4181 do {
4182 err = nfs4_handle_exception(NFS_SERVER(inode),
4183 __nfs4_proc_set_acl(inode, buf, buflen),
4184 &exception);
4185 } while (exception.retry);
4186 return err;
4187 }
4188
4189 static int
4190 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
4191 {
4192 struct nfs_client *clp = server->nfs_client;
4193
4194 if (task->tk_status >= 0)
4195 return 0;
4196 switch (task->tk_status) {
4197 case -NFS4ERR_DELEG_REVOKED:
4198 case -NFS4ERR_ADMIN_REVOKED:
4199 case -NFS4ERR_BAD_STATEID:
4200 if (state == NULL)
4201 break;
4202 nfs_remove_bad_delegation(state->inode);
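/* Fall through: schedule stateid recovery, as for OPENMODE */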
4203 case -NFS4ERR_OPENMODE:
4204 if (state == NULL)
4205 break;
4206 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4207 goto stateid_invalid;
4208 goto wait_on_recovery;
4209 case -NFS4ERR_EXPIRED:
4210 if (state != NULL) {
4211 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4212 goto stateid_invalid;
4213 }
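/* Fall through: expired state also requires lease recovery */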
4214 case -NFS4ERR_STALE_STATEID:
4215 case -NFS4ERR_STALE_CLIENTID:
4216 nfs4_schedule_lease_recovery(clp);
4217 goto wait_on_recovery;
4218 #if defined(CONFIG_NFS_V4_1)
4219 case -NFS4ERR_BADSESSION:
4220 case -NFS4ERR_BADSLOT:
4221 case -NFS4ERR_BAD_HIGH_SLOT:
4222 case -NFS4ERR_DEADSESSION:
4223 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4224 case -NFS4ERR_SEQ_FALSE_RETRY:
4225 case -NFS4ERR_SEQ_MISORDERED:
4226 dprintk("%s ERROR %d, Reset session\n", __func__,
4227 task->tk_status);
4228 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4229 task->tk_status = 0;
4230 return -EAGAIN;
4231 #endif /* CONFIG_NFS_V4_1 */
4232 case -NFS4ERR_DELAY:
4233 nfs_inc_server_stats(server, NFSIOS_DELAY);
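/* Fall through: delay and retry, as for GRACE */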
4234 case -NFS4ERR_GRACE:
4235 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4236 task->tk_status = 0;
4237 return -EAGAIN;
4238 case -NFS4ERR_RETRY_UNCACHED_REP:
4239 case -NFS4ERR_OLD_STATEID:
4240 task->tk_status = 0;
4241 return -EAGAIN;
4242 }
4243 task->tk_status = nfs4_map_errors(task->tk_status);
4244 return 0;
4245 stateid_invalid:
4246 task->tk_status = -EIO;
4247 return 0;
4248 wait_on_recovery:
4249 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4250 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4251 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4252 task->tk_status = 0;
4253 return -EAGAIN;
4254 }
4255
4256 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4257 nfs4_verifier *bootverf)
4258 {
4259 __be32 verf[2];
4260
4261 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4262 /* An impossible timestamp guarantees this value
4263 * will never match a generated boot time. */
4264 verf[0] = 0;
4265 verf[1] = (__be32)(NSEC_PER_SEC + 1);
4266 } else {
4267 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4268 verf[0] = (__be32)nn->boot_time.tv_sec;
4269 verf[1] = (__be32)nn->boot_time.tv_nsec;
4270 }
4271 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4272 }
4273
4274 static unsigned int
4275 nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
4276 char *buf, size_t len)
4277 {
4278 unsigned int result;
4279
4280 rcu_read_lock();
4281 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
4282 clp->cl_ipaddr,
4283 rpc_peeraddr2str(clp->cl_rpcclient,
4284 RPC_DISPLAY_ADDR),
4285 rpc_peeraddr2str(clp->cl_rpcclient,
4286 RPC_DISPLAY_PROTO));
4287 rcu_read_unlock();
4288 return result;
4289 }
4290
4291 static unsigned int
4292 nfs4_init_uniform_client_string(const struct nfs_client *clp,
4293 char *buf, size_t len)
4294 {
4295 char *nodename = clp->cl_rpcclient->cl_nodename;
4296
4297 if (nfs4_client_id_uniquifier[0] != '\0')
4298 nodename = nfs4_client_id_uniquifier;
4299 return scnprintf(buf, len, "Linux NFSv%u.%u %s",
4300 clp->rpc_ops->version, clp->cl_minorversion,
4301 nodename);
4302 }
4303
4304 /**
4305 * nfs4_proc_setclientid - Negotiate client ID
4306 * @clp: state data structure
4307 * @program: RPC program for NFSv4 callback service
4308 * @port: IP port number for NFS4 callback service
4309 * @cred: RPC credential to use for this call
4310 * @res: where to place the result
4311 *
4312 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4313 */
4314 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
4315 unsigned short port, struct rpc_cred *cred,
4316 struct nfs4_setclientid_res *res)
4317 {
4318 nfs4_verifier sc_verifier;
4319 struct nfs4_setclientid setclientid = {
4320 .sc_verifier = &sc_verifier,
4321 .sc_prog = program,
4322 .sc_cb_ident = clp->cl_cb_ident,
4323 };
4324 struct rpc_message msg = {
4325 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
4326 .rpc_argp = &setclientid,
4327 .rpc_resp = res,
4328 .rpc_cred = cred,
4329 };
4330 int status;
4331
4332 /* nfs_client_id4 */
4333 nfs4_init_boot_verifier(clp, &sc_verifier);
4334 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
4335 setclientid.sc_name_len =
4336 nfs4_init_uniform_client_string(clp,
4337 setclientid.sc_name,
4338 sizeof(setclientid.sc_name));
4339 else
4340 setclientid.sc_name_len =
4341 nfs4_init_nonuniform_client_string(clp,
4342 setclientid.sc_name,
4343 sizeof(setclientid.sc_name));
4344 /* cb_client4 */
4345 rcu_read_lock();
4346 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
4347 sizeof(setclientid.sc_netid),
4348 rpc_peeraddr2str(clp->cl_rpcclient,
4349 RPC_DISPLAY_NETID));
4350 rcu_read_unlock();
4351 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
4352 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
4353 clp->cl_ipaddr, port >> 8, port & 255);
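/* The callback address above is an rpcbind "universal address": the client's
 * IP address followed by the port split into two decimal octets (hi.lo) */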
4354
4355 dprintk("NFS call setclientid auth=%s, '%.*s'\n",
4356 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4357 setclientid.sc_name_len, setclientid.sc_name);
4358 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4359 dprintk("NFS reply setclientid: %d\n", status);
4360 return status;
4361 }
4362
4363 /**
4364 * nfs4_proc_setclientid_confirm - Confirm client ID
4365 * @clp: state data structure
4366 * @res: result of a previous SETCLIENTID
4367 * @cred: RPC credential to use for this call
4368 *
4369 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4370 */
4371 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4372 struct nfs4_setclientid_res *arg,
4373 struct rpc_cred *cred)
4374 {
4375 struct rpc_message msg = {
4376 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4377 .rpc_argp = arg,
4378 .rpc_cred = cred,
4379 };
4380 int status;
4381
4382 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
4383 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4384 clp->cl_clientid);
4385 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4386 dprintk("NFS reply setclientid_confirm: %d\n", status);
4387 return status;
4388 }
4389
4390 struct nfs4_delegreturndata {
4391 struct nfs4_delegreturnargs args;
4392 struct nfs4_delegreturnres res;
4393 struct nfs_fh fh;
4394 nfs4_stateid stateid;
4395 unsigned long timestamp;
4396 struct nfs_fattr fattr;
4397 int rpc_status;
4398 };
4399
4400 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4401 {
4402 struct nfs4_delegreturndata *data = calldata;
4403
4404 if (!nfs4_sequence_done(task, &data->res.seq_res))
4405 return;
4406
4407 switch (task->tk_status) {
4408 case -NFS4ERR_STALE_STATEID:
4409 case -NFS4ERR_EXPIRED:
4410 case 0:
4411 renew_lease(data->res.server, data->timestamp);
4412 break;
4413 default:
4414 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4415 -EAGAIN) {
4416 rpc_restart_call_prepare(task);
4417 return;
4418 }
4419 }
4420 data->rpc_status = task->tk_status;
4421 }
4422
4423 static void nfs4_delegreturn_release(void *calldata)
4424 {
4425 kfree(calldata);
4426 }
4427
4428 #if defined(CONFIG_NFS_V4_1)
4429 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4430 {
4431 struct nfs4_delegreturndata *d_data;
4432
4433 d_data = (struct nfs4_delegreturndata *)data;
4434
4435 nfs4_setup_sequence(d_data->res.server,
4436 &d_data->args.seq_args,
4437 &d_data->res.seq_res,
4438 task);
4439 }
4440 #endif /* CONFIG_NFS_V4_1 */
4441
4442 static const struct rpc_call_ops nfs4_delegreturn_ops = {
4443 #if defined(CONFIG_NFS_V4_1)
4444 .rpc_call_prepare = nfs4_delegreturn_prepare,
4445 #endif /* CONFIG_NFS_V4_1 */
4446 .rpc_call_done = nfs4_delegreturn_done,
4447 .rpc_release = nfs4_delegreturn_release,
4448 };
4449
4450 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4451 {
4452 struct nfs4_delegreturndata *data;
4453 struct nfs_server *server = NFS_SERVER(inode);
4454 struct rpc_task *task;
4455 struct rpc_message msg = {
4456 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4457 .rpc_cred = cred,
4458 };
4459 struct rpc_task_setup task_setup_data = {
4460 .rpc_client = server->client,
4461 .rpc_message = &msg,
4462 .callback_ops = &nfs4_delegreturn_ops,
4463 .flags = RPC_TASK_ASYNC,
4464 };
4465 int status = 0;
4466
4467 data = kzalloc(sizeof(*data), GFP_NOFS);
4468 if (data == NULL)
4469 return -ENOMEM;
4470 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4471 data->args.fhandle = &data->fh;
4472 data->args.stateid = &data->stateid;
4473 data->args.bitmask = server->cache_consistency_bitmask;
4474 nfs_copy_fh(&data->fh, NFS_FH(inode));
4475 nfs4_stateid_copy(&data->stateid, stateid);
4476 data->res.fattr = &data->fattr;
4477 data->res.server = server;
4478 nfs_fattr_init(data->res.fattr);
4479 data->timestamp = jiffies;
4480 data->rpc_status = 0;
4481
4482 task_setup_data.callback_data = data;
4483 msg.rpc_argp = &data->args;
4484 msg.rpc_resp = &data->res;
4485 task = rpc_run_task(&task_setup_data);
4486 if (IS_ERR(task))
4487 return PTR_ERR(task);
4488 if (!issync)
4489 goto out;
4490 status = nfs4_wait_for_completion_rpc_task(task);
4491 if (status != 0)
4492 goto out;
4493 status = data->rpc_status;
4494 if (status == 0)
4495 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
4496 else
4497 nfs_refresh_inode(inode, &data->fattr);
4498 out:
4499 rpc_put_task(task);
4500 return status;
4501 }
4502
4503 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4504 {
4505 struct nfs_server *server = NFS_SERVER(inode);
4506 struct nfs4_exception exception = { };
4507 int err;
4508 do {
4509 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4510 switch (err) {
4511 case -NFS4ERR_STALE_STATEID:
4512 case -NFS4ERR_EXPIRED:
4513 case 0:
4514 return 0;
4515 }
4516 err = nfs4_handle_exception(server, err, &exception);
4517 } while (exception.retry);
4518 return err;
4519 }
4520
4521 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4522 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4523
4524 /*
4525 * sleep, with exponential backoff, and retry the LOCK operation.
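 * Each retry doubles the timeout, starting at NFS4_LOCK_MINTIMEOUT (1s)
 * and capped at NFS4_LOCK_MAXTIMEOUT (30s).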
4526 */
4527 static unsigned long
4528 nfs4_set_lock_task_retry(unsigned long timeout)
4529 {
4530 freezable_schedule_timeout_killable(timeout);
4531 timeout <<= 1;
4532 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4533 return NFS4_LOCK_MAXTIMEOUT;
4534 return timeout;
4535 }
4536
4537 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4538 {
4539 struct inode *inode = state->inode;
4540 struct nfs_server *server = NFS_SERVER(inode);
4541 struct nfs_client *clp = server->nfs_client;
4542 struct nfs_lockt_args arg = {
4543 .fh = NFS_FH(inode),
4544 .fl = request,
4545 };
4546 struct nfs_lockt_res res = {
4547 .denied = request,
4548 };
4549 struct rpc_message msg = {
4550 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4551 .rpc_argp = &arg,
4552 .rpc_resp = &res,
4553 .rpc_cred = state->owner->so_cred,
4554 };
4555 struct nfs4_lock_state *lsp;
4556 int status;
4557
4558 arg.lock_owner.clientid = clp->cl_clientid;
4559 status = nfs4_set_lock_state(state, request);
4560 if (status != 0)
4561 goto out;
4562 lsp = request->fl_u.nfs4_fl.owner;
4563 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4564 arg.lock_owner.s_dev = server->s_dev;
4565 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4566 switch (status) {
4567 case 0:
4568 request->fl_type = F_UNLCK;
4569 break;
4570 case -NFS4ERR_DENIED:
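/* A conflicting lock was found; its details were decoded into "request"
 * (res.denied), so report success and let the caller inspect it */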
4571 status = 0;
4572 }
4573 request->fl_ops->fl_release_private(request);
4574 request->fl_ops = NULL;
4575 out:
4576 return status;
4577 }
4578
4579 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4580 {
4581 struct nfs4_exception exception = { };
4582 int err;
4583
4584 do {
4585 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4586 _nfs4_proc_getlk(state, cmd, request),
4587 &exception);
4588 } while (exception.retry);
4589 return err;
4590 }
4591
4592 static int do_vfs_lock(struct file *file, struct file_lock *fl)
4593 {
4594 int res = 0;
4595 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4596 case FL_POSIX:
4597 res = posix_lock_file_wait(file, fl);
4598 break;
4599 case FL_FLOCK:
4600 res = flock_lock_file_wait(file, fl);
4601 break;
4602 default:
4603 BUG();
4604 }
4605 return res;
4606 }
4607
4608 struct nfs4_unlockdata {
4609 struct nfs_locku_args arg;
4610 struct nfs_locku_res res;
4611 struct nfs4_lock_state *lsp;
4612 struct nfs_open_context *ctx;
4613 struct file_lock fl;
4614 const struct nfs_server *server;
4615 unsigned long timestamp;
4616 };
4617
4618 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4619 struct nfs_open_context *ctx,
4620 struct nfs4_lock_state *lsp,
4621 struct nfs_seqid *seqid)
4622 {
4623 struct nfs4_unlockdata *p;
4624 struct inode *inode = lsp->ls_state->inode;
4625
4626 p = kzalloc(sizeof(*p), GFP_NOFS);
4627 if (p == NULL)
4628 return NULL;
4629 p->arg.fh = NFS_FH(inode);
4630 p->arg.fl = &p->fl;
4631 p->arg.seqid = seqid;
4632 p->res.seqid = seqid;
4633 p->arg.stateid = &lsp->ls_stateid;
4634 p->lsp = lsp;
4635 atomic_inc(&lsp->ls_count);
4636 /* Ensure we don't close file until we're done freeing locks! */
4637 p->ctx = get_nfs_open_context(ctx);
4638 memcpy(&p->fl, fl, sizeof(p->fl));
4639 p->server = NFS_SERVER(inode);
4640 return p;
4641 }
4642
4643 static void nfs4_locku_release_calldata(void *data)
4644 {
4645 struct nfs4_unlockdata *calldata = data;
4646 nfs_free_seqid(calldata->arg.seqid);
4647 nfs4_put_lock_state(calldata->lsp);
4648 put_nfs_open_context(calldata->ctx);
4649 kfree(calldata);
4650 }
4651
4652 static void nfs4_locku_done(struct rpc_task *task, void *data)
4653 {
4654 struct nfs4_unlockdata *calldata = data;
4655
4656 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4657 return;
4658 switch (task->tk_status) {
4659 case 0:
4660 nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4661 &calldata->res.stateid);
4662 renew_lease(calldata->server, calldata->timestamp);
4663 break;
4664 case -NFS4ERR_BAD_STATEID:
4665 case -NFS4ERR_OLD_STATEID:
4666 case -NFS4ERR_STALE_STATEID:
4667 case -NFS4ERR_EXPIRED:
4668 break;
4669 default:
4670 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4671 rpc_restart_call_prepare(task);
4672 }
4673 nfs_release_seqid(calldata->arg.seqid);
4674 }
4675
4676 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4677 {
4678 struct nfs4_unlockdata *calldata = data;
4679
4680 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4681 goto out_wait;
4682 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
4683 /* Note: exit _without_ running nfs4_locku_done */
4684 goto out_no_action;
4685 }
4686 calldata->timestamp = jiffies;
4687 if (nfs4_setup_sequence(calldata->server,
4688 &calldata->arg.seq_args,
4689 &calldata->res.seq_res,
4690 task) != 0)
4691 nfs_release_seqid(calldata->arg.seqid);
4692 return;
4693 out_no_action:
4694 task->tk_action = NULL;
4695 out_wait:
4696 nfs4_sequence_done(task, &calldata->res.seq_res);
4697 }
4698
4699 static const struct rpc_call_ops nfs4_locku_ops = {
4700 .rpc_call_prepare = nfs4_locku_prepare,
4701 .rpc_call_done = nfs4_locku_done,
4702 .rpc_release = nfs4_locku_release_calldata,
4703 };
4704
4705 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4706 struct nfs_open_context *ctx,
4707 struct nfs4_lock_state *lsp,
4708 struct nfs_seqid *seqid)
4709 {
4710 struct nfs4_unlockdata *data;
4711 struct rpc_message msg = {
4712 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4713 .rpc_cred = ctx->cred,
4714 };
4715 struct rpc_task_setup task_setup_data = {
4716 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4717 .rpc_message = &msg,
4718 .callback_ops = &nfs4_locku_ops,
4719 .workqueue = nfsiod_workqueue,
4720 .flags = RPC_TASK_ASYNC,
4721 };
4722
4723 /* Ensure this is an unlock - when canceling a lock, the
4724 * canceled lock is passed in, and it won't be an unlock.
4725 */
4726 fl->fl_type = F_UNLCK;
4727
4728 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4729 if (data == NULL) {
4730 nfs_free_seqid(seqid);
4731 return ERR_PTR(-ENOMEM);
4732 }
4733
4734 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4735 msg.rpc_argp = &data->arg;
4736 msg.rpc_resp = &data->res;
4737 task_setup_data.callback_data = data;
4738 return rpc_run_task(&task_setup_data);
4739 }
4740
4741 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4742 {
4743 struct inode *inode = state->inode;
4744 struct nfs4_state_owner *sp = state->owner;
4745 struct nfs_inode *nfsi = NFS_I(inode);
4746 struct nfs_seqid *seqid;
4747 struct nfs4_lock_state *lsp;
4748 struct rpc_task *task;
4749 int status = 0;
4750 unsigned char fl_flags = request->fl_flags;
4751
4752 status = nfs4_set_lock_state(state, request);
4753 /* Unlock _before_ we do the RPC call */
4754 request->fl_flags |= FL_EXISTS;
4755 /* Exclude nfs_delegation_claim_locks() */
4756 mutex_lock(&sp->so_delegreturn_mutex);
4757 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
4758 down_read(&nfsi->rwsem);
4759 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4760 up_read(&nfsi->rwsem);
4761 mutex_unlock(&sp->so_delegreturn_mutex);
4762 goto out;
4763 }
4764 up_read(&nfsi->rwsem);
4765 mutex_unlock(&sp->so_delegreturn_mutex);
4766 if (status != 0)
4767 goto out;
4768 /* Is this a delegated lock? */
4769 lsp = request->fl_u.nfs4_fl.owner;
4770 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
4771 goto out;
4772 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4773 status = -ENOMEM;
4774 if (seqid == NULL)
4775 goto out;
4776 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4777 status = PTR_ERR(task);
4778 if (IS_ERR(task))
4779 goto out;
4780 status = nfs4_wait_for_completion_rpc_task(task);
4781 rpc_put_task(task);
4782 out:
4783 request->fl_flags = fl_flags;
4784 return status;
4785 }
4786
4787 struct nfs4_lockdata {
4788 struct nfs_lock_args arg;
4789 struct nfs_lock_res res;
4790 struct nfs4_lock_state *lsp;
4791 struct nfs_open_context *ctx;
4792 struct file_lock fl;
4793 unsigned long timestamp;
4794 int rpc_status;
4795 int cancelled;
4796 struct nfs_server *server;
4797 };
4798
4799 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4800 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4801 gfp_t gfp_mask)
4802 {
4803 struct nfs4_lockdata *p;
4804 struct inode *inode = lsp->ls_state->inode;
4805 struct nfs_server *server = NFS_SERVER(inode);
4806
4807 p = kzalloc(sizeof(*p), gfp_mask);
4808 if (p == NULL)
4809 return NULL;
4810
4811 p->arg.fh = NFS_FH(inode);
4812 p->arg.fl = &p->fl;
4813 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4814 if (p->arg.open_seqid == NULL)
4815 goto out_free;
4816 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4817 if (p->arg.lock_seqid == NULL)
4818 goto out_free_seqid;
4819 p->arg.lock_stateid = &lsp->ls_stateid;
4820 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4821 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4822 p->arg.lock_owner.s_dev = server->s_dev;
4823 p->res.lock_seqid = p->arg.lock_seqid;
4824 p->lsp = lsp;
4825 p->server = server;
4826 atomic_inc(&lsp->ls_count);
4827 p->ctx = get_nfs_open_context(ctx);
4828 memcpy(&p->fl, fl, sizeof(p->fl));
4829 return p;
4830 out_free_seqid:
4831 nfs_free_seqid(p->arg.open_seqid);
4832 out_free:
4833 kfree(p);
4834 return NULL;
4835 }
4836
4837 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4838 {
4839 struct nfs4_lockdata *data = calldata;
4840 struct nfs4_state *state = data->lsp->ls_state;
4841
4842 dprintk("%s: begin!\n", __func__);
4843 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4844 goto out_wait;
4845 /* Do we need to do an open_to_lock_owner? */
4846 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4847 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
4848 goto out_release_lock_seqid;
4849 }
4850 data->arg.open_stateid = &state->open_stateid;
4851 data->arg.new_lock_owner = 1;
4852 data->res.open_seqid = data->arg.open_seqid;
4853 } else
4854 data->arg.new_lock_owner = 0;
4855 if (!nfs4_valid_open_stateid(state)) {
4856 data->rpc_status = -EBADF;
4857 task->tk_action = NULL;
4858 goto out_release_open_seqid;
4859 }
4860 data->timestamp = jiffies;
4861 if (nfs4_setup_sequence(data->server,
4862 &data->arg.seq_args,
4863 &data->res.seq_res,
4864 task) == 0)
4865 return;
4866 out_release_open_seqid:
4867 nfs_release_seqid(data->arg.open_seqid);
4868 out_release_lock_seqid:
4869 nfs_release_seqid(data->arg.lock_seqid);
4870 out_wait:
4871 nfs4_sequence_done(task, &data->res.seq_res);
4872 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4873 }
4874
4875 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4876 {
4877 struct nfs4_lockdata *data = calldata;
4878
4879 dprintk("%s: begin!\n", __func__);
4880
4881 if (!nfs4_sequence_done(task, &data->res.seq_res))
4882 return;
4883
4884 data->rpc_status = task->tk_status;
4885 if (data->arg.new_lock_owner != 0) {
4886 if (data->rpc_status == 0)
4887 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4888 else
4889 goto out;
4890 }
4891 if (data->rpc_status == 0) {
4892 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4893 set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags);
4894 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4895 }
4896 out:
4897 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4898 }
4899
4900 static void nfs4_lock_release(void *calldata)
4901 {
4902 struct nfs4_lockdata *data = calldata;
4903
4904 dprintk("%s: begin!\n", __func__);
4905 nfs_free_seqid(data->arg.open_seqid);
4906 if (data->cancelled != 0) {
4907 struct rpc_task *task;
4908 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4909 data->arg.lock_seqid);
4910 if (!IS_ERR(task))
4911 rpc_put_task_async(task);
4912 dprintk("%s: cancelling lock!\n", __func__);
4913 } else
4914 nfs_free_seqid(data->arg.lock_seqid);
4915 nfs4_put_lock_state(data->lsp);
4916 put_nfs_open_context(data->ctx);
4917 kfree(data);
4918 dprintk("%s: done!\n", __func__);
4919 }
4920
4921 static const struct rpc_call_ops nfs4_lock_ops = {
4922 .rpc_call_prepare = nfs4_lock_prepare,
4923 .rpc_call_done = nfs4_lock_done,
4924 .rpc_release = nfs4_lock_release,
4925 };
4926
4927 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4928 {
4929 switch (error) {
4930 case -NFS4ERR_ADMIN_REVOKED:
4931 case -NFS4ERR_BAD_STATEID:
4932 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4933 if (new_lock_owner != 0 ||
4934 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
4935 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4936 break;
4937 case -NFS4ERR_STALE_STATEID:
4938 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
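/* Fall through: a stale stateid implies the lease needs to be recovered */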
4939 case -NFS4ERR_EXPIRED:
4940 nfs4_schedule_lease_recovery(server->nfs_client);
4941 }
4942 }
4943
4944 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4945 {
4946 struct nfs4_lockdata *data;
4947 struct rpc_task *task;
4948 struct rpc_message msg = {
4949 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4950 .rpc_cred = state->owner->so_cred,
4951 };
4952 struct rpc_task_setup task_setup_data = {
4953 .rpc_client = NFS_CLIENT(state->inode),
4954 .rpc_message = &msg,
4955 .callback_ops = &nfs4_lock_ops,
4956 .workqueue = nfsiod_workqueue,
4957 .flags = RPC_TASK_ASYNC,
4958 };
4959 int ret;
4960
4961 dprintk("%s: begin!\n", __func__);
4962 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4963 fl->fl_u.nfs4_fl.owner,
4964 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4965 if (data == NULL)
4966 return -ENOMEM;
4967 if (IS_SETLKW(cmd))
4968 data->arg.block = 1;
4969 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4970 msg.rpc_argp = &data->arg;
4971 msg.rpc_resp = &data->res;
4972 task_setup_data.callback_data = data;
4973 if (recovery_type > NFS_LOCK_NEW) {
4974 if (recovery_type == NFS_LOCK_RECLAIM)
4975 data->arg.reclaim = NFS_LOCK_RECLAIM;
4976 nfs4_set_sequence_privileged(&data->arg.seq_args);
4977 }
4978 task = rpc_run_task(&task_setup_data);
4979 if (IS_ERR(task))
4980 return PTR_ERR(task);
4981 ret = nfs4_wait_for_completion_rpc_task(task);
4982 if (ret == 0) {
4983 ret = data->rpc_status;
4984 if (ret)
4985 nfs4_handle_setlk_error(data->server, data->lsp,
4986 data->arg.new_lock_owner, ret);
4987 } else
4988 data->cancelled = 1;
4989 rpc_put_task(task);
4990 dprintk("%s: done, ret = %d!\n", __func__, ret);
4991 return ret;
4992 }
4993
4994 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4995 {
4996 struct nfs_server *server = NFS_SERVER(state->inode);
4997 struct nfs4_exception exception = {
4998 .inode = state->inode,
4999 };
5000 int err;
5001
5002 do {
5003 /* Cache the lock if possible... */
5004 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5005 return 0;
5006 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5007 if (err != -NFS4ERR_DELAY)
5008 break;
5009 nfs4_handle_exception(server, err, &exception);
5010 } while (exception.retry);
5011 return err;
5012 }
5013
5014 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5015 {
5016 struct nfs_server *server = NFS_SERVER(state->inode);
5017 struct nfs4_exception exception = {
5018 .inode = state->inode,
5019 };
5020 int err;
5021
5022 err = nfs4_set_lock_state(state, request);
5023 if (err != 0)
5024 return err;
5025 do {
5026 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5027 return 0;
5028 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5029 switch (err) {
5030 default:
5031 goto out;
5032 case -NFS4ERR_GRACE:
5033 case -NFS4ERR_DELAY:
5034 nfs4_handle_exception(server, err, &exception);
5035 err = 0;
5036 }
5037 } while (exception.retry);
5038 out:
5039 return err;
5040 }
5041
5042 #if defined(CONFIG_NFS_V4_1)
5043 /**
5044 * nfs41_check_expired_locks - possibly free a lock stateid
5045 *
5046 * @state: NFSv4 state for an inode
5047 *
5048 * Returns NFS_OK if recovery for this stateid is now finished.
5049 * Otherwise a negative NFS4ERR value is returned.
5050 */
5051 static int nfs41_check_expired_locks(struct nfs4_state *state)
5052 {
5053 int status, ret = -NFS4ERR_BAD_STATEID;
5054 struct nfs4_lock_state *lsp;
5055 struct nfs_server *server = NFS_SERVER(state->inode);
5056
5057 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
5058 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
5059 status = nfs41_test_stateid(server, &lsp->ls_stateid);
5060 if (status != NFS_OK) {
5061 /* Free the stateid unless the server
5062 * informs us the stateid is unrecognized. */
5063 if (status != -NFS4ERR_BAD_STATEID)
5064 nfs41_free_stateid(server,
5065 &lsp->ls_stateid);
5066 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5067 ret = status;
5068 }
5069 }
5070 }
5071
5072 return ret;
5073 }
5074
5075 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
5076 {
5077 int status = NFS_OK;
5078
5079 if (test_bit(LK_STATE_IN_USE, &state->flags))
5080 status = nfs41_check_expired_locks(state);
5081 if (status != NFS_OK)
5082 status = nfs4_lock_expired(state, request);
5083 return status;
5084 }
5085 #endif
5086
5087 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5088 {
5089 struct nfs4_state_owner *sp = state->owner;
5090 struct nfs_inode *nfsi = NFS_I(state->inode);
5091 unsigned char fl_flags = request->fl_flags;
5092 unsigned int seq;
5093 int status = -ENOLCK;
5094
5095 if ((fl_flags & FL_POSIX) &&
5096 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
5097 goto out;
5098 /* Is this a delegated open? */
5099 status = nfs4_set_lock_state(state, request);
5100 if (status != 0)
5101 goto out;
5102 request->fl_flags |= FL_ACCESS;
5103 status = do_vfs_lock(request->fl_file, request);
5104 if (status < 0)
5105 goto out;
5106 down_read(&nfsi->rwsem);
5107 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
5108 /* Yes: cache locks! */
5109 /* ...but avoid races with delegation recall... */
5110 request->fl_flags = fl_flags & ~FL_SLEEP;
5111 status = do_vfs_lock(request->fl_file, request);
5112 goto out_unlock;
5113 }
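/* Sample the state owner's reclaim seqcount so a reclaim that races with
 * this new lock can be detected below and retried */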
5114 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
5115 up_read(&nfsi->rwsem);
5116 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
5117 if (status != 0)
5118 goto out;
5119 down_read(&nfsi->rwsem);
5120 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) {
5121 status = -NFS4ERR_DELAY;
5122 goto out_unlock;
5123 }
5124 /* Note: we always want to sleep here! */
5125 request->fl_flags = fl_flags | FL_SLEEP;
5126 if (do_vfs_lock(request->fl_file, request) < 0)
5127 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
5128 "manager!\n", __func__);
5129 out_unlock:
5130 up_read(&nfsi->rwsem);
5131 out:
5132 request->fl_flags = fl_flags;
5133 return status;
5134 }
5135
5136 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5137 {
5138 struct nfs4_exception exception = {
5139 .state = state,
5140 .inode = state->inode,
5141 };
5142 int err;
5143
5144 do {
5145 err = _nfs4_proc_setlk(state, cmd, request);
5146 if (err == -NFS4ERR_DENIED)
5147 err = -EAGAIN;
5148 err = nfs4_handle_exception(NFS_SERVER(state->inode),
5149 err, &exception);
5150 } while (exception.retry);
5151 return err;
5152 }
5153
5154 static int
5155 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
5156 {
5157 struct nfs_open_context *ctx;
5158 struct nfs4_state *state;
5159 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
5160 int status;
5161
5162 /* verify open state */
5163 ctx = nfs_file_open_context(filp);
5164 state = ctx->state;
5165
5166 if (request->fl_start < 0 || request->fl_end < 0)
5167 return -EINVAL;
5168
5169 if (IS_GETLK(cmd)) {
5170 if (state != NULL)
5171 return nfs4_proc_getlk(state, F_GETLK, request);
5172 return 0;
5173 }
5174
5175 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
5176 return -EINVAL;
5177
5178 if (request->fl_type == F_UNLCK) {
5179 if (state != NULL)
5180 return nfs4_proc_unlck(state, cmd, request);
5181 return 0;
5182 }
5183
5184 if (state == NULL)
5185 return -ENOLCK;
5186 /*
5187 * Don't rely on the VFS having checked the file open mode,
5188 * since it won't do this for flock() locks.
5189 */
5190 switch (request->fl_type) {
5191 case F_RDLCK:
5192 if (!(filp->f_mode & FMODE_READ))
5193 return -EBADF;
5194 break;
5195 case F_WRLCK:
5196 if (!(filp->f_mode & FMODE_WRITE))
5197 return -EBADF;
5198 }
5199
5200 do {
5201 status = nfs4_proc_setlk(state, cmd, request);
5202 if ((status != -EAGAIN) || IS_SETLK(cmd))
5203 break;
5204 timeout = nfs4_set_lock_task_retry(timeout);
5205 status = -ERESTARTSYS;
5206 if (signalled())
5207 break;
5208 } while (status < 0);
5209 return status;
5210 }
5211
5212 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
5213 {
5214 struct nfs_server *server = NFS_SERVER(state->inode);
5215 int err;
5216
5217 err = nfs4_set_lock_state(state, fl);
5218 if (err != 0)
5219 return err;
5220 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
5221 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
5222 }
5223
5224 struct nfs_release_lockowner_data {
5225 struct nfs4_lock_state *lsp;
5226 struct nfs_server *server;
5227 struct nfs_release_lockowner_args args;
5228 };
5229
5230 static void nfs4_release_lockowner_release(void *calldata)
5231 {
5232 struct nfs_release_lockowner_data *data = calldata;
5233 nfs4_free_lock_state(data->server, data->lsp);
5234 kfree(calldata);
5235 }
5236
5237 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
5238 .rpc_release = nfs4_release_lockowner_release,
5239 };
5240
5241 static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
5242 {
5243 struct nfs_release_lockowner_data *data;
5244 struct rpc_message msg = {
5245 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
5246 };
5247
5248 if (server->nfs_client->cl_mvops->minor_version != 0)
5249 return -EINVAL;
5250 data = kmalloc(sizeof(*data), GFP_NOFS);
5251 if (!data)
5252 return -ENOMEM;
5253 data->lsp = lsp;
5254 data->server = server;
5255 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
5256 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
5257 data->args.lock_owner.s_dev = server->s_dev;
5258 msg.rpc_argp = &data->args;
5259 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
5260 return 0;
5261 }
5262
5263 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
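/* Userspace reads and writes this attribute with getxattr(2)/setxattr(2),
 * e.g. via the nfs4_getfacl(1)/nfs4_setfacl(1) tools */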
5264
5265 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
5266 const void *buf, size_t buflen,
5267 int flags, int type)
5268 {
5269 if (strcmp(key, "") != 0)
5270 return -EINVAL;
5271
5272 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
5273 }
5274
5275 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
5276 void *buf, size_t buflen, int type)
5277 {
5278 if (strcmp(key, "") != 0)
5279 return -EINVAL;
5280
5281 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
5282 }
5283
5284 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
5285 size_t list_len, const char *name,
5286 size_t name_len, int type)
5287 {
5288 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
5289
5290 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
5291 return 0;
5292
5293 if (list && len <= list_len)
5294 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
5295 return len;
5296 }
5297
5298 /*
5299 * nfs_fhget will use either the mounted_on_fileid or the fileid
5300 */
5301 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
5302 {
5303 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
5304 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
5305 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
5306 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
5307 return;
5308
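/* Fake up just enough attributes for nfs_fhget(): a read-only directory */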
5309 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
5310 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
5311 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
5312 fattr->nlink = 2;
5313 }
5314
5315 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5316 const struct qstr *name,
5317 struct nfs4_fs_locations *fs_locations,
5318 struct page *page)
5319 {
5320 struct nfs_server *server = NFS_SERVER(dir);
5321 u32 bitmask[2] = {
5322 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
5323 };
5324 struct nfs4_fs_locations_arg args = {
5325 .dir_fh = NFS_FH(dir),
5326 .name = name,
5327 .page = page,
5328 .bitmask = bitmask,
5329 };
5330 struct nfs4_fs_locations_res res = {
5331 .fs_locations = fs_locations,
5332 };
5333 struct rpc_message msg = {
5334 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
5335 .rpc_argp = &args,
5336 .rpc_resp = &res,
5337 };
5338 int status;
5339
5340 dprintk("%s: start\n", __func__);
5341
5342 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5343 * is not supported */
5344 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5345 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5346 else
5347 bitmask[0] |= FATTR4_WORD0_FILEID;
5348
5349 nfs_fattr_init(&fs_locations->fattr);
5350 fs_locations->server = server;
5351 fs_locations->nlocations = 0;
5352 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5353 dprintk("%s: returned status = %d\n", __func__, status);
5354 return status;
5355 }
5356
5357 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5358 const struct qstr *name,
5359 struct nfs4_fs_locations *fs_locations,
5360 struct page *page)
5361 {
5362 struct nfs4_exception exception = { };
5363 int err;
5364 do {
5365 err = nfs4_handle_exception(NFS_SERVER(dir),
5366 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5367 &exception);
5368 } while (exception.retry);
5369 return err;
5370 }
5371
5372 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5373 {
5374 int status;
5375 struct nfs4_secinfo_arg args = {
5376 .dir_fh = NFS_FH(dir),
5377 .name = name,
5378 };
5379 struct nfs4_secinfo_res res = {
5380 .flavors = flavors,
5381 };
5382 struct rpc_message msg = {
5383 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5384 .rpc_argp = &args,
5385 .rpc_resp = &res,
5386 };
5387
5388 dprintk("NFS call secinfo %s\n", name->name);
5389 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5390 dprintk("NFS reply secinfo: %d\n", status);
5391 return status;
5392 }
5393
5394 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5395 struct nfs4_secinfo_flavors *flavors)
5396 {
5397 struct nfs4_exception exception = { };
5398 int err;
5399 do {
5400 err = nfs4_handle_exception(NFS_SERVER(dir),
5401 _nfs4_proc_secinfo(dir, name, flavors),
5402 &exception);
5403 } while (exception.retry);
5404 return err;
5405 }
5406
5407 #ifdef CONFIG_NFS_V4_1
5408 /*
5409 * Check the exchange flags returned by the server for invalid combinations:
5410 * having both the PNFS and NON_PNFS flags set, or not having at least one of
5411 * the NON_PNFS, PNFS, or DS flags set.
5412 */
5413 static int nfs4_check_cl_exchange_flags(u32 flags)
5414 {
5415 if (flags & ~EXCHGID4_FLAG_MASK_R)
5416 goto out_inval;
5417 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5418 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5419 goto out_inval;
5420 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5421 goto out_inval;
5422 return NFS_OK;
5423 out_inval:
5424 return -NFS4ERR_INVAL;
5425 }
5426
5427 static bool
5428 nfs41_same_server_scope(struct nfs41_server_scope *a,
5429 struct nfs41_server_scope *b)
5430 {
5431 if (a->server_scope_sz == b->server_scope_sz &&
5432 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5433 return true;
5434
5435 return false;
5436 }
5437
5438 /*
5439 * nfs4_proc_bind_conn_to_session()
5440 *
5441 * The 4.1 client currently uses the same TCP connection for the
5442 * fore and backchannel.
5443 */
5444 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
5445 {
5446 int status;
5447 struct nfs41_bind_conn_to_session_res res;
5448 struct rpc_message msg = {
5449 .rpc_proc =
5450 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
5451 .rpc_argp = clp,
5452 .rpc_resp = &res,
5453 .rpc_cred = cred,
5454 };
5455
5456 dprintk("--> %s\n", __func__);
5457
5458 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5459 if (unlikely(res.session == NULL)) {
5460 status = -ENOMEM;
5461 goto out;
5462 }
5463
5464 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5465 if (status == 0) {
5466 if (memcmp(res.session->sess_id.data,
5467 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
5468 dprintk("NFS: %s: Session ID mismatch\n", __func__);
5469 status = -EIO;
5470 goto out_session;
5471 }
5472 if (res.dir != NFS4_CDFS4_BOTH) {
5473 dprintk("NFS: %s: Unexpected direction from server\n",
5474 __func__);
5475 status = -EIO;
5476 goto out_session;
5477 }
5478 if (res.use_conn_in_rdma_mode) {
5479 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5480 __func__);
5481 status = -EIO;
5482 goto out_session;
5483 }
5484 }
5485 out_session:
5486 kfree(res.session);
5487 out:
5488 dprintk("<-- %s status= %d\n", __func__, status);
5489 return status;
5490 }
5491
5492 /*
5493 * nfs4_proc_exchange_id()
5494 *
5495 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5496 *
5497 * Since the clientid has expired, all compounds using sessions
5498 * associated with the stale clientid will be returning
5499 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5500 * be in some phase of session reset.
5501 */
5502 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5503 {
5504 nfs4_verifier verifier;
5505 struct nfs41_exchange_id_args args = {
5506 .verifier = &verifier,
5507 .client = clp,
5508 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5509 };
5510 struct nfs41_exchange_id_res res = {
5511 0
5512 };
5513 int status;
5514 struct rpc_message msg = {
5515 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5516 .rpc_argp = &args,
5517 .rpc_resp = &res,
5518 .rpc_cred = cred,
5519 };
5520
5521 nfs4_init_boot_verifier(clp, &verifier);
5522 args.id_len = nfs4_init_uniform_client_string(clp, args.id,
5523 sizeof(args.id));
5524 dprintk("NFS call exchange_id auth=%s, '%.*s'\n",
5525 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5526 args.id_len, args.id);
5527
5528 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
5529 GFP_NOFS);
5530 if (unlikely(res.server_owner == NULL)) {
5531 status = -ENOMEM;
5532 goto out;
5533 }
5534
5535 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
5536 GFP_NOFS);
5537 if (unlikely(res.server_scope == NULL)) {
5538 status = -ENOMEM;
5539 goto out_server_owner;
5540 }
5541
5542 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
5543 if (unlikely(res.impl_id == NULL)) {
5544 status = -ENOMEM;
5545 goto out_server_scope;
5546 }
5547
5548 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5549 if (status == 0)
5550 status = nfs4_check_cl_exchange_flags(res.flags);
5551
5552 if (status == 0) {
5553 clp->cl_clientid = res.clientid;
5554 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
5555 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
5556 clp->cl_seqid = res.seqid;
5557
5558 kfree(clp->cl_serverowner);
5559 clp->cl_serverowner = res.server_owner;
5560 res.server_owner = NULL;
5561
5562 /* use the most recent implementation id */
5563 kfree(clp->cl_implid);
5564 clp->cl_implid = res.impl_id;
5565
5566 if (clp->cl_serverscope != NULL &&
5567 !nfs41_same_server_scope(clp->cl_serverscope,
5568 res.server_scope)) {
5569 dprintk("%s: server_scope mismatch detected\n",
5570 __func__);
5571 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5572 kfree(clp->cl_serverscope);
5573 clp->cl_serverscope = NULL;
5574 }
5575
5576 if (clp->cl_serverscope == NULL) {
5577 clp->cl_serverscope = res.server_scope;
5578 goto out;
5579 }
5580 } else
5581 kfree(res.impl_id);
5582
5583 out_server_owner:
5584 kfree(res.server_owner);
5585 out_server_scope:
5586 kfree(res.server_scope);
5587 out:
5588 if (clp->cl_implid != NULL)
5589 dprintk("NFS reply exchange_id: Server Implementation ID: "
5590 "domain: %s, name: %s, date: %llu,%u\n",
5591 clp->cl_implid->domain, clp->cl_implid->name,
5592 clp->cl_implid->date.seconds,
5593 clp->cl_implid->date.nseconds);
5594 dprintk("NFS reply exchange_id: %d\n", status);
5595 return status;
5596 }
5597
5598 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5599 struct rpc_cred *cred)
5600 {
5601 struct rpc_message msg = {
5602 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
5603 .rpc_argp = clp,
5604 .rpc_cred = cred,
5605 };
5606 int status;
5607
5608 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5609 if (status)
5610		dprintk("NFS: Got error %d from the server %s on "
5611			"DESTROY_CLIENTID.\n", status, clp->cl_hostname);
5612 return status;
5613 }
5614
5615 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
5616 struct rpc_cred *cred)
5617 {
5618 unsigned int loop;
5619 int ret;
5620
5621 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
5622 ret = _nfs4_proc_destroy_clientid(clp, cred);
5623 switch (ret) {
5624 case -NFS4ERR_DELAY:
5625 case -NFS4ERR_CLIENTID_BUSY:
5626 ssleep(1);
5627 break;
5628 default:
5629 return ret;
5630 }
5631 }
5632 return 0;
5633 }
5634
5635 int nfs4_destroy_clientid(struct nfs_client *clp)
5636 {
5637 struct rpc_cred *cred;
5638 int ret = 0;
5639
5640 if (clp->cl_mvops->minor_version < 1)
5641 goto out;
5642 if (clp->cl_exchange_flags == 0)
5643 goto out;
5644 if (clp->cl_preserve_clid)
5645 goto out;
5646 cred = nfs4_get_exchange_id_cred(clp);
5647 ret = nfs4_proc_destroy_clientid(clp, cred);
5648 if (cred)
5649 put_rpccred(cred);
5650 switch (ret) {
5651 case 0:
5652 case -NFS4ERR_STALE_CLIENTID:
5653 clp->cl_exchange_flags = 0;
5654 }
5655 out:
5656 return ret;
5657 }
5658
5659 struct nfs4_get_lease_time_data {
5660 struct nfs4_get_lease_time_args *args;
5661 struct nfs4_get_lease_time_res *res;
5662 struct nfs_client *clp;
5663 };
5664
5665 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5666 void *calldata)
5667 {
5668 struct nfs4_get_lease_time_data *data =
5669 (struct nfs4_get_lease_time_data *)calldata;
5670
5671 dprintk("--> %s\n", __func__);
5672	/* just set up the sequence; do not trigger session recovery,
5673	   since we're invoked from within one */
5674 nfs41_setup_sequence(data->clp->cl_session,
5675 &data->args->la_seq_args,
5676 &data->res->lr_seq_res,
5677 task);
5678 dprintk("<-- %s\n", __func__);
5679 }
5680
5681 /*
5682 * Called from nfs4_state_manager thread for session setup, so don't recover
5683 * from sequence operation or clientid errors.
5684 */
5685 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5686 {
5687 struct nfs4_get_lease_time_data *data =
5688 (struct nfs4_get_lease_time_data *)calldata;
5689
5690 dprintk("--> %s\n", __func__);
5691 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5692 return;
5693 switch (task->tk_status) {
5694 case -NFS4ERR_DELAY:
5695 case -NFS4ERR_GRACE:
5696 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5697 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5698 task->tk_status = 0;
5699 /* fall through */
5700 case -NFS4ERR_RETRY_UNCACHED_REP:
5701 rpc_restart_call_prepare(task);
5702 return;
5703 }
5704 dprintk("<-- %s\n", __func__);
5705 }
5706
5707 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5708 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5709 .rpc_call_done = nfs4_get_lease_time_done,
5710 };
5711
5712 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5713 {
5714 struct rpc_task *task;
5715 struct nfs4_get_lease_time_args args;
5716 struct nfs4_get_lease_time_res res = {
5717 .lr_fsinfo = fsinfo,
5718 };
5719 struct nfs4_get_lease_time_data data = {
5720 .args = &args,
5721 .res = &res,
5722 .clp = clp,
5723 };
5724 struct rpc_message msg = {
5725 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5726 .rpc_argp = &args,
5727 .rpc_resp = &res,
5728 };
5729 struct rpc_task_setup task_setup = {
5730 .rpc_client = clp->cl_rpcclient,
5731 .rpc_message = &msg,
5732 .callback_ops = &nfs4_get_lease_time_ops,
5733 .callback_data = &data,
5734 .flags = RPC_TASK_TIMEOUT,
5735 };
5736 int status;
5737
5738 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5739 nfs4_set_sequence_privileged(&args.la_seq_args);
5740 dprintk("--> %s\n", __func__);
5741 task = rpc_run_task(&task_setup);
5742
5743 if (IS_ERR(task))
5744 status = PTR_ERR(task);
5745 else {
5746 status = task->tk_status;
5747 rpc_put_task(task);
5748 }
5749 dprintk("<-- %s return %d\n", __func__, status);
5750
5751 return status;
5752 }
5753
5754 /*
5755  * Initialize the values to be used by the client in CREATE_SESSION.
5756  * If nfs4_init_session has set the fore channel request and response sizes,
5757  * use them.
5758 *
5759 * Set the back channel max_resp_sz_cached to zero to force the client to
5760 * always set csa_cachethis to FALSE because the current implementation
5761 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5762 */
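/*
 * Note: the requested fore channel max_reqs is seeded from the global
 * max_session_slots setting, but the slot count the client ends up with is
 * whatever the server grants in its CREATE_SESSION reply; that reply is only
 * sanity-checked (non-zero, clamped to NFS4_MAX_SLOT_TABLE) in
 * nfs4_verify_fore_channel_attrs() below.
 */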
5763 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5764 {
5765 struct nfs4_session *session = args->client->cl_session;
5766 unsigned int mxrqst_sz = session->fc_target_max_rqst_sz,
5767 mxresp_sz = session->fc_target_max_resp_sz;
5768
5769 if (mxrqst_sz == 0)
5770 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5771 if (mxresp_sz == 0)
5772 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5773 /* Fore channel attributes */
5774 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5775 args->fc_attrs.max_resp_sz = mxresp_sz;
5776 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5777 args->fc_attrs.max_reqs = max_session_slots;
5778
5779 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5780 "max_ops=%u max_reqs=%u\n",
5781 __func__,
5782 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5783 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5784
5785 /* Back channel attributes */
5786 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5787 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5788 args->bc_attrs.max_resp_sz_cached = 0;
5789 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5790 args->bc_attrs.max_reqs = 1;
5791
5792 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5793 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5794 __func__,
5795 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5796 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5797 args->bc_attrs.max_reqs);
5798 }
5799
5800 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5801 {
5802 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5803 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5804
5805 if (rcvd->max_resp_sz > sent->max_resp_sz)
5806 return -EINVAL;
5807 /*
5808 * Our requested max_ops is the minimum we need; we're not
5809 * prepared to break up compounds into smaller pieces than that.
5810 * So, no point even trying to continue if the server won't
5811 * cooperate:
5812 */
5813 if (rcvd->max_ops < sent->max_ops)
5814 return -EINVAL;
5815 if (rcvd->max_reqs == 0)
5816 return -EINVAL;
5817 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5818 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5819 return 0;
5820 }
5821
5822 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5823 {
5824 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5825 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5826
5827 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5828 return -EINVAL;
5829 if (rcvd->max_resp_sz < sent->max_resp_sz)
5830 return -EINVAL;
5831 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5832 return -EINVAL;
5833 /* These would render the backchannel useless: */
5834 if (rcvd->max_ops != sent->max_ops)
5835 return -EINVAL;
5836 if (rcvd->max_reqs != sent->max_reqs)
5837 return -EINVAL;
5838 return 0;
5839 }
5840
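/*
 * Check the channel attributes the server returned in its CREATE_SESSION
 * reply against the values we requested; a reply that would leave the fore
 * or back channel unusable is rejected with -EINVAL.
 */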
5841 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5842 struct nfs4_session *session)
5843 {
5844 int ret;
5845
5846 ret = nfs4_verify_fore_channel_attrs(args, session);
5847 if (ret)
5848 return ret;
5849 return nfs4_verify_back_channel_attrs(args, session);
5850 }
5851
5852 static int _nfs4_proc_create_session(struct nfs_client *clp,
5853 struct rpc_cred *cred)
5854 {
5855 struct nfs4_session *session = clp->cl_session;
5856 struct nfs41_create_session_args args = {
5857 .client = clp,
5858 .cb_program = NFS4_CALLBACK,
5859 };
5860 struct nfs41_create_session_res res = {
5861 .client = clp,
5862 };
5863 struct rpc_message msg = {
5864 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5865 .rpc_argp = &args,
5866 .rpc_resp = &res,
5867 .rpc_cred = cred,
5868 };
5869 int status;
5870
5871 nfs4_init_channel_attrs(&args);
5872 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5873
5874 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5875
5876 if (!status) {
5877 /* Verify the session's negotiated channel_attrs values */
5878 status = nfs4_verify_channel_attrs(&args, session);
5879 /* Increment the clientid slot sequence id */
5880 clp->cl_seqid++;
5881 }
5882
5883 return status;
5884 }
5885
5886 /*
5887 * Issues a CREATE_SESSION operation to the server.
5888 * It is the responsibility of the caller to verify the session is
5889 * expired before calling this routine.
5890 */
5891 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
5892 {
5893 int status;
5894 unsigned *ptr;
5895 struct nfs4_session *session = clp->cl_session;
5896
5897 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5898
5899 status = _nfs4_proc_create_session(clp, cred);
5900 if (status)
5901 goto out;
5902
5903 /* Init or reset the session slot tables */
5904 status = nfs4_setup_session_slot_tables(session);
5905 dprintk("slot table setup returned %d\n", status);
5906 if (status)
5907 goto out;
5908
5909 ptr = (unsigned *)&session->sess_id.data[0];
5910	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
5911 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5912 out:
5913 dprintk("<-- %s\n", __func__);
5914 return status;
5915 }
5916
5917 /*
5918 * Issue the over-the-wire RPC DESTROY_SESSION.
5919 * The caller must serialize access to this routine.
5920 */
5921 int nfs4_proc_destroy_session(struct nfs4_session *session,
5922 struct rpc_cred *cred)
5923 {
5924 struct rpc_message msg = {
5925 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
5926 .rpc_argp = session,
5927 .rpc_cred = cred,
5928 };
5929 int status = 0;
5930
5931 dprintk("--> nfs4_proc_destroy_session\n");
5932
5933	/* session is still being set up */
5934 if (session->clp->cl_cons_state != NFS_CS_READY)
5935 return status;
5936
5937 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5938
5939 if (status)
5940 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
5941 "Session has been destroyed regardless...\n", status);
5942
5943 dprintk("<-- nfs4_proc_destroy_session\n");
5944 return status;
5945 }
5946
5947 /*
5948 * Renew the cl_session lease.
5949 */
5950 struct nfs4_sequence_data {
5951 struct nfs_client *clp;
5952 struct nfs4_sequence_args args;
5953 struct nfs4_sequence_res res;
5954 };
5955
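/*
 * Lease renewal is driven by an asynchronous SEQUENCE-only compound:
 * nfs41_sequence_prepare() claims a session slot, nfs41_sequence_call_done()
 * handles DELAY and lease errors, and nfs41_sequence_release() schedules the
 * next renewal and drops the nfs_client reference taken when the task was
 * launched.
 */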
5956 static void nfs41_sequence_release(void *data)
5957 {
5958 struct nfs4_sequence_data *calldata = data;
5959 struct nfs_client *clp = calldata->clp;
5960
5961 if (atomic_read(&clp->cl_count) > 1)
5962 nfs4_schedule_state_renewal(clp);
5963 nfs_put_client(clp);
5964 kfree(calldata);
5965 }
5966
5967 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5968 {
5969	switch (task->tk_status) {
5970 case -NFS4ERR_DELAY:
5971 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5972 return -EAGAIN;
5973 default:
5974 nfs4_schedule_lease_recovery(clp);
5975 }
5976 return 0;
5977 }
5978
5979 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5980 {
5981 struct nfs4_sequence_data *calldata = data;
5982 struct nfs_client *clp = calldata->clp;
5983
5984 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5985 return;
5986
5987 if (task->tk_status < 0) {
5988 dprintk("%s ERROR %d\n", __func__, task->tk_status);
5989 if (atomic_read(&clp->cl_count) == 1)
5990 goto out;
5991
5992 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5993 rpc_restart_call_prepare(task);
5994 return;
5995 }
5996 }
5997 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5998 out:
5999 dprintk("<-- %s\n", __func__);
6000 }
6001
6002 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
6003 {
6004 struct nfs4_sequence_data *calldata = data;
6005 struct nfs_client *clp = calldata->clp;
6006 struct nfs4_sequence_args *args;
6007 struct nfs4_sequence_res *res;
6008
6009 args = task->tk_msg.rpc_argp;
6010 res = task->tk_msg.rpc_resp;
6011
6012 nfs41_setup_sequence(clp->cl_session, args, res, task);
6013 }
6014
6015 static const struct rpc_call_ops nfs41_sequence_ops = {
6016 .rpc_call_done = nfs41_sequence_call_done,
6017 .rpc_call_prepare = nfs41_sequence_prepare,
6018 .rpc_release = nfs41_sequence_release,
6019 };
6020
6021 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
6022 struct rpc_cred *cred,
6023 bool is_privileged)
6024 {
6025 struct nfs4_sequence_data *calldata;
6026 struct rpc_message msg = {
6027 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
6028 .rpc_cred = cred,
6029 };
6030 struct rpc_task_setup task_setup_data = {
6031 .rpc_client = clp->cl_rpcclient,
6032 .rpc_message = &msg,
6033 .callback_ops = &nfs41_sequence_ops,
6034 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
6035 };
6036
6037 if (!atomic_inc_not_zero(&clp->cl_count))
6038 return ERR_PTR(-EIO);
6039 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6040 if (calldata == NULL) {
6041 nfs_put_client(clp);
6042 return ERR_PTR(-ENOMEM);
6043 }
6044 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
6045 if (is_privileged)
6046 nfs4_set_sequence_privileged(&calldata->args);
6047 msg.rpc_argp = &calldata->args;
6048 msg.rpc_resp = &calldata->res;
6049 calldata->clp = clp;
6050 task_setup_data.callback_data = calldata;
6051
6052 return rpc_run_task(&task_setup_data);
6053 }
6054
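/*
 * Renewal entry point wired up as .sched_state_renewal in
 * nfs41_state_renewal_ops (below).  A SEQUENCE is only sent when the caller
 * passed NFS4_RENEW_TIMEOUT in renew_flags, which is typically the case when
 * the lease is actually due for renewal.
 */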
6055 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
6056 {
6057 struct rpc_task *task;
6058 int ret = 0;
6059
6060 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
6061 return 0;
6062 task = _nfs41_proc_sequence(clp, cred, false);
6063 if (IS_ERR(task))
6064 ret = PTR_ERR(task);
6065 else
6066 rpc_put_task_async(task);
6067 dprintk("<-- %s status=%d\n", __func__, ret);
6068 return ret;
6069 }
6070
6071 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
6072 {
6073 struct rpc_task *task;
6074 int ret;
6075
6076 task = _nfs41_proc_sequence(clp, cred, true);
6077 if (IS_ERR(task)) {
6078 ret = PTR_ERR(task);
6079 goto out;
6080 }
6081 ret = rpc_wait_for_completion_task(task);
6082 if (!ret) {
6083 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
6084
6085 if (task->tk_status == 0)
6086 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
6087 ret = task->tk_status;
6088 }
6089 rpc_put_task(task);
6090 out:
6091 dprintk("<-- %s status=%d\n", __func__, ret);
6092 return ret;
6093 }
6094
6095 struct nfs4_reclaim_complete_data {
6096 struct nfs_client *clp;
6097 struct nfs41_reclaim_complete_args arg;
6098 struct nfs41_reclaim_complete_res res;
6099 };
6100
6101 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
6102 {
6103 struct nfs4_reclaim_complete_data *calldata = data;
6104
6105 nfs41_setup_sequence(calldata->clp->cl_session,
6106 &calldata->arg.seq_args,
6107 &calldata->res.seq_res,
6108 task);
6109 }
6110
6111 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
6112 {
6113	switch (task->tk_status) {
6114 case 0:
6115 case -NFS4ERR_COMPLETE_ALREADY:
6116 case -NFS4ERR_WRONG_CRED: /* What to do here? */
6117 break;
6118 case -NFS4ERR_DELAY:
6119 rpc_delay(task, NFS4_POLL_RETRY_MAX);
6120 /* fall through */
6121 case -NFS4ERR_RETRY_UNCACHED_REP:
6122 return -EAGAIN;
6123 default:
6124 nfs4_schedule_lease_recovery(clp);
6125 }
6126 return 0;
6127 }
6128
6129 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
6130 {
6131 struct nfs4_reclaim_complete_data *calldata = data;
6132 struct nfs_client *clp = calldata->clp;
6133 struct nfs4_sequence_res *res = &calldata->res.seq_res;
6134
6135 dprintk("--> %s\n", __func__);
6136 if (!nfs41_sequence_done(task, res))
6137 return;
6138
6139 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
6140 rpc_restart_call_prepare(task);
6141 return;
6142 }
6143 dprintk("<-- %s\n", __func__);
6144 }
6145
6146 static void nfs4_free_reclaim_complete_data(void *data)
6147 {
6148 struct nfs4_reclaim_complete_data *calldata = data;
6149
6150 kfree(calldata);
6151 }
6152
6153 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
6154 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
6155 .rpc_call_done = nfs4_reclaim_complete_done,
6156 .rpc_release = nfs4_free_reclaim_complete_data,
6157 };
6158
6159 /*
6160 * Issue a global reclaim complete.
6161 */
6162 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
6163 {
6164 struct nfs4_reclaim_complete_data *calldata;
6165 struct rpc_task *task;
6166 struct rpc_message msg = {
6167 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
6168 };
6169 struct rpc_task_setup task_setup_data = {
6170 .rpc_client = clp->cl_rpcclient,
6171 .rpc_message = &msg,
6172 .callback_ops = &nfs4_reclaim_complete_call_ops,
6173 .flags = RPC_TASK_ASYNC,
6174 };
6175 int status = -ENOMEM;
6176
6177 dprintk("--> %s\n", __func__);
6178 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6179 if (calldata == NULL)
6180 goto out;
6181 calldata->clp = clp;
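	/* one_fs == 0: reclaim is complete for the whole client, not one fs */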
6182 calldata->arg.one_fs = 0;
6183
6184 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
6185 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
6186 msg.rpc_argp = &calldata->arg;
6187 msg.rpc_resp = &calldata->res;
6188 task_setup_data.callback_data = calldata;
6189 task = rpc_run_task(&task_setup_data);
6190 if (IS_ERR(task)) {
6191 status = PTR_ERR(task);
6192 goto out;
6193 }
6194 status = nfs4_wait_for_completion_rpc_task(task);
6195 if (status == 0)
6196 status = task->tk_status;
6197 rpc_put_task(task);
6198	return status;
6199 out:
6200 dprintk("<-- %s status=%d\n", __func__, status);
6201 return status;
6202 }
6203
6204 static void
6205 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
6206 {
6207 struct nfs4_layoutget *lgp = calldata;
6208 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6209 struct nfs4_session *session = nfs4_get_session(server);
6210
6211 dprintk("--> %s\n", __func__);
6212	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
6213 * right now covering the LAYOUTGET we are about to send.
6214 * However, that is not so catastrophic, and there seems
6215 * to be no way to prevent it completely.
6216 */
6217 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
6218 &lgp->res.seq_res, task))
6219 return;
6220 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
6221 NFS_I(lgp->args.inode)->layout,
6222 lgp->args.ctx->state)) {
6223 rpc_exit(task, NFS4_OK);
6224 }
6225 }
6226
6227 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6228 {
6229 struct nfs4_layoutget *lgp = calldata;
6230 struct inode *inode = lgp->args.inode;
6231 struct nfs_server *server = NFS_SERVER(inode);
6232 struct pnfs_layout_hdr *lo;
6233 struct nfs4_state *state = NULL;
6234 unsigned long timeo, giveup;
6235
6236 dprintk("--> %s\n", __func__);
6237
6238 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
6239 goto out;
6240
6241 switch (task->tk_status) {
6242 case 0:
6243 goto out;
6244 case -NFS4ERR_LAYOUTTRYLATER:
6245 case -NFS4ERR_RECALLCONFLICT:
6246 timeo = rpc_get_timeout(task->tk_client);
6247 giveup = lgp->args.timestamp + timeo;
6248 if (time_after(giveup, jiffies))
6249 task->tk_status = -NFS4ERR_DELAY;
6250 break;
6251 case -NFS4ERR_EXPIRED:
6252 case -NFS4ERR_BAD_STATEID:
6253 spin_lock(&inode->i_lock);
6254 lo = NFS_I(inode)->layout;
6255 if (!lo || list_empty(&lo->plh_segs)) {
6256 spin_unlock(&inode->i_lock);
6257 /* If the open stateid was bad, then recover it. */
6258 state = lgp->args.ctx->state;
6259 } else {
6260 LIST_HEAD(head);
6261
6262 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
6263 spin_unlock(&inode->i_lock);
6264 /* Mark the bad layout state as invalid, then
6265 * retry using the open stateid. */
6266 pnfs_free_lseg_list(&head);
6267 }
6268 }
6269 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
6270 rpc_restart_call_prepare(task);
6271 out:
6272 dprintk("<-- %s\n", __func__);
6273 }
6274
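/*
 * The LAYOUTGET reply body is decoded into a caller-supplied page array;
 * size it for the largest response the fore channel allows, since the
 * length of the layout body is not known in advance.
 */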
6275 static size_t max_response_pages(struct nfs_server *server)
6276 {
6277 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
6278 return nfs_page_array_len(0, max_resp_sz);
6279 }
6280
6281 static void nfs4_free_pages(struct page **pages, size_t size)
6282 {
6283	size_t i;
6284
6285 if (!pages)
6286 return;
6287
6288 for (i = 0; i < size; i++) {
6289 if (!pages[i])
6290 break;
6291 __free_page(pages[i]);
6292 }
6293 kfree(pages);
6294 }
6295
6296 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
6297 {
6298 struct page **pages;
6299	size_t i;
6300
6301 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
6302 if (!pages) {
6303 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
6304 return NULL;
6305 }
6306
6307 for (i = 0; i < size; i++) {
6308 pages[i] = alloc_page(gfp_flags);
6309 if (!pages[i]) {
6310 dprintk("%s: failed to allocate page\n", __func__);
6311 nfs4_free_pages(pages, size);
6312 return NULL;
6313 }
6314 }
6315
6316 return pages;
6317 }
6318
6319 static void nfs4_layoutget_release(void *calldata)
6320 {
6321 struct nfs4_layoutget *lgp = calldata;
6322 struct inode *inode = lgp->args.inode;
6323 struct nfs_server *server = NFS_SERVER(inode);
6324 size_t max_pages = max_response_pages(server);
6325
6326 dprintk("--> %s\n", __func__);
6327 nfs4_free_pages(lgp->args.layout.pages, max_pages);
6328 pnfs_put_layout_hdr(NFS_I(inode)->layout);
6329 put_nfs_open_context(lgp->args.ctx);
6330 kfree(calldata);
6331 dprintk("<-- %s\n", __func__);
6332 }
6333
6334 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6335 .rpc_call_prepare = nfs4_layoutget_prepare,
6336 .rpc_call_done = nfs4_layoutget_done,
6337 .rpc_release = nfs4_layoutget_release,
6338 };
6339
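/*
 * Send LAYOUTGET to the server.  The RPC runs asynchronously, but this
 * function waits for it to complete and, when the server returned a layout,
 * turns the layout body into a pnfs_layout_segment via pnfs_layout_process().
 * The layout header reference taken here is dropped by
 * nfs4_layoutget_release().
 */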
6340 struct pnfs_layout_segment *
6341 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
6342 {
6343 struct inode *inode = lgp->args.inode;
6344 struct nfs_server *server = NFS_SERVER(inode);
6345 size_t max_pages = max_response_pages(server);
6346 struct rpc_task *task;
6347 struct rpc_message msg = {
6348 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
6349 .rpc_argp = &lgp->args,
6350 .rpc_resp = &lgp->res,
6351 };
6352 struct rpc_task_setup task_setup_data = {
6353 .rpc_client = server->client,
6354 .rpc_message = &msg,
6355 .callback_ops = &nfs4_layoutget_call_ops,
6356 .callback_data = lgp,
6357 .flags = RPC_TASK_ASYNC,
6358 };
6359 struct pnfs_layout_segment *lseg = NULL;
6360 int status = 0;
6361
6362 dprintk("--> %s\n", __func__);
6363
6364 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
6365 if (!lgp->args.layout.pages) {
6366 nfs4_layoutget_release(lgp);
6367 return ERR_PTR(-ENOMEM);
6368 }
6369 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
6370 lgp->args.timestamp = jiffies;
6371
6372 lgp->res.layoutp = &lgp->args.layout;
6373 lgp->res.seq_res.sr_slot = NULL;
6374 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6375
6376 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
6377 pnfs_get_layout_hdr(NFS_I(inode)->layout);
6378
6379 task = rpc_run_task(&task_setup_data);
6380 if (IS_ERR(task))
6381 return ERR_CAST(task);
6382 status = nfs4_wait_for_completion_rpc_task(task);
6383 if (status == 0)
6384 status = task->tk_status;
6385 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
6386 if (status == 0 && lgp->res.layoutp->len)
6387 lseg = pnfs_layout_process(lgp);
6388 rpc_put_task(task);
6389 dprintk("<-- %s status=%d\n", __func__, status);
6390 if (status)
6391 return ERR_PTR(status);
6392 return lseg;
6393 }
6394
6395 static void
6396 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6397 {
6398 struct nfs4_layoutreturn *lrp = calldata;
6399
6400 dprintk("--> %s\n", __func__);
6401 nfs41_setup_sequence(lrp->clp->cl_session,
6402 &lrp->args.seq_args,
6403 &lrp->res.seq_res,
6404 task);
6405 }
6406
6407 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6408 {
6409 struct nfs4_layoutreturn *lrp = calldata;
6410 struct nfs_server *server;
6411
6412 dprintk("--> %s\n", __func__);
6413
6414 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
6415 return;
6416
6417 server = NFS_SERVER(lrp->args.inode);
6418 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6419 rpc_restart_call_prepare(task);
6420 return;
6421 }
6422 dprintk("<-- %s\n", __func__);
6423 }
6424
6425 static void nfs4_layoutreturn_release(void *calldata)
6426 {
6427 struct nfs4_layoutreturn *lrp = calldata;
6428 struct pnfs_layout_hdr *lo = lrp->args.layout;
6429
6430 dprintk("--> %s\n", __func__);
6431 spin_lock(&lo->plh_inode->i_lock);
6432 if (lrp->res.lrs_present)
6433 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6434 lo->plh_block_lgets--;
6435 spin_unlock(&lo->plh_inode->i_lock);
6436 pnfs_put_layout_hdr(lrp->args.layout);
6437 kfree(calldata);
6438 dprintk("<-- %s\n", __func__);
6439 }
6440
6441 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6442 .rpc_call_prepare = nfs4_layoutreturn_prepare,
6443 .rpc_call_done = nfs4_layoutreturn_done,
6444 .rpc_release = nfs4_layoutreturn_release,
6445 };
6446
6447 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6448 {
6449 struct rpc_task *task;
6450 struct rpc_message msg = {
6451 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6452 .rpc_argp = &lrp->args,
6453 .rpc_resp = &lrp->res,
6454 };
6455 struct rpc_task_setup task_setup_data = {
6456 .rpc_client = lrp->clp->cl_rpcclient,
6457 .rpc_message = &msg,
6458 .callback_ops = &nfs4_layoutreturn_call_ops,
6459 .callback_data = lrp,
6460 };
6461 int status;
6462
6463 dprintk("--> %s\n", __func__);
6464 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6465 task = rpc_run_task(&task_setup_data);
6466 if (IS_ERR(task))
6467 return PTR_ERR(task);
6468 status = task->tk_status;
6469 dprintk("<-- %s status=%d\n", __func__, status);
6470 rpc_put_task(task);
6471 return status;
6472 }
6473
6474 /*
6475 * Retrieve the list of Data Server devices from the MDS.
6476 */
6477 static int _nfs4_getdevicelist(struct nfs_server *server,
6478 const struct nfs_fh *fh,
6479 struct pnfs_devicelist *devlist)
6480 {
6481 struct nfs4_getdevicelist_args args = {
6482 .fh = fh,
6483 .layoutclass = server->pnfs_curr_ld->id,
6484 };
6485 struct nfs4_getdevicelist_res res = {
6486 .devlist = devlist,
6487 };
6488 struct rpc_message msg = {
6489 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6490 .rpc_argp = &args,
6491 .rpc_resp = &res,
6492 };
6493 int status;
6494
6495 dprintk("--> %s\n", __func__);
6496 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6497 &res.seq_res, 0);
6498 dprintk("<-- %s status=%d\n", __func__, status);
6499 return status;
6500 }
6501
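/*
 * As elsewhere in this file, the leading-underscore helper above makes a
 * single attempt, while this wrapper retries through nfs4_handle_exception()
 * for as long as the exception logic asks for a retry.
 */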
6502 int nfs4_proc_getdevicelist(struct nfs_server *server,
6503 const struct nfs_fh *fh,
6504 struct pnfs_devicelist *devlist)
6505 {
6506 struct nfs4_exception exception = { };
6507 int err;
6508
6509 do {
6510 err = nfs4_handle_exception(server,
6511 _nfs4_getdevicelist(server, fh, devlist),
6512 &exception);
6513 } while (exception.retry);
6514
6515 dprintk("%s: err=%d, num_devs=%u\n", __func__,
6516 err, devlist->num_devs);
6517
6518 return err;
6519 }
6520 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
6521
6522 static int
6523 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6524 {
6525 struct nfs4_getdeviceinfo_args args = {
6526 .pdev = pdev,
6527 };
6528 struct nfs4_getdeviceinfo_res res = {
6529 .pdev = pdev,
6530 };
6531 struct rpc_message msg = {
6532 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6533 .rpc_argp = &args,
6534 .rpc_resp = &res,
6535 };
6536 int status;
6537
6538 dprintk("--> %s\n", __func__);
6539 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6540 dprintk("<-- %s status=%d\n", __func__, status);
6541
6542 return status;
6543 }
6544
6545 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6546 {
6547 struct nfs4_exception exception = { };
6548 int err;
6549
6550 do {
6551 err = nfs4_handle_exception(server,
6552 _nfs4_proc_getdeviceinfo(server, pdev),
6553 &exception);
6554 } while (exception.retry);
6555 return err;
6556 }
6557 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6558
6559 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6560 {
6561 struct nfs4_layoutcommit_data *data = calldata;
6562 struct nfs_server *server = NFS_SERVER(data->args.inode);
6563 struct nfs4_session *session = nfs4_get_session(server);
6564
6565 nfs41_setup_sequence(session,
6566 &data->args.seq_args,
6567 &data->res.seq_res,
6568 task);
6569 }
6570
6571 static void
6572 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6573 {
6574 struct nfs4_layoutcommit_data *data = calldata;
6575 struct nfs_server *server = NFS_SERVER(data->args.inode);
6576
6577 if (!nfs41_sequence_done(task, &data->res.seq_res))
6578 return;
6579
6580 switch (task->tk_status) { /* Just ignore these failures */
6581 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6582 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
6583 case -NFS4ERR_BADLAYOUT: /* no layout */
6584	case -NFS4ERR_GRACE:		/* loca_reclaim always false */
6585 task->tk_status = 0;
6586 break;
6587 case 0:
6588 nfs_post_op_update_inode_force_wcc(data->args.inode,
6589 data->res.fattr);
6590 break;
6591 default:
6592 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6593 rpc_restart_call_prepare(task);
6594 return;
6595 }
6596 }
6597 }
6598
6599 static void nfs4_layoutcommit_release(void *calldata)
6600 {
6601 struct nfs4_layoutcommit_data *data = calldata;
6602
6603 pnfs_cleanup_layoutcommit(data);
6604 put_rpccred(data->cred);
6605 kfree(data);
6606 }
6607
6608 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6609 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6610 .rpc_call_done = nfs4_layoutcommit_done,
6611 .rpc_release = nfs4_layoutcommit_release,
6612 };
6613
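/*
 * Send LAYOUTCOMMIT.  When @sync is true, wait for the RPC to finish and
 * return its status; when @sync is false the task is left to complete in the
 * background and cleanup happens in nfs4_layoutcommit_release().
 */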
6614 int
6615 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6616 {
6617 struct rpc_message msg = {
6618 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6619 .rpc_argp = &data->args,
6620 .rpc_resp = &data->res,
6621 .rpc_cred = data->cred,
6622 };
6623 struct rpc_task_setup task_setup_data = {
6624 .task = &data->task,
6625 .rpc_client = NFS_CLIENT(data->args.inode),
6626 .rpc_message = &msg,
6627 .callback_ops = &nfs4_layoutcommit_ops,
6628 .callback_data = data,
6629 .flags = RPC_TASK_ASYNC,
6630 };
6631 struct rpc_task *task;
6632 int status = 0;
6633
6634 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6635 "lbw: %llu inode %lu\n",
6636 data->task.tk_pid, sync,
6637 data->args.lastbytewritten,
6638 data->args.inode->i_ino);
6639
6640 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6641 task = rpc_run_task(&task_setup_data);
6642 if (IS_ERR(task))
6643 return PTR_ERR(task);
6644	if (!sync)
6645 goto out;
6646 status = nfs4_wait_for_completion_rpc_task(task);
6647 if (status != 0)
6648 goto out;
6649 status = task->tk_status;
6650 out:
6651 dprintk("%s: status %d\n", __func__, status);
6652 rpc_put_task(task);
6653 return status;
6654 }
6655
6656 static int
6657 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6658 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6659 {
6660 struct nfs41_secinfo_no_name_args args = {
6661 .style = SECINFO_STYLE_CURRENT_FH,
6662 };
6663 struct nfs4_secinfo_res res = {
6664 .flavors = flavors,
6665 };
6666 struct rpc_message msg = {
6667 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6668 .rpc_argp = &args,
6669 .rpc_resp = &res,
6670 };
6671 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6672 }
6673
6674 static int
6675 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6676 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6677 {
6678 struct nfs4_exception exception = { };
6679 int err;
6680 do {
6681 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6682 switch (err) {
6683 case 0:
6684 case -NFS4ERR_WRONGSEC:
6685 case -NFS4ERR_NOTSUPP:
6686 goto out;
6687 default:
6688 err = nfs4_handle_exception(server, err, &exception);
6689 }
6690 } while (exception.retry);
6691 out:
6692 return err;
6693 }
6694
6695 static int
6696 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6697 struct nfs_fsinfo *info)
6698 {
6699 int err;
6700 struct page *page;
6701 rpc_authflavor_t flavor;
6702 struct nfs4_secinfo_flavors *flavors;
6703
6704 page = alloc_page(GFP_KERNEL);
6705 if (!page) {
6706 err = -ENOMEM;
6707 goto out;
6708 }
6709
6710 flavors = page_address(page);
6711 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6712
6713 /*
6714 * Fall back on "guess and check" method if
6715 * the server doesn't support SECINFO_NO_NAME
6716 */
6717 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6718 err = nfs4_find_root_sec(server, fhandle, info);
6719 goto out_freepage;
6720 }
6721 if (err)
6722 goto out_freepage;
6723
6724 flavor = nfs_find_best_sec(flavors);
6725 if (err == 0)
6726 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6727
6728 out_freepage:
6729 put_page(page);
6730 if (err == -EACCES)
6731 return -EPERM;
6732 out:
6733 return err;
6734 }
6735
6736 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6737 {
6738 int status;
6739 struct nfs41_test_stateid_args args = {
6740 .stateid = stateid,
6741 };
6742 struct nfs41_test_stateid_res res;
6743 struct rpc_message msg = {
6744 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6745 .rpc_argp = &args,
6746 .rpc_resp = &res,
6747 };
6748
6749 dprintk("NFS call test_stateid %p\n", stateid);
6750 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6751 nfs4_set_sequence_privileged(&args.seq_args);
6752 status = nfs4_call_sync_sequence(server->client, server, &msg,
6753 &args.seq_args, &res.seq_res);
6754 if (status != NFS_OK) {
6755 dprintk("NFS reply test_stateid: failed, %d\n", status);
6756 return status;
6757 }
6758 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
6759 return -res.status;
6760 }
6761
6762 /**
6763 * nfs41_test_stateid - perform a TEST_STATEID operation
6764 *
6765 * @server: server / transport on which to perform the operation
6766 * @stateid: state ID to test
6767 *
6768 * Returns NFS_OK if the server recognizes that "stateid" is valid.
6769 * Otherwise a negative NFS4ERR value is returned if the operation
6770 * failed or the state ID is not currently valid.
6771 */
6772 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6773 {
6774 struct nfs4_exception exception = { };
6775 int err;
6776 do {
6777 err = _nfs41_test_stateid(server, stateid);
6778 if (err != -NFS4ERR_DELAY)
6779 break;
6780 nfs4_handle_exception(server, err, &exception);
6781 } while (exception.retry);
6782 return err;
6783 }
6784
6785 struct nfs_free_stateid_data {
6786 struct nfs_server *server;
6787 struct nfs41_free_stateid_args args;
6788 struct nfs41_free_stateid_res res;
6789 };
6790
6791 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
6792 {
6793 struct nfs_free_stateid_data *data = calldata;
6794 nfs41_setup_sequence(nfs4_get_session(data->server),
6795 &data->args.seq_args,
6796 &data->res.seq_res,
6797 task);
6798 }
6799
6800 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
6801 {
6802 struct nfs_free_stateid_data *data = calldata;
6803
6804 nfs41_sequence_done(task, &data->res.seq_res);
6805
6806 switch (task->tk_status) {
6807 case -NFS4ERR_DELAY:
6808 if (nfs4_async_handle_error(task, data->server, NULL) == -EAGAIN)
6809 rpc_restart_call_prepare(task);
6810 }
6811 }
6812
6813 static void nfs41_free_stateid_release(void *calldata)
6814 {
6815 kfree(calldata);
6816 }
6817
6818 const struct rpc_call_ops nfs41_free_stateid_ops = {
6819 .rpc_call_prepare = nfs41_free_stateid_prepare,
6820 .rpc_call_done = nfs41_free_stateid_done,
6821 .rpc_release = nfs41_free_stateid_release,
6822 };
6823
6824 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
6825 nfs4_stateid *stateid,
6826 bool privileged)
6827 {
6828 struct rpc_message msg = {
6829 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6830 };
6831 struct rpc_task_setup task_setup = {
6832 .rpc_client = server->client,
6833 .rpc_message = &msg,
6834 .callback_ops = &nfs41_free_stateid_ops,
6835 .flags = RPC_TASK_ASYNC,
6836 };
6837 struct nfs_free_stateid_data *data;
6838
6839 dprintk("NFS call free_stateid %p\n", stateid);
6840 data = kmalloc(sizeof(*data), GFP_NOFS);
6841 if (!data)
6842 return ERR_PTR(-ENOMEM);
6843 data->server = server;
6844 nfs4_stateid_copy(&data->args.stateid, stateid);
6845
6846 task_setup.callback_data = data;
6847
6848 msg.rpc_argp = &data->args;
6849 msg.rpc_resp = &data->res;
6850 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6851 if (privileged)
6852 nfs4_set_sequence_privileged(&data->args.seq_args);
6853
6854 return rpc_run_task(&task_setup);
6855 }
6856
6857 /**
6858 * nfs41_free_stateid - perform a FREE_STATEID operation
6859 *
6860 * @server: server / transport on which to perform the operation
6861 * @stateid: state ID to release
6862 *
6863 * Returns NFS_OK if the server freed "stateid". Otherwise a
6864 * negative NFS4ERR value is returned.
6865 */
6866 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6867 {
6868 struct rpc_task *task;
6869 int ret;
6870
6871 task = _nfs41_free_stateid(server, stateid, true);
6872 if (IS_ERR(task))
6873 return PTR_ERR(task);
6874 ret = rpc_wait_for_completion_task(task);
6875 if (!ret)
6876 ret = task->tk_status;
6877 rpc_put_task(task);
6878 return ret;
6879 }
6880
6881 static int nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
6882 {
6883 struct rpc_task *task;
6884
6885 task = _nfs41_free_stateid(server, &lsp->ls_stateid, false);
6886 nfs4_free_lock_state(server, lsp);
6887 if (IS_ERR(task))
6888 return PTR_ERR(task);
6889 rpc_put_task(task);
6890 return 0;
6891 }
6892
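/*
 * NFSv4.1 stateid comparison: the "other" field must match exactly, while a
 * seqid of zero on either side acts as a wildcard (cf. the special seqid
 * rules in RFC 5661) and so matches any sequence number.
 */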
6893 static bool nfs41_match_stateid(const nfs4_stateid *s1,
6894 const nfs4_stateid *s2)
6895 {
6896 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6897 return false;
6898
6899 if (s1->seqid == s2->seqid)
6900 return true;
6901 if (s1->seqid == 0 || s2->seqid == 0)
6902 return true;
6903
6904 return false;
6905 }
6906
6907 #endif /* CONFIG_NFS_V4_1 */
6908
6909 static bool nfs4_match_stateid(const nfs4_stateid *s1,
6910 const nfs4_stateid *s2)
6911 {
6912 return nfs4_stateid_match(s1, s2);
6913 }
6914
6915
6916 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6917 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6918 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6919 .recover_open = nfs4_open_reclaim,
6920 .recover_lock = nfs4_lock_reclaim,
6921 .establish_clid = nfs4_init_clientid,
6922 .get_clid_cred = nfs4_get_setclientid_cred,
6923 .detect_trunking = nfs40_discover_server_trunking,
6924 };
6925
6926 #if defined(CONFIG_NFS_V4_1)
6927 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6928 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6929 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6930 .recover_open = nfs4_open_reclaim,
6931 .recover_lock = nfs4_lock_reclaim,
6932 .establish_clid = nfs41_init_clientid,
6933 .get_clid_cred = nfs4_get_exchange_id_cred,
6934 .reclaim_complete = nfs41_proc_reclaim_complete,
6935 .detect_trunking = nfs41_discover_server_trunking,
6936 };
6937 #endif /* CONFIG_NFS_V4_1 */
6938
6939 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
6940 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6941 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6942 .recover_open = nfs4_open_expired,
6943 .recover_lock = nfs4_lock_expired,
6944 .establish_clid = nfs4_init_clientid,
6945 .get_clid_cred = nfs4_get_setclientid_cred,
6946 };
6947
6948 #if defined(CONFIG_NFS_V4_1)
6949 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
6950 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6951 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6952 .recover_open = nfs41_open_expired,
6953 .recover_lock = nfs41_lock_expired,
6954 .establish_clid = nfs41_init_clientid,
6955 .get_clid_cred = nfs4_get_exchange_id_cred,
6956 };
6957 #endif /* CONFIG_NFS_V4_1 */
6958
6959 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
6960 .sched_state_renewal = nfs4_proc_async_renew,
6961 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
6962 .renew_lease = nfs4_proc_renew,
6963 };
6964
6965 #if defined(CONFIG_NFS_V4_1)
6966 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
6967 .sched_state_renewal = nfs41_proc_async_sequence,
6968 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
6969 .renew_lease = nfs4_proc_sequence,
6970 };
6971 #endif
6972
6973 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
6974 .minor_version = 0,
6975 .init_caps = NFS_CAP_READDIRPLUS
6976 | NFS_CAP_ATOMIC_OPEN
6977 | NFS_CAP_CHANGE_ATTR
6978 | NFS_CAP_POSIX_LOCK,
6979 .call_sync = _nfs4_call_sync,
6980 .match_stateid = nfs4_match_stateid,
6981 .find_root_sec = nfs4_find_root_sec,
6982 .free_lock_state = nfs4_release_lockowner,
6983 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
6984 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
6985 .state_renewal_ops = &nfs40_state_renewal_ops,
6986 };
6987
6988 #if defined(CONFIG_NFS_V4_1)
6989 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
6990 .minor_version = 1,
6991 .init_caps = NFS_CAP_READDIRPLUS
6992 | NFS_CAP_ATOMIC_OPEN
6993 | NFS_CAP_CHANGE_ATTR
6994 | NFS_CAP_POSIX_LOCK
6995 | NFS_CAP_STATEID_NFSV41
6996 | NFS_CAP_ATOMIC_OPEN_V1,
6997 .call_sync = nfs4_call_sync_sequence,
6998 .match_stateid = nfs41_match_stateid,
6999 .find_root_sec = nfs41_find_root_sec,
7000 .free_lock_state = nfs41_free_lock_state,
7001 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
7002 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
7003 .state_renewal_ops = &nfs41_state_renewal_ops,
7004 };
7005 #endif
7006
7007 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
7008 [0] = &nfs_v4_0_minor_ops,
7009 #if defined(CONFIG_NFS_V4_1)
7010 [1] = &nfs_v4_1_minor_ops,
7011 #endif
7012 };
7013
7014 const struct inode_operations nfs4_dir_inode_operations = {
7015 .create = nfs_create,
7016 .lookup = nfs_lookup,
7017 .atomic_open = nfs_atomic_open,
7018 .link = nfs_link,
7019 .unlink = nfs_unlink,
7020 .symlink = nfs_symlink,
7021 .mkdir = nfs_mkdir,
7022 .rmdir = nfs_rmdir,
7023 .mknod = nfs_mknod,
7024 .rename = nfs_rename,
7025 .permission = nfs_permission,
7026 .getattr = nfs_getattr,
7027 .setattr = nfs_setattr,
7028 .getxattr = generic_getxattr,
7029 .setxattr = generic_setxattr,
7030 .listxattr = generic_listxattr,
7031 .removexattr = generic_removexattr,
7032 };
7033
7034 static const struct inode_operations nfs4_file_inode_operations = {
7035 .permission = nfs_permission,
7036 .getattr = nfs_getattr,
7037 .setattr = nfs_setattr,
7038 .getxattr = generic_getxattr,
7039 .setxattr = generic_setxattr,
7040 .listxattr = generic_listxattr,
7041 .removexattr = generic_removexattr,
7042 };
7043
7044 const struct nfs_rpc_ops nfs_v4_clientops = {
7045 .version = 4, /* protocol version */
7046 .dentry_ops = &nfs4_dentry_operations,
7047 .dir_inode_ops = &nfs4_dir_inode_operations,
7048 .file_inode_ops = &nfs4_file_inode_operations,
7049 .file_ops = &nfs4_file_operations,
7050 .getroot = nfs4_proc_get_root,
7051 .submount = nfs4_submount,
7052 .try_mount = nfs4_try_mount,
7053 .getattr = nfs4_proc_getattr,
7054 .setattr = nfs4_proc_setattr,
7055 .lookup = nfs4_proc_lookup,
7056 .access = nfs4_proc_access,
7057 .readlink = nfs4_proc_readlink,
7058 .create = nfs4_proc_create,
7059 .remove = nfs4_proc_remove,
7060 .unlink_setup = nfs4_proc_unlink_setup,
7061 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
7062 .unlink_done = nfs4_proc_unlink_done,
7063 .rename = nfs4_proc_rename,
7064 .rename_setup = nfs4_proc_rename_setup,
7065 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
7066 .rename_done = nfs4_proc_rename_done,
7067 .link = nfs4_proc_link,
7068 .symlink = nfs4_proc_symlink,
7069 .mkdir = nfs4_proc_mkdir,
7070 .rmdir = nfs4_proc_remove,
7071 .readdir = nfs4_proc_readdir,
7072 .mknod = nfs4_proc_mknod,
7073 .statfs = nfs4_proc_statfs,
7074 .fsinfo = nfs4_proc_fsinfo,
7075 .pathconf = nfs4_proc_pathconf,
7076 .set_capabilities = nfs4_server_capabilities,
7077 .decode_dirent = nfs4_decode_dirent,
7078 .read_setup = nfs4_proc_read_setup,
7079 .read_pageio_init = pnfs_pageio_init_read,
7080 .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
7081 .read_done = nfs4_read_done,
7082 .write_setup = nfs4_proc_write_setup,
7083 .write_pageio_init = pnfs_pageio_init_write,
7084 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
7085 .write_done = nfs4_write_done,
7086 .commit_setup = nfs4_proc_commit_setup,
7087 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
7088 .commit_done = nfs4_commit_done,
7089 .lock = nfs4_proc_lock,
7090 .clear_acl_cache = nfs4_zap_acl_attr,
7091 .close_context = nfs4_close_context,
7092 .open_context = nfs4_atomic_open,
7093 .have_delegation = nfs4_have_delegation,
7094 .return_delegation = nfs4_inode_return_delegation,
7095 .alloc_client = nfs4_alloc_client,
7096 .init_client = nfs4_init_client,
7097 .free_client = nfs4_free_client,
7098 .create_server = nfs4_create_server,
7099 .clone_server = nfs_clone_server,
7100 };
7101
7102 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
7103 .prefix = XATTR_NAME_NFSV4_ACL,
7104 .list = nfs4_xattr_list_nfs4_acl,
7105 .get = nfs4_xattr_get_nfs4_acl,
7106 .set = nfs4_xattr_set_nfs4_acl,
7107 };
7108
7109 const struct xattr_handler *nfs4_xattr_handlers[] = {
7110 &nfs4_xattr_nfs4_acl_handler,
7111 NULL
7112 };
7113
7114 /*
7115 * Local variables:
7116 * c-basic-offset: 8
7117 * End:
7118 */