1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/sunrpc/gss_api.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/nfs_idmap.h>
56 #include <linux/sunrpc/bc_xprt.h>
57 #include <linux/xattr.h>
58 #include <linux/utsname.h>
59 #include <linux/freezer.h>
60
61 #include "nfs4_fs.h"
62 #include "delegation.h"
63 #include "internal.h"
64 #include "iostat.h"
65 #include "callback.h"
66 #include "pnfs.h"
67 #include "netns.h"
68
69 #define NFSDBG_FACILITY NFSDBG_PROC
70
71 #define NFS4_POLL_RETRY_MIN (HZ/10)
72 #define NFS4_POLL_RETRY_MAX (15*HZ)
73
74 #define NFS4_MAX_LOOP_ON_RECOVER (10)
75
76 static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
77
78 struct nfs4_opendata;
79 static int _nfs4_proc_open(struct nfs4_opendata *data);
80 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
81 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
82 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
83 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
84 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
85 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
86 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
87 struct nfs_fattr *fattr, struct iattr *sattr,
88 struct nfs4_state *state);
89 #ifdef CONFIG_NFS_V4_1
90 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
91 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
92 #endif
93 /* Prevent leaks of NFSv4 errors into userland */
94 static int nfs4_map_errors(int err)
95 {
96 if (err >= -1000)
97 return err;
98 switch (err) {
99 case -NFS4ERR_RESOURCE:
100 return -EREMOTEIO;
101 case -NFS4ERR_WRONGSEC:
102 return -EPERM;
103 case -NFS4ERR_BADOWNER:
104 case -NFS4ERR_BADNAME:
105 return -EINVAL;
106 case -NFS4ERR_SHARE_DENIED:
107 return -EACCES;
108 default:
109 dprintk("%s could not handle NFSv4 error %d\n",
110 __func__, -err);
111 break;
112 }
113 return -EIO;
114 }
115
116 /*
117 * This is our standard bitmap for GETATTR requests.
118 */
119 const u32 nfs4_fattr_bitmap[2] = {
120 FATTR4_WORD0_TYPE
121 | FATTR4_WORD0_CHANGE
122 | FATTR4_WORD0_SIZE
123 | FATTR4_WORD0_FSID
124 | FATTR4_WORD0_FILEID,
125 FATTR4_WORD1_MODE
126 | FATTR4_WORD1_NUMLINKS
127 | FATTR4_WORD1_OWNER
128 | FATTR4_WORD1_OWNER_GROUP
129 | FATTR4_WORD1_RAWDEV
130 | FATTR4_WORD1_SPACE_USED
131 | FATTR4_WORD1_TIME_ACCESS
132 | FATTR4_WORD1_TIME_METADATA
133 | FATTR4_WORD1_TIME_MODIFY
134 };
135
136 const u32 nfs4_statfs_bitmap[2] = {
137 FATTR4_WORD0_FILES_AVAIL
138 | FATTR4_WORD0_FILES_FREE
139 | FATTR4_WORD0_FILES_TOTAL,
140 FATTR4_WORD1_SPACE_AVAIL
141 | FATTR4_WORD1_SPACE_FREE
142 | FATTR4_WORD1_SPACE_TOTAL
143 };
144
145 const u32 nfs4_pathconf_bitmap[2] = {
146 FATTR4_WORD0_MAXLINK
147 | FATTR4_WORD0_MAXNAME,
148 0
149 };
150
151 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
152 | FATTR4_WORD0_MAXREAD
153 | FATTR4_WORD0_MAXWRITE
154 | FATTR4_WORD0_LEASE_TIME,
155 FATTR4_WORD1_TIME_DELTA
156 | FATTR4_WORD1_FS_LAYOUT_TYPES,
157 FATTR4_WORD2_LAYOUT_BLKSIZE
158 };
159
160 const u32 nfs4_fs_locations_bitmap[2] = {
161 FATTR4_WORD0_TYPE
162 | FATTR4_WORD0_CHANGE
163 | FATTR4_WORD0_SIZE
164 | FATTR4_WORD0_FSID
165 | FATTR4_WORD0_FILEID
166 | FATTR4_WORD0_FS_LOCATIONS,
167 FATTR4_WORD1_MODE
168 | FATTR4_WORD1_NUMLINKS
169 | FATTR4_WORD1_OWNER
170 | FATTR4_WORD1_OWNER_GROUP
171 | FATTR4_WORD1_RAWDEV
172 | FATTR4_WORD1_SPACE_USED
173 | FATTR4_WORD1_TIME_ACCESS
174 | FATTR4_WORD1_TIME_METADATA
175 | FATTR4_WORD1_TIME_MODIFY
176 | FATTR4_WORD1_MOUNTED_ON_FILEID
177 };
178
179 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
180 struct nfs4_readdir_arg *readdir)
181 {
182 __be32 *start, *p;
183
184 BUG_ON(readdir->count < 80);
185 if (cookie > 2) {
186 readdir->cookie = cookie;
187 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
188 return;
189 }
190
191 readdir->cookie = 0;
192 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
193 if (cookie == 2)
194 return;
195
196 /*
197 * NFSv4 servers do not return entries for '.' and '..'.
198 * Therefore, we fake these entries here. We let '.'
199 * have cookie 0 and '..' have cookie 1. Note that
200 * when talking to the server, we always send cookie 0
201 * instead of 1 or 2.
202 */
203 start = p = kmap_atomic(*readdir->pages);
204
205 if (cookie == 0) {
206 *p++ = xdr_one; /* next */
207 *p++ = xdr_zero; /* cookie, first word */
208 *p++ = xdr_one; /* cookie, second word */
209 *p++ = xdr_one; /* entry len */
210 memcpy(p, ".\0\0\0", 4); /* entry */
211 p++;
212 *p++ = xdr_one; /* bitmap length */
213 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
214 *p++ = htonl(8); /* attribute buffer length */
215 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
216 }
217
218 *p++ = xdr_one; /* next */
219 *p++ = xdr_zero; /* cookie, first word */
220 *p++ = xdr_two; /* cookie, second word */
221 *p++ = xdr_two; /* entry len */
222 memcpy(p, "..\0\0", 4); /* entry */
223 p++;
224 *p++ = xdr_one; /* bitmap length */
225 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
226 *p++ = htonl(8); /* attribute buffer length */
227 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
228
229 readdir->pgbase = (char *)p - (char *)start;
230 readdir->count -= readdir->pgbase;
231 kunmap_atomic(start);
232 }
233
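/*
 * Wait (killably) for the state manager to finish any recovery that is
 * currently running on this client, i.e. until NFS4CLNT_MANAGER_RUNNING
 * is cleared.
 */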
234 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
235 {
236 int res;
237
238 might_sleep();
239
240 res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
241 nfs_wait_bit_killable, TASK_KILLABLE);
242 return res;
243 }
244
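/*
 * Back off before retrying an operation: sleep for *timeout jiffies
 * (clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX]) and double
 * *timeout for the next attempt. Returns -ERESTARTSYS if a fatal
 * signal was received while sleeping.
 */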
245 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
246 {
247 int res = 0;
248
249 might_sleep();
250
251 if (*timeout <= 0)
252 *timeout = NFS4_POLL_RETRY_MIN;
253 if (*timeout > NFS4_POLL_RETRY_MAX)
254 *timeout = NFS4_POLL_RETRY_MAX;
255 freezable_schedule_timeout_killable(*timeout);
256 if (fatal_signal_pending(current))
257 res = -ERESTARTSYS;
258 *timeout <<= 1;
259 return res;
260 }
261
262 /* This is the error handling routine for processes that are allowed
263 * to sleep.
264 */
265 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
266 {
267 struct nfs_client *clp = server->nfs_client;
268 struct nfs4_state *state = exception->state;
269 struct inode *inode = exception->inode;
270 int ret = errorcode;
271
272 exception->retry = 0;
273 switch(errorcode) {
274 case 0:
275 return 0;
276 case -NFS4ERR_OPENMODE:
277 if (inode && nfs_have_delegation(inode, FMODE_READ)) {
278 nfs_inode_return_delegation(inode);
279 exception->retry = 1;
280 return 0;
281 }
282 if (state == NULL)
283 break;
284 nfs4_schedule_stateid_recovery(server, state);
285 goto wait_on_recovery;
286 case -NFS4ERR_DELEG_REVOKED:
287 case -NFS4ERR_ADMIN_REVOKED:
288 case -NFS4ERR_BAD_STATEID:
289 if (state == NULL)
290 break;
291 nfs_remove_bad_delegation(state->inode);
292 nfs4_schedule_stateid_recovery(server, state);
293 goto wait_on_recovery;
294 case -NFS4ERR_EXPIRED:
295 if (state != NULL)
296 nfs4_schedule_stateid_recovery(server, state);
297 case -NFS4ERR_STALE_STATEID:
298 case -NFS4ERR_STALE_CLIENTID:
299 nfs4_schedule_lease_recovery(clp);
300 goto wait_on_recovery;
301 #if defined(CONFIG_NFS_V4_1)
302 case -NFS4ERR_BADSESSION:
303 case -NFS4ERR_BADSLOT:
304 case -NFS4ERR_BAD_HIGH_SLOT:
305 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
306 case -NFS4ERR_DEADSESSION:
307 case -NFS4ERR_SEQ_FALSE_RETRY:
308 case -NFS4ERR_SEQ_MISORDERED:
309 dprintk("%s ERROR: %d Reset session\n", __func__,
310 errorcode);
311 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
312 exception->retry = 1;
313 break;
314 #endif /* defined(CONFIG_NFS_V4_1) */
315 case -NFS4ERR_FILE_OPEN:
316 if (exception->timeout > HZ) {
317 /* We have retried a decent amount, time to
318 * fail
319 */
320 ret = -EBUSY;
321 break;
322 }
323 case -NFS4ERR_GRACE:
324 case -NFS4ERR_DELAY:
325 case -EKEYEXPIRED:
326 ret = nfs4_delay(server->client, &exception->timeout);
327 if (ret != 0)
328 break;
329 case -NFS4ERR_RETRY_UNCACHED_REP:
330 case -NFS4ERR_OLD_STATEID:
331 exception->retry = 1;
332 break;
333 case -NFS4ERR_BADOWNER:
334 /* The following works around a Linux server bug! */
335 case -NFS4ERR_BADNAME:
336 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
337 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
338 exception->retry = 1;
339 printk(KERN_WARNING "NFS: v4 server %s "
340 "does not accept raw "
341 "uid/gids. "
342 "Reenabling the idmapper.\n",
343 server->nfs_client->cl_hostname);
344 }
345 }
346 /* We failed to handle the error */
347 return nfs4_map_errors(ret);
348 wait_on_recovery:
349 ret = nfs4_wait_clnt_recover(clp);
350 if (ret == 0)
351 exception->retry = 1;
352 return ret;
353 }
354
355
356 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
357 {
358 spin_lock(&clp->cl_lock);
359 if (time_before(clp->cl_last_renewal,timestamp))
360 clp->cl_last_renewal = timestamp;
361 spin_unlock(&clp->cl_lock);
362 }
363
364 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
365 {
366 do_renew_lease(server->nfs_client, timestamp);
367 }
368
369 #if defined(CONFIG_NFS_V4_1)
370
371 /*
372 * nfs4_free_slot - free a slot and efficiently update slot table.
373 *
374 * Freeing a slot is trivially done by clearing its respective bit
375 * in the bitmap.
376 * If the freed slotid equals highest_used_slotid we want to update it
377 * so that the server can size down the slot table if needed;
378 * otherwise we know that the highest_used_slotid is still in use.
379 * When updating highest_used_slotid there may be "holes" in the bitmap
380 * so we need to scan down from highest_used_slotid to 0 looking for the now
381 * highest slotid in use.
382 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
383 *
384 * Must be called while holding tbl->slot_tbl_lock
385 */
386 static void
387 nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
388 {
389 BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
390 /* clear used bit in bitmap */
391 __clear_bit(slotid, tbl->used_slots);
392
393 /* update highest_used_slotid when it is freed */
394 if (slotid == tbl->highest_used_slotid) {
395 slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
396 if (slotid < tbl->max_slots)
397 tbl->highest_used_slotid = slotid;
398 else
399 tbl->highest_used_slotid = NFS4_NO_SLOT;
400 }
401 dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
402 slotid, tbl->highest_used_slotid);
403 }
404
405 bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
406 {
407 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
408 return true;
409 }
410
411 /*
412 * Signal state manager thread if session fore channel is drained
413 */
414 static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
415 {
416 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
417 rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
418 nfs4_set_task_privileged, NULL);
419 return;
420 }
421
422 if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
423 return;
424
425 dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
426 complete(&ses->fc_slot_table.complete);
427 }
428
429 /*
430 * Signal state manager thread if session back channel is drained
431 */
432 void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
433 {
434 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
435 ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
436 return;
437 dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
438 complete(&ses->bc_slot_table.complete);
439 }
440
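/*
 * Release the fore channel slot held by a completed request, if one was
 * actually consumed, and re-check whether the session is now drained.
 */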
441 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
442 {
443 struct nfs4_slot_table *tbl;
444
445 tbl = &res->sr_session->fc_slot_table;
446 if (!res->sr_slot) {
447 /* just wake up the next waiter, since
448 * we may not have consumed a slot after all */
449 dprintk("%s: No slot\n", __func__);
450 return;
451 }
452
453 spin_lock(&tbl->slot_tbl_lock);
454 nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
455 nfs4_check_drain_fc_complete(res->sr_session);
456 spin_unlock(&tbl->slot_tbl_lock);
457 res->sr_slot = NULL;
458 }
459
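/*
 * Handle the result of the SEQUENCE operation: update the slot's
 * sequence number, renew the lease on success, retry on NFS4ERR_DELAY,
 * and free the slot. Returns 0 if the RPC call was restarted,
 * 1 otherwise.
 */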
460 static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
461 {
462 unsigned long timestamp;
463 struct nfs_client *clp;
464
465 /*
466 * sr_status remains 1 if an RPC level error occurred. The server
467 * may or may not have processed the sequence operation.
468 * Proceed as if the server received and processed the sequence
469 * operation.
470 */
471 if (res->sr_status == 1)
472 res->sr_status = NFS_OK;
473
474 /* don't increment the sequence number if the task wasn't sent */
475 if (!RPC_WAS_SENT(task))
476 goto out;
477
478 /* Check the SEQUENCE operation status */
479 switch (res->sr_status) {
480 case 0:
481 /* Update the slot's sequence and clientid lease timer */
482 ++res->sr_slot->seq_nr;
483 timestamp = res->sr_renewal_time;
484 clp = res->sr_session->clp;
485 do_renew_lease(clp, timestamp);
486 /* Check sequence flags */
487 if (res->sr_status_flags != 0)
488 nfs4_schedule_lease_recovery(clp);
489 break;
490 case -NFS4ERR_DELAY:
491 /* The server detected a resend of the RPC call and
492 * returned NFS4ERR_DELAY as per Section 2.10.6.2
493 * of RFC5661.
494 */
495 dprintk("%s: slot=%td seq=%d: Operation in progress\n",
496 __func__,
497 res->sr_slot - res->sr_session->fc_slot_table.slots,
498 res->sr_slot->seq_nr);
499 goto out_retry;
500 default:
501 /* Just update the slot sequence no. */
502 ++res->sr_slot->seq_nr;
503 }
504 out:
505 /* The session may be reset by one of the error handlers. */
506 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
507 nfs41_sequence_free_slot(res);
508 return 1;
509 out_retry:
510 if (!rpc_restart_call(task))
511 goto out;
512 rpc_delay(task, NFS4_POLL_RETRY_MAX);
513 return 0;
514 }
515
516 static int nfs4_sequence_done(struct rpc_task *task,
517 struct nfs4_sequence_res *res)
518 {
519 if (res->sr_session == NULL)
520 return 1;
521 return nfs41_sequence_done(task, res);
522 }
523
524 /*
525 * nfs4_find_slot - efficiently look for a free slot
526 *
527 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
528 * If found, we mark the slot as used, update the highest_used_slotid,
529 * and set up the sequence operation args accordingly.
530 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
531 *
532 * Note: must be called while holding the slot_tbl_lock.
533 */
534 static u32
535 nfs4_find_slot(struct nfs4_slot_table *tbl)
536 {
537 u32 slotid;
538 u32 ret_id = NFS4_NO_SLOT;
539
540 dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
541 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
542 tbl->max_slots);
543 slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
544 if (slotid >= tbl->max_slots)
545 goto out;
546 __set_bit(slotid, tbl->used_slots);
547 if (slotid > tbl->highest_used_slotid ||
548 tbl->highest_used_slotid == NFS4_NO_SLOT)
549 tbl->highest_used_slotid = slotid;
550 ret_id = slotid;
551 out:
552 dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
553 __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
554 return ret_id;
555 }
556
557 static void nfs41_init_sequence(struct nfs4_sequence_args *args,
558 struct nfs4_sequence_res *res, int cache_reply)
559 {
560 args->sa_session = NULL;
561 args->sa_cache_this = 0;
562 if (cache_reply)
563 args->sa_cache_this = 1;
564 res->sr_session = NULL;
565 res->sr_slot = NULL;
566 }
567
568 int nfs41_setup_sequence(struct nfs4_session *session,
569 struct nfs4_sequence_args *args,
570 struct nfs4_sequence_res *res,
571 struct rpc_task *task)
572 {
573 struct nfs4_slot *slot;
574 struct nfs4_slot_table *tbl;
575 u32 slotid;
576
577 dprintk("--> %s\n", __func__);
578 /* slot already allocated? */
579 if (res->sr_slot != NULL)
580 return 0;
581
582 tbl = &session->fc_slot_table;
583
584 spin_lock(&tbl->slot_tbl_lock);
585 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
586 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
587 /* The state manager will wait until the slot table is empty */
588 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
589 spin_unlock(&tbl->slot_tbl_lock);
590 dprintk("%s session is draining\n", __func__);
591 return -EAGAIN;
592 }
593
594 if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
595 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
596 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
597 spin_unlock(&tbl->slot_tbl_lock);
598 dprintk("%s enforce FIFO order\n", __func__);
599 return -EAGAIN;
600 }
601
602 slotid = nfs4_find_slot(tbl);
603 if (slotid == NFS4_NO_SLOT) {
604 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
605 spin_unlock(&tbl->slot_tbl_lock);
606 dprintk("<-- %s: no free slots\n", __func__);
607 return -EAGAIN;
608 }
609 spin_unlock(&tbl->slot_tbl_lock);
610
611 rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
612 slot = tbl->slots + slotid;
613 args->sa_session = session;
614 args->sa_slotid = slotid;
615
616 dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
617
618 res->sr_session = session;
619 res->sr_slot = slot;
620 res->sr_renewal_time = jiffies;
621 res->sr_status_flags = 0;
622 /*
623 * sr_status is only set in decode_sequence, and so will remain
624 * set to 1 if an rpc level failure occurs.
625 */
626 res->sr_status = 1;
627 return 0;
628 }
629 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
630
631 int nfs4_setup_sequence(const struct nfs_server *server,
632 struct nfs4_sequence_args *args,
633 struct nfs4_sequence_res *res,
634 struct rpc_task *task)
635 {
636 struct nfs4_session *session = nfs4_get_session(server);
637 int ret = 0;
638
639 if (session == NULL)
640 goto out;
641
642 dprintk("--> %s clp %p session %p sr_slot %td\n",
643 __func__, session->clp, session, res->sr_slot ?
644 res->sr_slot - session->fc_slot_table.slots : -1);
645
646 ret = nfs41_setup_sequence(session, args, res, task);
647 out:
648 dprintk("<-- %s status=%d\n", __func__, ret);
649 return ret;
650 }
651
652 struct nfs41_call_sync_data {
653 const struct nfs_server *seq_server;
654 struct nfs4_sequence_args *seq_args;
655 struct nfs4_sequence_res *seq_res;
656 };
657
658 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
659 {
660 struct nfs41_call_sync_data *data = calldata;
661
662 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
663
664 if (nfs4_setup_sequence(data->seq_server, data->seq_args,
665 data->seq_res, task))
666 return;
667 rpc_call_start(task);
668 }
669
670 static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
671 {
672 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
673 nfs41_call_sync_prepare(task, calldata);
674 }
675
676 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
677 {
678 struct nfs41_call_sync_data *data = calldata;
679
680 nfs41_sequence_done(task, data->seq_res);
681 }
682
683 static const struct rpc_call_ops nfs41_call_sync_ops = {
684 .rpc_call_prepare = nfs41_call_sync_prepare,
685 .rpc_call_done = nfs41_call_sync_done,
686 };
687
688 static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
689 .rpc_call_prepare = nfs41_call_priv_sync_prepare,
690 .rpc_call_done = nfs41_call_sync_done,
691 };
692
693 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
694 struct nfs_server *server,
695 struct rpc_message *msg,
696 struct nfs4_sequence_args *args,
697 struct nfs4_sequence_res *res,
698 int privileged)
699 {
700 int ret;
701 struct rpc_task *task;
702 struct nfs41_call_sync_data data = {
703 .seq_server = server,
704 .seq_args = args,
705 .seq_res = res,
706 };
707 struct rpc_task_setup task_setup = {
708 .rpc_client = clnt,
709 .rpc_message = msg,
710 .callback_ops = &nfs41_call_sync_ops,
711 .callback_data = &data
712 };
713
714 if (privileged)
715 task_setup.callback_ops = &nfs41_call_priv_sync_ops;
716 task = rpc_run_task(&task_setup);
717 if (IS_ERR(task))
718 ret = PTR_ERR(task);
719 else {
720 ret = task->tk_status;
721 rpc_put_task(task);
722 }
723 return ret;
724 }
725
726 int _nfs4_call_sync_session(struct rpc_clnt *clnt,
727 struct nfs_server *server,
728 struct rpc_message *msg,
729 struct nfs4_sequence_args *args,
730 struct nfs4_sequence_res *res,
731 int cache_reply)
732 {
733 nfs41_init_sequence(args, res, cache_reply);
734 return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
735 }
736
737 #else
738 static inline
739 void nfs41_init_sequence(struct nfs4_sequence_args *args,
740 struct nfs4_sequence_res *res, int cache_reply)
741 {
742 }
743
744 static int nfs4_sequence_done(struct rpc_task *task,
745 struct nfs4_sequence_res *res)
746 {
747 return 1;
748 }
749 #endif /* CONFIG_NFS_V4_1 */
750
751 int _nfs4_call_sync(struct rpc_clnt *clnt,
752 struct nfs_server *server,
753 struct rpc_message *msg,
754 struct nfs4_sequence_args *args,
755 struct nfs4_sequence_res *res,
756 int cache_reply)
757 {
758 nfs41_init_sequence(args, res, cache_reply);
759 return rpc_call_sync(clnt, msg, 0);
760 }
761
762 static inline
763 int nfs4_call_sync(struct rpc_clnt *clnt,
764 struct nfs_server *server,
765 struct rpc_message *msg,
766 struct nfs4_sequence_args *args,
767 struct nfs4_sequence_res *res,
768 int cache_reply)
769 {
770 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
771 args, res, cache_reply);
772 }
773
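/*
 * Apply the change_info4 returned by a directory-modifying operation:
 * invalidate cached directory attributes and data, and force a lookup
 * revalidation if the change was not atomic with respect to our cached
 * change attribute.
 */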
774 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
775 {
776 struct nfs_inode *nfsi = NFS_I(dir);
777
778 spin_lock(&dir->i_lock);
779 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
780 if (!cinfo->atomic || cinfo->before != dir->i_version)
781 nfs_force_lookup_revalidate(dir);
782 dir->i_version = cinfo->after;
783 spin_unlock(&dir->i_lock);
784 }
785
786 struct nfs4_opendata {
787 struct kref kref;
788 struct nfs_openargs o_arg;
789 struct nfs_openres o_res;
790 struct nfs_open_confirmargs c_arg;
791 struct nfs_open_confirmres c_res;
792 struct nfs4_string owner_name;
793 struct nfs4_string group_name;
794 struct nfs_fattr f_attr;
795 struct dentry *dir;
796 struct dentry *dentry;
797 struct nfs4_state_owner *owner;
798 struct nfs4_state *state;
799 struct iattr attrs;
800 unsigned long timestamp;
801 unsigned int rpc_done : 1;
802 int rpc_status;
803 int cancelled;
804 };
805
806
807 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
808 {
809 p->o_res.f_attr = &p->f_attr;
810 p->o_res.seqid = p->o_arg.seqid;
811 p->c_res.seqid = p->c_arg.seqid;
812 p->o_res.server = p->o_arg.server;
813 nfs_fattr_init(&p->f_attr);
814 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
815 }
816
817 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
818 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
819 const struct iattr *attrs,
820 gfp_t gfp_mask)
821 {
822 struct dentry *parent = dget_parent(dentry);
823 struct inode *dir = parent->d_inode;
824 struct nfs_server *server = NFS_SERVER(dir);
825 struct nfs4_opendata *p;
826
827 p = kzalloc(sizeof(*p), gfp_mask);
828 if (p == NULL)
829 goto err;
830 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
831 if (p->o_arg.seqid == NULL)
832 goto err_free;
833 nfs_sb_active(dentry->d_sb);
834 p->dentry = dget(dentry);
835 p->dir = parent;
836 p->owner = sp;
837 atomic_inc(&sp->so_count);
838 p->o_arg.fh = NFS_FH(dir);
839 p->o_arg.open_flags = flags;
840 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
841 p->o_arg.clientid = server->nfs_client->cl_clientid;
842 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
843 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
844 p->o_arg.name = &dentry->d_name;
845 p->o_arg.server = server;
846 p->o_arg.bitmask = server->attr_bitmask;
847 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
848 if (attrs != NULL && attrs->ia_valid != 0) {
849 __be32 verf[2];
850
851 p->o_arg.u.attrs = &p->attrs;
852 memcpy(&p->attrs, attrs, sizeof(p->attrs));
853
854 verf[0] = jiffies;
855 verf[1] = current->pid;
856 memcpy(p->o_arg.u.verifier.data, verf,
857 sizeof(p->o_arg.u.verifier.data));
858 }
859 p->c_arg.fh = &p->o_res.fh;
860 p->c_arg.stateid = &p->o_res.stateid;
861 p->c_arg.seqid = p->o_arg.seqid;
862 nfs4_init_opendata_res(p);
863 kref_init(&p->kref);
864 return p;
865 err_free:
866 kfree(p);
867 err:
868 dput(parent);
869 return NULL;
870 }
871
872 static void nfs4_opendata_free(struct kref *kref)
873 {
874 struct nfs4_opendata *p = container_of(kref,
875 struct nfs4_opendata, kref);
876 struct super_block *sb = p->dentry->d_sb;
877
878 nfs_free_seqid(p->o_arg.seqid);
879 if (p->state != NULL)
880 nfs4_put_open_state(p->state);
881 nfs4_put_state_owner(p->owner);
882 dput(p->dir);
883 dput(p->dentry);
884 nfs_sb_deactive(sb);
885 nfs_fattr_free_names(&p->f_attr);
886 kfree(p);
887 }
888
889 static void nfs4_opendata_put(struct nfs4_opendata *p)
890 {
891 if (p != NULL)
892 kref_put(&p->kref, nfs4_opendata_free);
893 }
894
895 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
896 {
897 int ret;
898
899 ret = rpc_wait_for_completion_task(task);
900 return ret;
901 }
902
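/*
 * Check whether an existing open state already covers the requested
 * open mode, so that the OPEN call to the server can be skipped.
 * O_EXCL and O_TRUNC opens always go to the server.
 */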
903 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
904 {
905 int ret = 0;
906
907 if (open_mode & (O_EXCL|O_TRUNC))
908 goto out;
909 switch (mode & (FMODE_READ|FMODE_WRITE)) {
910 case FMODE_READ:
911 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
912 && state->n_rdonly != 0;
913 break;
914 case FMODE_WRITE:
915 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
916 && state->n_wronly != 0;
917 break;
918 case FMODE_READ|FMODE_WRITE:
919 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
920 && state->n_rdwr != 0;
921 }
922 out:
923 return ret;
924 }
925
926 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
927 {
928 if (delegation == NULL)
929 return 0;
930 if ((delegation->type & fmode) != fmode)
931 return 0;
932 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
933 return 0;
934 nfs_mark_delegation_referenced(delegation);
935 return 1;
936 }
937
938 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
939 {
940 switch (fmode) {
941 case FMODE_WRITE:
942 state->n_wronly++;
943 break;
944 case FMODE_READ:
945 state->n_rdonly++;
946 break;
947 case FMODE_READ|FMODE_WRITE:
948 state->n_rdwr++;
949 }
950 nfs4_state_set_mode_locked(state, state->state | fmode);
951 }
952
953 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
954 {
955 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
956 nfs4_stateid_copy(&state->stateid, stateid);
957 nfs4_stateid_copy(&state->open_stateid, stateid);
958 switch (fmode) {
959 case FMODE_READ:
960 set_bit(NFS_O_RDONLY_STATE, &state->flags);
961 break;
962 case FMODE_WRITE:
963 set_bit(NFS_O_WRONLY_STATE, &state->flags);
964 break;
965 case FMODE_READ|FMODE_WRITE:
966 set_bit(NFS_O_RDWR_STATE, &state->flags);
967 }
968 }
969
970 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
971 {
972 write_seqlock(&state->seqlock);
973 nfs_set_open_stateid_locked(state, stateid, fmode);
974 write_sequnlock(&state->seqlock);
975 }
976
977 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
978 {
979 /*
980 * Protect the call to nfs4_state_set_mode_locked and
981 * serialise the stateid update
982 */
983 write_seqlock(&state->seqlock);
984 if (deleg_stateid != NULL) {
985 nfs4_stateid_copy(&state->stateid, deleg_stateid);
986 set_bit(NFS_DELEGATED_STATE, &state->flags);
987 }
988 if (open_stateid != NULL)
989 nfs_set_open_stateid_locked(state, open_stateid, fmode);
990 write_sequnlock(&state->seqlock);
991 spin_lock(&state->owner->so_lock);
992 update_open_stateflags(state, fmode);
993 spin_unlock(&state->owner->so_lock);
994 }
995
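/*
 * Record the stateid returned by an OPEN, preferring the delegation
 * stateid if a delegation covering this open mode is still held.
 * Returns 1 if the state was updated, 0 otherwise.
 */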
996 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
997 {
998 struct nfs_inode *nfsi = NFS_I(state->inode);
999 struct nfs_delegation *deleg_cur;
1000 int ret = 0;
1001
1002 fmode &= (FMODE_READ|FMODE_WRITE);
1003
1004 rcu_read_lock();
1005 deleg_cur = rcu_dereference(nfsi->delegation);
1006 if (deleg_cur == NULL)
1007 goto no_delegation;
1008
1009 spin_lock(&deleg_cur->lock);
1010 if (nfsi->delegation != deleg_cur ||
1011 (deleg_cur->type & fmode) != fmode)
1012 goto no_delegation_unlock;
1013
1014 if (delegation == NULL)
1015 delegation = &deleg_cur->stateid;
1016 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1017 goto no_delegation_unlock;
1018
1019 nfs_mark_delegation_referenced(deleg_cur);
1020 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1021 ret = 1;
1022 no_delegation_unlock:
1023 spin_unlock(&deleg_cur->lock);
1024 no_delegation:
1025 rcu_read_unlock();
1026
1027 if (!ret && open_stateid != NULL) {
1028 __update_open_stateid(state, open_stateid, NULL, fmode);
1029 ret = 1;
1030 }
1031
1032 return ret;
1033 }
1034
1035
1036 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1037 {
1038 struct nfs_delegation *delegation;
1039
1040 rcu_read_lock();
1041 delegation = rcu_dereference(NFS_I(inode)->delegation);
1042 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1043 rcu_read_unlock();
1044 return;
1045 }
1046 rcu_read_unlock();
1047 nfs_inode_return_delegation(inode);
1048 }
1049
1050 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1051 {
1052 struct nfs4_state *state = opendata->state;
1053 struct nfs_inode *nfsi = NFS_I(state->inode);
1054 struct nfs_delegation *delegation;
1055 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
1056 fmode_t fmode = opendata->o_arg.fmode;
1057 nfs4_stateid stateid;
1058 int ret = -EAGAIN;
1059
1060 for (;;) {
1061 if (can_open_cached(state, fmode, open_mode)) {
1062 spin_lock(&state->owner->so_lock);
1063 if (can_open_cached(state, fmode, open_mode)) {
1064 update_open_stateflags(state, fmode);
1065 spin_unlock(&state->owner->so_lock);
1066 goto out_return_state;
1067 }
1068 spin_unlock(&state->owner->so_lock);
1069 }
1070 rcu_read_lock();
1071 delegation = rcu_dereference(nfsi->delegation);
1072 if (!can_open_delegated(delegation, fmode)) {
1073 rcu_read_unlock();
1074 break;
1075 }
1076 /* Save the delegation */
1077 nfs4_stateid_copy(&stateid, &delegation->stateid);
1078 rcu_read_unlock();
1079 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1080 if (ret != 0)
1081 goto out;
1082 ret = -EAGAIN;
1083
1084 /* Try to update the stateid using the delegation */
1085 if (update_open_stateid(state, NULL, &stateid, fmode))
1086 goto out_return_state;
1087 }
1088 out:
1089 return ERR_PTR(ret);
1090 out_return_state:
1091 atomic_inc(&state->count);
1092 return state;
1093 }
1094
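/*
 * Convert the result of an OPEN call into a referenced nfs4_state:
 * instantiate the inode, record any delegation the server handed out,
 * and update the open stateid. Falls back to a cached open if the RPC
 * never completed.
 */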
1095 static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1096 {
1097 struct inode *inode;
1098 struct nfs4_state *state = NULL;
1099 struct nfs_delegation *delegation;
1100 int ret;
1101
1102 if (!data->rpc_done) {
1103 state = nfs4_try_open_cached(data);
1104 goto out;
1105 }
1106
1107 ret = -EAGAIN;
1108 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1109 goto err;
1110 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1111 ret = PTR_ERR(inode);
1112 if (IS_ERR(inode))
1113 goto err;
1114 ret = -ENOMEM;
1115 state = nfs4_get_open_state(inode, data->owner);
1116 if (state == NULL)
1117 goto err_put_inode;
1118 if (data->o_res.delegation_type != 0) {
1119 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
1120 int delegation_flags = 0;
1121
1122 rcu_read_lock();
1123 delegation = rcu_dereference(NFS_I(inode)->delegation);
1124 if (delegation)
1125 delegation_flags = delegation->flags;
1126 rcu_read_unlock();
1127 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1128 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1129 "returning a delegation for "
1130 "OPEN(CLAIM_DELEGATE_CUR)\n",
1131 clp->cl_hostname);
1132 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1133 nfs_inode_set_delegation(state->inode,
1134 data->owner->so_cred,
1135 &data->o_res);
1136 else
1137 nfs_inode_reclaim_delegation(state->inode,
1138 data->owner->so_cred,
1139 &data->o_res);
1140 }
1141
1142 update_open_stateid(state, &data->o_res.stateid, NULL,
1143 data->o_arg.fmode);
1144 iput(inode);
1145 out:
1146 return state;
1147 err_put_inode:
1148 iput(inode);
1149 err:
1150 return ERR_PTR(ret);
1151 }
1152
1153 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1154 {
1155 struct nfs_inode *nfsi = NFS_I(state->inode);
1156 struct nfs_open_context *ctx;
1157
1158 spin_lock(&state->inode->i_lock);
1159 list_for_each_entry(ctx, &nfsi->open_files, list) {
1160 if (ctx->state != state)
1161 continue;
1162 get_nfs_open_context(ctx);
1163 spin_unlock(&state->inode->i_lock);
1164 return ctx;
1165 }
1166 spin_unlock(&state->inode->i_lock);
1167 return ERR_PTR(-ENOENT);
1168 }
1169
1170 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1171 {
1172 struct nfs4_opendata *opendata;
1173
1174 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1175 if (opendata == NULL)
1176 return ERR_PTR(-ENOMEM);
1177 opendata->state = state;
1178 atomic_inc(&state->count);
1179 return opendata;
1180 }
1181
1182 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1183 {
1184 struct nfs4_state *newstate;
1185 int ret;
1186
1187 opendata->o_arg.open_flags = 0;
1188 opendata->o_arg.fmode = fmode;
1189 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1190 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1191 nfs4_init_opendata_res(opendata);
1192 ret = _nfs4_recover_proc_open(opendata);
1193 if (ret != 0)
1194 return ret;
1195 newstate = nfs4_opendata_to_nfs4_state(opendata);
1196 if (IS_ERR(newstate))
1197 return PTR_ERR(newstate);
1198 nfs4_close_state(newstate, fmode);
1199 *res = newstate;
1200 return 0;
1201 }
1202
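/*
 * Re-establish every open mode (read/write, write-only, read-only) that
 * is still in use on @state by replaying OPEN calls, then make sure
 * state->stateid reflects the new open stateid unless a delegation was
 * re-acquired.
 */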
1203 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1204 {
1205 struct nfs4_state *newstate;
1206 int ret;
1207
1208 /* memory barrier prior to reading state->n_* */
1209 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1210 smp_rmb();
1211 if (state->n_rdwr != 0) {
1212 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1213 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1214 if (ret != 0)
1215 return ret;
1216 if (newstate != state)
1217 return -ESTALE;
1218 }
1219 if (state->n_wronly != 0) {
1220 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1221 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1222 if (ret != 0)
1223 return ret;
1224 if (newstate != state)
1225 return -ESTALE;
1226 }
1227 if (state->n_rdonly != 0) {
1228 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1229 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1230 if (ret != 0)
1231 return ret;
1232 if (newstate != state)
1233 return -ESTALE;
1234 }
1235 /*
1236 * We may have performed cached opens for all three recoveries.
1237 * Check if we need to update the current stateid.
1238 */
1239 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1240 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1241 write_seqlock(&state->seqlock);
1242 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1243 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1244 write_sequnlock(&state->seqlock);
1245 }
1246 return 0;
1247 }
1248
1249 /*
1250 * OPEN_RECLAIM:
1251 * reclaim state on the server after a reboot.
1252 */
1253 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1254 {
1255 struct nfs_delegation *delegation;
1256 struct nfs4_opendata *opendata;
1257 fmode_t delegation_type = 0;
1258 int status;
1259
1260 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1261 if (IS_ERR(opendata))
1262 return PTR_ERR(opendata);
1263 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1264 opendata->o_arg.fh = NFS_FH(state->inode);
1265 rcu_read_lock();
1266 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1267 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1268 delegation_type = delegation->type;
1269 rcu_read_unlock();
1270 opendata->o_arg.u.delegation_type = delegation_type;
1271 status = nfs4_open_recover(opendata, state);
1272 nfs4_opendata_put(opendata);
1273 return status;
1274 }
1275
1276 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1277 {
1278 struct nfs_server *server = NFS_SERVER(state->inode);
1279 struct nfs4_exception exception = { };
1280 int err;
1281 do {
1282 err = _nfs4_do_open_reclaim(ctx, state);
1283 if (err != -NFS4ERR_DELAY)
1284 break;
1285 nfs4_handle_exception(server, err, &exception);
1286 } while (exception.retry);
1287 return err;
1288 }
1289
1290 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1291 {
1292 struct nfs_open_context *ctx;
1293 int ret;
1294
1295 ctx = nfs4_state_find_open_context(state);
1296 if (IS_ERR(ctx))
1297 return PTR_ERR(ctx);
1298 ret = nfs4_do_open_reclaim(ctx, state);
1299 put_nfs_open_context(ctx);
1300 return ret;
1301 }
1302
1303 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1304 {
1305 struct nfs4_opendata *opendata;
1306 int ret;
1307
1308 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1309 if (IS_ERR(opendata))
1310 return PTR_ERR(opendata);
1311 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1312 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1313 ret = nfs4_open_recover(opendata, state);
1314 nfs4_opendata_put(opendata);
1315 return ret;
1316 }
1317
1318 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1319 {
1320 struct nfs4_exception exception = { };
1321 struct nfs_server *server = NFS_SERVER(state->inode);
1322 int err;
1323 do {
1324 err = _nfs4_open_delegation_recall(ctx, state, stateid);
1325 switch (err) {
1326 case 0:
1327 case -ENOENT:
1328 case -ESTALE:
1329 goto out;
1330 case -NFS4ERR_BADSESSION:
1331 case -NFS4ERR_BADSLOT:
1332 case -NFS4ERR_BAD_HIGH_SLOT:
1333 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1334 case -NFS4ERR_DEADSESSION:
1335 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1336 goto out;
1337 case -NFS4ERR_STALE_CLIENTID:
1338 case -NFS4ERR_STALE_STATEID:
1339 case -NFS4ERR_EXPIRED:
1340 /* Don't recall a delegation if it was lost */
1341 nfs4_schedule_lease_recovery(server->nfs_client);
1342 goto out;
1343 case -ERESTARTSYS:
1344 /*
1345 * The show must go on: exit, but mark the
1346 * stateid as needing recovery.
1347 */
1348 case -NFS4ERR_DELEG_REVOKED:
1349 case -NFS4ERR_ADMIN_REVOKED:
1350 case -NFS4ERR_BAD_STATEID:
1351 nfs_inode_find_state_and_recover(state->inode,
1352 stateid);
1353 nfs4_schedule_stateid_recovery(server, state);
1354 case -EKEYEXPIRED:
1355 /*
1356 * User RPCSEC_GSS context has expired.
1357 * We cannot recover this stateid now, so
1358 * skip it and allow the recovery thread to
1359 * proceed.
1360 */
1361 case -ENOMEM:
1362 err = 0;
1363 goto out;
1364 }
1365 err = nfs4_handle_exception(server, err, &exception);
1366 } while (exception.retry);
1367 out:
1368 return err;
1369 }
1370
1371 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1372 {
1373 struct nfs4_opendata *data = calldata;
1374
1375 data->rpc_status = task->tk_status;
1376 if (data->rpc_status == 0) {
1377 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1378 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1379 renew_lease(data->o_res.server, data->timestamp);
1380 data->rpc_done = 1;
1381 }
1382 }
1383
1384 static void nfs4_open_confirm_release(void *calldata)
1385 {
1386 struct nfs4_opendata *data = calldata;
1387 struct nfs4_state *state = NULL;
1388
1389 /* If this request hasn't been cancelled, do nothing */
1390 if (data->cancelled == 0)
1391 goto out_free;
1392 /* In case of error, no cleanup! */
1393 if (!data->rpc_done)
1394 goto out_free;
1395 state = nfs4_opendata_to_nfs4_state(data);
1396 if (!IS_ERR(state))
1397 nfs4_close_state(state, data->o_arg.fmode);
1398 out_free:
1399 nfs4_opendata_put(data);
1400 }
1401
1402 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1403 .rpc_call_done = nfs4_open_confirm_done,
1404 .rpc_release = nfs4_open_confirm_release,
1405 };
1406
1407 /*
1408 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1409 */
1410 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1411 {
1412 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1413 struct rpc_task *task;
1414 struct rpc_message msg = {
1415 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1416 .rpc_argp = &data->c_arg,
1417 .rpc_resp = &data->c_res,
1418 .rpc_cred = data->owner->so_cred,
1419 };
1420 struct rpc_task_setup task_setup_data = {
1421 .rpc_client = server->client,
1422 .rpc_message = &msg,
1423 .callback_ops = &nfs4_open_confirm_ops,
1424 .callback_data = data,
1425 .workqueue = nfsiod_workqueue,
1426 .flags = RPC_TASK_ASYNC,
1427 };
1428 int status;
1429
1430 kref_get(&data->kref);
1431 data->rpc_done = 0;
1432 data->rpc_status = 0;
1433 data->timestamp = jiffies;
1434 task = rpc_run_task(&task_setup_data);
1435 if (IS_ERR(task))
1436 return PTR_ERR(task);
1437 status = nfs4_wait_for_completion_rpc_task(task);
1438 if (status != 0) {
1439 data->cancelled = 1;
1440 smp_wmb();
1441 } else
1442 status = data->rpc_status;
1443 rpc_put_task(task);
1444 return status;
1445 }
1446
1447 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1448 {
1449 struct nfs4_opendata *data = calldata;
1450 struct nfs4_state_owner *sp = data->owner;
1451
1452 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1453 return;
1454 /*
1455 * Check if we still need to send an OPEN call, or if we can use
1456 * a delegation instead.
1457 */
1458 if (data->state != NULL) {
1459 struct nfs_delegation *delegation;
1460
1461 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1462 goto out_no_action;
1463 rcu_read_lock();
1464 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1465 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1466 can_open_delegated(delegation, data->o_arg.fmode))
1467 goto unlock_no_action;
1468 rcu_read_unlock();
1469 }
1470 /* Update client id. */
1471 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1472 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1473 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1474 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1475 }
1476 data->timestamp = jiffies;
1477 if (nfs4_setup_sequence(data->o_arg.server,
1478 &data->o_arg.seq_args,
1479 &data->o_res.seq_res, task))
1480 return;
1481 rpc_call_start(task);
1482 return;
1483 unlock_no_action:
1484 rcu_read_unlock();
1485 out_no_action:
1486 task->tk_action = NULL;
1487
1488 }
1489
1490 static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1491 {
1492 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1493 nfs4_open_prepare(task, calldata);
1494 }
1495
1496 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1497 {
1498 struct nfs4_opendata *data = calldata;
1499
1500 data->rpc_status = task->tk_status;
1501
1502 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1503 return;
1504
1505 if (task->tk_status == 0) {
1506 switch (data->o_res.f_attr->mode & S_IFMT) {
1507 case S_IFREG:
1508 break;
1509 case S_IFLNK:
1510 data->rpc_status = -ELOOP;
1511 break;
1512 case S_IFDIR:
1513 data->rpc_status = -EISDIR;
1514 break;
1515 default:
1516 data->rpc_status = -ENOTDIR;
1517 }
1518 renew_lease(data->o_res.server, data->timestamp);
1519 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1520 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1521 }
1522 data->rpc_done = 1;
1523 }
1524
1525 static void nfs4_open_release(void *calldata)
1526 {
1527 struct nfs4_opendata *data = calldata;
1528 struct nfs4_state *state = NULL;
1529
1530 /* If this request hasn't been cancelled, do nothing */
1531 if (data->cancelled == 0)
1532 goto out_free;
1533 /* In case of error, no cleanup! */
1534 if (data->rpc_status != 0 || !data->rpc_done)
1535 goto out_free;
1536 /* In case we need an open_confirm, no cleanup! */
1537 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1538 goto out_free;
1539 state = nfs4_opendata_to_nfs4_state(data);
1540 if (!IS_ERR(state))
1541 nfs4_close_state(state, data->o_arg.fmode);
1542 out_free:
1543 nfs4_opendata_put(data);
1544 }
1545
1546 static const struct rpc_call_ops nfs4_open_ops = {
1547 .rpc_call_prepare = nfs4_open_prepare,
1548 .rpc_call_done = nfs4_open_done,
1549 .rpc_release = nfs4_open_release,
1550 };
1551
1552 static const struct rpc_call_ops nfs4_recover_open_ops = {
1553 .rpc_call_prepare = nfs4_recover_open_prepare,
1554 .rpc_call_done = nfs4_open_done,
1555 .rpc_release = nfs4_open_release,
1556 };
1557
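/*
 * Issue the OPEN compound as an asynchronous RPC task and wait for it
 * to complete. If the wait is interrupted, the request is flagged as
 * cancelled so that the release callback cleans up any state the server
 * may still return.
 */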
1558 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1559 {
1560 struct inode *dir = data->dir->d_inode;
1561 struct nfs_server *server = NFS_SERVER(dir);
1562 struct nfs_openargs *o_arg = &data->o_arg;
1563 struct nfs_openres *o_res = &data->o_res;
1564 struct rpc_task *task;
1565 struct rpc_message msg = {
1566 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1567 .rpc_argp = o_arg,
1568 .rpc_resp = o_res,
1569 .rpc_cred = data->owner->so_cred,
1570 };
1571 struct rpc_task_setup task_setup_data = {
1572 .rpc_client = server->client,
1573 .rpc_message = &msg,
1574 .callback_ops = &nfs4_open_ops,
1575 .callback_data = data,
1576 .workqueue = nfsiod_workqueue,
1577 .flags = RPC_TASK_ASYNC,
1578 };
1579 int status;
1580
1581 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1582 kref_get(&data->kref);
1583 data->rpc_done = 0;
1584 data->rpc_status = 0;
1585 data->cancelled = 0;
1586 if (isrecover)
1587 task_setup_data.callback_ops = &nfs4_recover_open_ops;
1588 task = rpc_run_task(&task_setup_data);
1589 if (IS_ERR(task))
1590 return PTR_ERR(task);
1591 status = nfs4_wait_for_completion_rpc_task(task);
1592 if (status != 0) {
1593 data->cancelled = 1;
1594 smp_wmb();
1595 } else
1596 status = data->rpc_status;
1597 rpc_put_task(task);
1598
1599 return status;
1600 }
1601
1602 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1603 {
1604 struct inode *dir = data->dir->d_inode;
1605 struct nfs_openres *o_res = &data->o_res;
1606 int status;
1607
1608 status = nfs4_run_open_task(data, 1);
1609 if (status != 0 || !data->rpc_done)
1610 return status;
1611
1612 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1613
1614 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1615 status = _nfs4_proc_open_confirm(data);
1616 if (status != 0)
1617 return status;
1618 }
1619
1620 return status;
1621 }
1622
1623 /*
1624 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1625 */
1626 static int _nfs4_proc_open(struct nfs4_opendata *data)
1627 {
1628 struct inode *dir = data->dir->d_inode;
1629 struct nfs_server *server = NFS_SERVER(dir);
1630 struct nfs_openargs *o_arg = &data->o_arg;
1631 struct nfs_openres *o_res = &data->o_res;
1632 int status;
1633
1634 status = nfs4_run_open_task(data, 0);
1635 if (!data->rpc_done)
1636 return status;
1637 if (status != 0) {
1638 if (status == -NFS4ERR_BADNAME &&
1639 !(o_arg->open_flags & O_CREAT))
1640 return -ENOENT;
1641 return status;
1642 }
1643
1644 nfs_fattr_map_and_free_names(server, &data->f_attr);
1645
1646 if (o_arg->open_flags & O_CREAT)
1647 update_changeattr(dir, &o_res->cinfo);
1648 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1649 server->caps &= ~NFS_CAP_POSIX_LOCK;
1650 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1651 status = _nfs4_proc_open_confirm(data);
1652 if (status != 0)
1653 return status;
1654 }
1655 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1656 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1657 return 0;
1658 }
1659
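/*
 * Wait for the lease to be re-established, kicking the state manager as
 * needed; gives up with -EIO after NFS4_MAX_LOOP_ON_RECOVER attempts.
 */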
1660 static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1661 {
1662 unsigned int loop;
1663 int ret;
1664
1665 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1666 ret = nfs4_wait_clnt_recover(clp);
1667 if (ret != 0)
1668 break;
1669 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1670 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1671 break;
1672 nfs4_schedule_state_manager(clp);
1673 ret = -EIO;
1674 }
1675 return ret;
1676 }
1677
1678 static int nfs4_recover_expired_lease(struct nfs_server *server)
1679 {
1680 return nfs4_client_recover_expired_lease(server->nfs_client);
1681 }
1682
1683 /*
1684 * OPEN_EXPIRED:
1685 * reclaim state on the server after a network partition.
1686 * Assumes the caller holds the appropriate lock.
1687 */
1688 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1689 {
1690 struct nfs4_opendata *opendata;
1691 int ret;
1692
1693 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1694 if (IS_ERR(opendata))
1695 return PTR_ERR(opendata);
1696 ret = nfs4_open_recover(opendata, state);
1697 if (ret == -ESTALE)
1698 d_drop(ctx->dentry);
1699 nfs4_opendata_put(opendata);
1700 return ret;
1701 }
1702
1703 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1704 {
1705 struct nfs_server *server = NFS_SERVER(state->inode);
1706 struct nfs4_exception exception = { };
1707 int err;
1708
1709 do {
1710 err = _nfs4_open_expired(ctx, state);
1711 switch (err) {
1712 default:
1713 goto out;
1714 case -NFS4ERR_GRACE:
1715 case -NFS4ERR_DELAY:
1716 nfs4_handle_exception(server, err, &exception);
1717 err = 0;
1718 }
1719 } while (exception.retry);
1720 out:
1721 return err;
1722 }
1723
1724 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1725 {
1726 struct nfs_open_context *ctx;
1727 int ret;
1728
1729 ctx = nfs4_state_find_open_context(state);
1730 if (IS_ERR(ctx))
1731 return PTR_ERR(ctx);
1732 ret = nfs4_do_open_expired(ctx, state);
1733 put_nfs_open_context(ctx);
1734 return ret;
1735 }
1736
1737 #if defined(CONFIG_NFS_V4_1)
1738 static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
1739 {
1740 int status = NFS_OK;
1741 struct nfs_server *server = NFS_SERVER(state->inode);
1742
1743 if (state->flags & flags) {
1744 status = nfs41_test_stateid(server, stateid);
1745 if (status != NFS_OK) {
1746 nfs41_free_stateid(server, stateid);
1747 state->flags &= ~flags;
1748 }
1749 }
1750 return status;
1751 }
1752
1753 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1754 {
1755 int deleg_status, open_status;
1756 int deleg_flags = 1 << NFS_DELEGATED_STATE;
1757 int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);
1758
1759 deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
1760 open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags);
1761
1762 if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
1763 return NFS_OK;
1764 return nfs4_open_expired(sp, state);
1765 }
1766 #endif
1767
1768 /*
1769 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
1770 * fields corresponding to attributes that were used to store the verifier.
1771 * Make sure we clobber those fields in the later setattr call.
1772 */
1773 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1774 {
1775 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1776 !(sattr->ia_valid & ATTR_ATIME_SET))
1777 sattr->ia_valid |= ATTR_ATIME;
1778
1779 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1780 !(sattr->ia_valid & ATTR_MTIME_SET))
1781 sattr->ia_valid |= ATTR_MTIME;
1782 }
1783
1784 /*
1785 * Returns a referenced nfs4_state
1786 */
1787 static int _nfs4_do_open(struct inode *dir,
1788 struct dentry *dentry,
1789 fmode_t fmode,
1790 int flags,
1791 struct iattr *sattr,
1792 struct rpc_cred *cred,
1793 struct nfs4_state **res,
1794 struct nfs4_threshold **ctx_th)
1795 {
1796 struct nfs4_state_owner *sp;
1797 struct nfs4_state *state = NULL;
1798 struct nfs_server *server = NFS_SERVER(dir);
1799 struct nfs4_opendata *opendata;
1800 int status;
1801
1802 /* Protect against reboot recovery conflicts */
1803 status = -ENOMEM;
1804 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1805 if (sp == NULL) {
1806 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1807 goto out_err;
1808 }
1809 status = nfs4_recover_expired_lease(server);
1810 if (status != 0)
1811 goto err_put_state_owner;
1812 if (dentry->d_inode != NULL)
1813 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
1814 status = -ENOMEM;
1815 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
1816 if (opendata == NULL)
1817 goto err_put_state_owner;
1818
1819 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
1820 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
1821 if (!opendata->f_attr.mdsthreshold)
1822 goto err_opendata_put;
1823 }
1824 if (dentry->d_inode != NULL)
1825 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1826
1827 status = _nfs4_proc_open(opendata);
1828 if (status != 0)
1829 goto err_opendata_put;
1830
1831 state = nfs4_opendata_to_nfs4_state(opendata);
1832 status = PTR_ERR(state);
1833 if (IS_ERR(state))
1834 goto err_opendata_put;
1835 if (server->caps & NFS_CAP_POSIX_LOCK)
1836 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1837
1838 if (opendata->o_arg.open_flags & O_EXCL) {
1839 nfs4_exclusive_attrset(opendata, sattr);
1840
1841 nfs_fattr_init(opendata->o_res.f_attr);
1842 status = nfs4_do_setattr(state->inode, cred,
1843 opendata->o_res.f_attr, sattr,
1844 state);
1845 if (status == 0)
1846 nfs_setattr_update_inode(state->inode, sattr);
1847 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1848 }
1849
1850 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
1851 *ctx_th = opendata->f_attr.mdsthreshold;
1852 else
1853 kfree(opendata->f_attr.mdsthreshold);
1854 opendata->f_attr.mdsthreshold = NULL;
1855
1856 nfs4_opendata_put(opendata);
1857 nfs4_put_state_owner(sp);
1858 *res = state;
1859 return 0;
1860 err_opendata_put:
1861 kfree(opendata->f_attr.mdsthreshold);
1862 nfs4_opendata_put(opendata);
1863 err_put_state_owner:
1864 nfs4_put_state_owner(sp);
1865 out_err:
1866 *res = NULL;
1867 return status;
1868 }
1869
1870
1871 static struct nfs4_state *nfs4_do_open(struct inode *dir,
1872 struct dentry *dentry,
1873 fmode_t fmode,
1874 int flags,
1875 struct iattr *sattr,
1876 struct rpc_cred *cred,
1877 struct nfs4_threshold **ctx_th)
1878 {
1879 struct nfs4_exception exception = { };
1880 struct nfs4_state *res;
1881 int status;
1882
1883 do {
1884 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
1885 &res, ctx_th);
1886 if (status == 0)
1887 break;
1888 /* NOTE: BAD_SEQID means the server and client disagree about the
1889 * book-keeping w.r.t. state-changing operations
1890 * (OPEN/CLOSE/LOCK/LOCKU...)
1891 * It is actually a sign of a bug on the client or on the server.
1892 *
1893 * If we receive a BAD_SEQID error in the particular case of
1894 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1895 * have unhashed the old state_owner for us, and that we can
1896 * therefore safely retry using a new one. We should still warn
1897 * the user though...
1898 */
1899 if (status == -NFS4ERR_BAD_SEQID) {
1900 pr_warn_ratelimited("NFS: v4 server %s "
1901 " returned a bad sequence-id error!\n",
1902 NFS_SERVER(dir)->nfs_client->cl_hostname);
1903 exception.retry = 1;
1904 continue;
1905 }
1906 /*
1907 * BAD_STATEID on OPEN means that the server cancelled our
1908 * state before it received the OPEN_CONFIRM.
1909 * Recover by retrying the request as per the discussion
1910 * on Page 181 of RFC3530.
1911 */
1912 if (status == -NFS4ERR_BAD_STATEID) {
1913 exception.retry = 1;
1914 continue;
1915 }
1916 if (status == -EAGAIN) {
1917 /* We must have found a delegation */
1918 exception.retry = 1;
1919 continue;
1920 }
1921 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1922 status, &exception));
1923 } while (exception.retry);
1924 return res;
1925 }
1926
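/*
 * Note on stateid selection for SETATTR: prefer a writeable open
 * stateid, fall back to a write delegation stateid, and use the
 * zero stateid when the caller holds no open state at all (e.g. a
 * truncate issued without an open file).
 */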
1927 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1928 struct nfs_fattr *fattr, struct iattr *sattr,
1929 struct nfs4_state *state)
1930 {
1931 struct nfs_server *server = NFS_SERVER(inode);
1932 struct nfs_setattrargs arg = {
1933 .fh = NFS_FH(inode),
1934 .iap = sattr,
1935 .server = server,
1936 .bitmask = server->attr_bitmask,
1937 };
1938 struct nfs_setattrres res = {
1939 .fattr = fattr,
1940 .server = server,
1941 };
1942 struct rpc_message msg = {
1943 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1944 .rpc_argp = &arg,
1945 .rpc_resp = &res,
1946 .rpc_cred = cred,
1947 };
1948 unsigned long timestamp = jiffies;
1949 int status;
1950
1951 nfs_fattr_init(fattr);
1952
1953 if (state != NULL) {
1954 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
1955 current->files, current->tgid);
1956 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
1957 FMODE_WRITE)) {
1958 /* Use that stateid */
1959 } else
1960 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
1961
1962 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
1963 if (status == 0 && state != NULL)
1964 renew_lease(server, timestamp);
1965 return status;
1966 }
1967
1968 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1969 struct nfs_fattr *fattr, struct iattr *sattr,
1970 struct nfs4_state *state)
1971 {
1972 struct nfs_server *server = NFS_SERVER(inode);
1973 struct nfs4_exception exception = {
1974 .state = state,
1975 .inode = inode,
1976 };
1977 int err;
1978 do {
1979 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
1980 switch (err) {
1981 case -NFS4ERR_OPENMODE:
1982 if (state && !(state->state & FMODE_WRITE)) {
1983 err = -EBADF;
1984 if (sattr->ia_valid & ATTR_OPEN)
1985 err = -EACCES;
1986 goto out;
1987 }
1988 }
1989 err = nfs4_handle_exception(server, err, &exception);
1990 } while (exception.retry);
1991 out:
1992 return err;
1993 }
1994
1995 struct nfs4_closedata {
1996 struct inode *inode;
1997 struct nfs4_state *state;
1998 struct nfs_closeargs arg;
1999 struct nfs_closeres res;
2000 struct nfs_fattr fattr;
2001 unsigned long timestamp;
2002 bool roc;
2003 u32 roc_barrier;
2004 };
2005
2006 static void nfs4_free_closedata(void *data)
2007 {
2008 struct nfs4_closedata *calldata = data;
2009 struct nfs4_state_owner *sp = calldata->state->owner;
2010 struct super_block *sb = calldata->state->inode->i_sb;
2011
2012 if (calldata->roc)
2013 pnfs_roc_release(calldata->state->inode);
2014 nfs4_put_open_state(calldata->state);
2015 nfs_free_seqid(calldata->arg.seqid);
2016 nfs4_put_state_owner(sp);
2017 nfs_sb_deactive(sb);
2018 kfree(calldata);
2019 }
2020
2021 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2022 fmode_t fmode)
2023 {
2024 spin_lock(&state->owner->so_lock);
2025 if (!(fmode & FMODE_READ))
2026 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2027 if (!(fmode & FMODE_WRITE))
2028 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2029 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2030 spin_unlock(&state->owner->so_lock);
2031 }
2032
2033 static void nfs4_close_done(struct rpc_task *task, void *data)
2034 {
2035 struct nfs4_closedata *calldata = data;
2036 struct nfs4_state *state = calldata->state;
2037 struct nfs_server *server = NFS_SERVER(calldata->inode);
2038
2039 dprintk("%s: begin!\n", __func__);
2040 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2041 return;
2042 /* hmm. we are done with the inode, and in the process of freeing
2043 * the state_owner. we keep this around to process errors
2044 */
2045 switch (task->tk_status) {
2046 case 0:
2047 if (calldata->roc)
2048 pnfs_roc_set_barrier(state->inode,
2049 calldata->roc_barrier);
2050 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2051 renew_lease(server, calldata->timestamp);
2052 nfs4_close_clear_stateid_flags(state,
2053 calldata->arg.fmode);
2054 break;
2055 case -NFS4ERR_STALE_STATEID:
2056 case -NFS4ERR_OLD_STATEID:
2057 case -NFS4ERR_BAD_STATEID:
2058 case -NFS4ERR_EXPIRED:
2059 if (calldata->arg.fmode == 0)
2060 break;
2061 default:
2062 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2063 rpc_restart_call_prepare(task);
2064 }
2065 nfs_release_seqid(calldata->arg.seqid);
2066 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2067 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2068 }
2069
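/*
 * Work out what to put on the wire: start from OPEN_DOWNGRADE and drop
 * the share modes that no longer have users; if nothing remains open,
 * switch to a full CLOSE. If no share mode needs to be released at all,
 * the RPC is skipped entirely by clearing tk_action.
 */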
2070 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2071 {
2072 struct nfs4_closedata *calldata = data;
2073 struct nfs4_state *state = calldata->state;
2074 int call_close = 0;
2075
2076 dprintk("%s: begin!\n", __func__);
2077 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2078 return;
2079
2080 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2081 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2082 spin_lock(&state->owner->so_lock);
2083 /* Calculate the change in open mode */
2084 if (state->n_rdwr == 0) {
2085 if (state->n_rdonly == 0) {
2086 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2087 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2088 calldata->arg.fmode &= ~FMODE_READ;
2089 }
2090 if (state->n_wronly == 0) {
2091 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2092 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2093 calldata->arg.fmode &= ~FMODE_WRITE;
2094 }
2095 }
2096 spin_unlock(&state->owner->so_lock);
2097
2098 if (!call_close) {
2099 /* Note: exit _without_ calling nfs4_close_done */
2100 task->tk_action = NULL;
2101 goto out;
2102 }
2103
2104 if (calldata->arg.fmode == 0) {
2105 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2106 if (calldata->roc &&
2107 pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
2108 rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
2109 task, NULL);
2110 goto out;
2111 }
2112 }
2113
2114 nfs_fattr_init(calldata->res.fattr);
2115 calldata->timestamp = jiffies;
2116 if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
2117 &calldata->arg.seq_args,
2118 &calldata->res.seq_res,
2119 task))
2120 goto out;
2121 rpc_call_start(task);
2122 out:
2123 dprintk("%s: done!\n", __func__);
2124 }
2125
2126 static const struct rpc_call_ops nfs4_close_ops = {
2127 .rpc_call_prepare = nfs4_close_prepare,
2128 .rpc_call_done = nfs4_close_done,
2129 .rpc_release = nfs4_free_closedata,
2130 };
2131
2132 /*
2133 * It is possible for data to be read/written from a mem-mapped file
2134 * after the sys_close call (which hits the vfs layer as a flush).
2135 * This means that we can't safely call nfsv4 close on a file until
2136 * the inode is cleared. This in turn means that we are not good
2137 * NFSv4 citizens - we do not indicate to the server to update the file's
2138 * share state even when we are done with one of the three share
2139 * stateid's in the inode.
2140 *
2141 * NOTE: Caller must be holding the sp->so_owner semaphore!
2142 */
2143 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
2144 {
2145 struct nfs_server *server = NFS_SERVER(state->inode);
2146 struct nfs4_closedata *calldata;
2147 struct nfs4_state_owner *sp = state->owner;
2148 struct rpc_task *task;
2149 struct rpc_message msg = {
2150 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2151 .rpc_cred = state->owner->so_cred,
2152 };
2153 struct rpc_task_setup task_setup_data = {
2154 .rpc_client = server->client,
2155 .rpc_message = &msg,
2156 .callback_ops = &nfs4_close_ops,
2157 .workqueue = nfsiod_workqueue,
2158 .flags = RPC_TASK_ASYNC,
2159 };
2160 int status = -ENOMEM;
2161
2162 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2163 if (calldata == NULL)
2164 goto out;
2165 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2166 calldata->inode = state->inode;
2167 calldata->state = state;
2168 calldata->arg.fh = NFS_FH(state->inode);
2169 calldata->arg.stateid = &state->open_stateid;
2170 /* Serialization for the sequence id */
2171 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2172 if (calldata->arg.seqid == NULL)
2173 goto out_free_calldata;
2174 calldata->arg.fmode = 0;
2175 calldata->arg.bitmask = server->cache_consistency_bitmask;
2176 calldata->res.fattr = &calldata->fattr;
2177 calldata->res.seqid = calldata->arg.seqid;
2178 calldata->res.server = server;
2179 calldata->roc = roc;
2180 nfs_sb_active(calldata->inode->i_sb);
2181
2182 msg.rpc_argp = &calldata->arg;
2183 msg.rpc_resp = &calldata->res;
2184 task_setup_data.callback_data = calldata;
2185 task = rpc_run_task(&task_setup_data);
2186 if (IS_ERR(task))
2187 return PTR_ERR(task);
2188 status = 0;
2189 if (wait)
2190 status = rpc_wait_for_completion_task(task);
2191 rpc_put_task(task);
2192 return status;
2193 out_free_calldata:
2194 kfree(calldata);
2195 out:
2196 if (roc)
2197 pnfs_roc_release(state->inode);
2198 nfs4_put_open_state(state);
2199 nfs4_put_state_owner(sp);
2200 return status;
2201 }
2202
2203 static struct inode *
2204 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2205 {
2206 struct nfs4_state *state;
2207
2208 /* Protect against concurrent sillydeletes */
2209 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
2210 ctx->cred, &ctx->mdsthreshold);
2211 if (IS_ERR(state))
2212 return ERR_CAST(state);
2213 ctx->state = state;
2214 return igrab(state->inode);
2215 }
2216
2217 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2218 {
2219 if (ctx->state == NULL)
2220 return;
2221 if (is_sync)
2222 nfs4_close_sync(ctx->state, ctx->mode);
2223 else
2224 nfs4_close_state(ctx->state, ctx->mode);
2225 }
2226
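/*
 * Probe the attributes and ACL support advertised by the server, derive
 * the NFS_CAP_* capability flags, and record the reduced bitmask used
 * for post-operation cache consistency attributes.
 */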
2227 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2228 {
2229 struct nfs4_server_caps_arg args = {
2230 .fhandle = fhandle,
2231 };
2232 struct nfs4_server_caps_res res = {};
2233 struct rpc_message msg = {
2234 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2235 .rpc_argp = &args,
2236 .rpc_resp = &res,
2237 };
2238 int status;
2239
2240 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2241 if (status == 0) {
2242 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2243 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2244 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2245 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2246 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2247 NFS_CAP_CTIME|NFS_CAP_MTIME);
2248 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2249 server->caps |= NFS_CAP_ACLS;
2250 if (res.has_links != 0)
2251 server->caps |= NFS_CAP_HARDLINKS;
2252 if (res.has_symlinks != 0)
2253 server->caps |= NFS_CAP_SYMLINKS;
2254 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2255 server->caps |= NFS_CAP_FILEID;
2256 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2257 server->caps |= NFS_CAP_MODE;
2258 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2259 server->caps |= NFS_CAP_NLINK;
2260 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2261 server->caps |= NFS_CAP_OWNER;
2262 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2263 server->caps |= NFS_CAP_OWNER_GROUP;
2264 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2265 server->caps |= NFS_CAP_ATIME;
2266 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2267 server->caps |= NFS_CAP_CTIME;
2268 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2269 server->caps |= NFS_CAP_MTIME;
2270
2271 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2272 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2273 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2274 server->acl_bitmask = res.acl_bitmask;
2275 server->fh_expire_type = res.fh_expire_type;
2276 }
2277
2278 return status;
2279 }
2280
2281 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2282 {
2283 struct nfs4_exception exception = { };
2284 int err;
2285 do {
2286 err = nfs4_handle_exception(server,
2287 _nfs4_server_capabilities(server, fhandle),
2288 &exception);
2289 } while (exception.retry);
2290 return err;
2291 }
2292
2293 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2294 struct nfs_fsinfo *info)
2295 {
2296 struct nfs4_lookup_root_arg args = {
2297 .bitmask = nfs4_fattr_bitmap,
2298 };
2299 struct nfs4_lookup_res res = {
2300 .server = server,
2301 .fattr = info->fattr,
2302 .fh = fhandle,
2303 };
2304 struct rpc_message msg = {
2305 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2306 .rpc_argp = &args,
2307 .rpc_resp = &res,
2308 };
2309
2310 nfs_fattr_init(info->fattr);
2311 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2312 }
2313
2314 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2315 struct nfs_fsinfo *info)
2316 {
2317 struct nfs4_exception exception = { };
2318 int err;
2319 do {
2320 err = _nfs4_lookup_root(server, fhandle, info);
2321 switch (err) {
2322 case 0:
2323 case -NFS4ERR_WRONGSEC:
2324 goto out;
2325 default:
2326 err = nfs4_handle_exception(server, err, &exception);
2327 }
2328 } while (exception.retry);
2329 out:
2330 return err;
2331 }
2332
2333 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2334 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2335 {
2336 struct rpc_auth *auth;
2337 int ret;
2338
2339 auth = rpcauth_create(flavor, server->client);
2340 	if (IS_ERR(auth)) {
2341 ret = -EIO;
2342 goto out;
2343 }
2344 ret = nfs4_lookup_root(server, fhandle, info);
2345 out:
2346 return ret;
2347 }
2348
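/*
 * Try LOOKUP ROOT with each registered GSS pseudoflavor (plus AUTH_NULL)
 * in turn, skipping flavors that fail with NFS4ERR_WRONGSEC or EACCES.
 */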
2349 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2350 struct nfs_fsinfo *info)
2351 {
2352 int i, len, status = 0;
2353 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2354
2355 len = gss_mech_list_pseudoflavors(&flav_array[0]);
2356 flav_array[len] = RPC_AUTH_NULL;
2357 len += 1;
2358
2359 for (i = 0; i < len; i++) {
2360 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2361 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2362 continue;
2363 break;
2364 }
2365 /*
2366 	 * -EACCES could mean that the user doesn't have correct permissions
2367 * to access the mount. It could also mean that we tried to mount
2368 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2369 * existing mount programs don't handle -EACCES very well so it should
2370 * be mapped to -EPERM instead.
2371 */
2372 if (status == -EACCES)
2373 status = -EPERM;
2374 return status;
2375 }
2376
2377 /*
2378 * get the file handle for the "/" directory on the server
2379 */
2380 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
2381 struct nfs_fsinfo *info)
2382 {
2383 int minor_version = server->nfs_client->cl_minorversion;
2384 int status = nfs4_lookup_root(server, fhandle, info);
2385 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2386 /*
2387 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2388 * by nfs4_map_errors() as this function exits.
2389 */
2390 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2391 if (status == 0)
2392 status = nfs4_server_capabilities(server, fhandle);
2393 if (status == 0)
2394 status = nfs4_do_fsinfo(server, fhandle, info);
2395 return nfs4_map_errors(status);
2396 }
2397
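/*
 * Fetch server capabilities and the attributes of the mounted-on file
 * handle, and adopt the fsid reported by the server if it differs from
 * the one currently recorded for this nfs_server.
 */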
2398 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
2399 struct nfs_fsinfo *info)
2400 {
2401 int error;
2402 struct nfs_fattr *fattr = info->fattr;
2403
2404 error = nfs4_server_capabilities(server, mntfh);
2405 if (error < 0) {
2406 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
2407 return error;
2408 }
2409
2410 error = nfs4_proc_getattr(server, mntfh, fattr);
2411 if (error < 0) {
2412 dprintk("nfs4_get_root: getattr error = %d\n", -error);
2413 return error;
2414 }
2415
2416 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
2417 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
2418 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
2419
2420 return error;
2421 }
2422
2423 /*
2424 * Get locations and (maybe) other attributes of a referral.
2425 * Note that we'll actually follow the referral later when
2426 * we detect fsid mismatch in inode revalidation
2427 */
2428 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2429 const struct qstr *name, struct nfs_fattr *fattr,
2430 struct nfs_fh *fhandle)
2431 {
2432 int status = -ENOMEM;
2433 struct page *page = NULL;
2434 struct nfs4_fs_locations *locations = NULL;
2435
2436 page = alloc_page(GFP_KERNEL);
2437 if (page == NULL)
2438 goto out;
2439 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2440 if (locations == NULL)
2441 goto out;
2442
2443 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2444 if (status != 0)
2445 goto out;
2446 /* Make sure server returned a different fsid for the referral */
2447 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2448 dprintk("%s: server did not return a different fsid for"
2449 " a referral at %s\n", __func__, name->name);
2450 status = -EIO;
2451 goto out;
2452 }
2453 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2454 nfs_fixup_referral_attributes(&locations->fattr);
2455
2456 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2457 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2458 memset(fhandle, 0, sizeof(struct nfs_fh));
2459 out:
2460 if (page)
2461 __free_page(page);
2462 kfree(locations);
2463 return status;
2464 }
2465
2466 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2467 {
2468 struct nfs4_getattr_arg args = {
2469 .fh = fhandle,
2470 .bitmask = server->attr_bitmask,
2471 };
2472 struct nfs4_getattr_res res = {
2473 .fattr = fattr,
2474 .server = server,
2475 };
2476 struct rpc_message msg = {
2477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2478 .rpc_argp = &args,
2479 .rpc_resp = &res,
2480 };
2481
2482 nfs_fattr_init(fattr);
2483 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2484 }
2485
2486 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2487 {
2488 struct nfs4_exception exception = { };
2489 int err;
2490 do {
2491 err = nfs4_handle_exception(server,
2492 _nfs4_proc_getattr(server, fhandle, fattr),
2493 &exception);
2494 } while (exception.retry);
2495 return err;
2496 }
2497
2498 /*
2499  * The file is not closed if it is opened due to a request to change
2500 * the size of the file. The open call will not be needed once the
2501 * VFS layer lookup-intents are implemented.
2502 *
2503 * Close is called when the inode is destroyed.
2504 * If we haven't opened the file for O_WRONLY, we
2505 * need to in the size_change case to obtain a stateid.
2506 *
2507 * Got race?
2508 * Because OPEN is always done by name in nfsv4, it is
2509 * possible that we opened a different file by the same
2510 * name. We can recognize this race condition, but we
2511 * can't do anything about it besides returning an error.
2512 *
2513 * This will be fixed with VFS changes (lookup-intent).
2514 */
2515 static int
2516 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2517 struct iattr *sattr)
2518 {
2519 struct inode *inode = dentry->d_inode;
2520 struct rpc_cred *cred = NULL;
2521 struct nfs4_state *state = NULL;
2522 int status;
2523
2524 if (pnfs_ld_layoutret_on_setattr(inode))
2525 pnfs_return_layout(inode);
2526
2527 nfs_fattr_init(fattr);
2528
2529 /* Search for an existing open(O_WRITE) file */
2530 if (sattr->ia_valid & ATTR_FILE) {
2531 struct nfs_open_context *ctx;
2532
2533 ctx = nfs_file_open_context(sattr->ia_file);
2534 if (ctx) {
2535 cred = ctx->cred;
2536 state = ctx->state;
2537 }
2538 }
2539
2540 /* Deal with open(O_TRUNC) */
2541 if (sattr->ia_valid & ATTR_OPEN)
2542 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2543
2544 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2545 if (status == 0)
2546 nfs_setattr_update_inode(inode, sattr);
2547 return status;
2548 }
2549
2550 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2551 const struct qstr *name, struct nfs_fh *fhandle,
2552 struct nfs_fattr *fattr)
2553 {
2554 struct nfs_server *server = NFS_SERVER(dir);
2555 int status;
2556 struct nfs4_lookup_arg args = {
2557 .bitmask = server->attr_bitmask,
2558 .dir_fh = NFS_FH(dir),
2559 .name = name,
2560 };
2561 struct nfs4_lookup_res res = {
2562 .server = server,
2563 .fattr = fattr,
2564 .fh = fhandle,
2565 };
2566 struct rpc_message msg = {
2567 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2568 .rpc_argp = &args,
2569 .rpc_resp = &res,
2570 };
2571
2572 nfs_fattr_init(fattr);
2573
2574 dprintk("NFS call lookup %s\n", name->name);
2575 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2576 dprintk("NFS reply lookup: %d\n", status);
2577 return status;
2578 }
2579
2580 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2581 {
2582 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2583 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2584 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2585 fattr->nlink = 2;
2586 }
2587
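/*
 * Common LOOKUP handling: NFS4ERR_BADNAME is mapped to -ENOENT,
 * NFS4ERR_MOVED is resolved by fetching the referral's fs_locations,
 * and NFS4ERR_WRONGSEC triggers a single retry on a new RPC client
 * created by nfs4_create_sec_client(); a second WRONGSEC on that
 * client is reported as -EPERM.
 */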
2588 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2589 struct qstr *name, struct nfs_fh *fhandle,
2590 struct nfs_fattr *fattr)
2591 {
2592 struct nfs4_exception exception = { };
2593 struct rpc_clnt *client = *clnt;
2594 int err;
2595 do {
2596 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2597 switch (err) {
2598 case -NFS4ERR_BADNAME:
2599 err = -ENOENT;
2600 goto out;
2601 case -NFS4ERR_MOVED:
2602 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2603 goto out;
2604 case -NFS4ERR_WRONGSEC:
2605 err = -EPERM;
2606 if (client != *clnt)
2607 goto out;
2608
2609 client = nfs4_create_sec_client(client, dir, name);
2610 if (IS_ERR(client))
2611 return PTR_ERR(client);
2612
2613 exception.retry = 1;
2614 break;
2615 default:
2616 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2617 }
2618 } while (exception.retry);
2619
2620 out:
2621 if (err == 0)
2622 *clnt = client;
2623 else if (client != *clnt)
2624 rpc_shutdown_client(client);
2625
2626 return err;
2627 }
2628
2629 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
2630 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2631 {
2632 int status;
2633 struct rpc_clnt *client = NFS_CLIENT(dir);
2634
2635 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2636 if (client != NFS_CLIENT(dir)) {
2637 rpc_shutdown_client(client);
2638 nfs_fixup_secinfo_attributes(fattr);
2639 }
2640 return status;
2641 }
2642
2643 struct rpc_clnt *
2644 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2645 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2646 {
2647 int status;
2648 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2649
2650 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2651 if (status < 0) {
2652 rpc_shutdown_client(client);
2653 return ERR_PTR(status);
2654 }
2655 return client;
2656 }
2657
2658 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2659 {
2660 struct nfs_server *server = NFS_SERVER(inode);
2661 struct nfs4_accessargs args = {
2662 .fh = NFS_FH(inode),
2663 .bitmask = server->cache_consistency_bitmask,
2664 };
2665 struct nfs4_accessres res = {
2666 .server = server,
2667 };
2668 struct rpc_message msg = {
2669 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2670 .rpc_argp = &args,
2671 .rpc_resp = &res,
2672 .rpc_cred = entry->cred,
2673 };
2674 int mode = entry->mask;
2675 int status;
2676
2677 /*
2678 * Determine which access bits we want to ask for...
2679 */
2680 if (mode & MAY_READ)
2681 args.access |= NFS4_ACCESS_READ;
2682 if (S_ISDIR(inode->i_mode)) {
2683 if (mode & MAY_WRITE)
2684 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2685 if (mode & MAY_EXEC)
2686 args.access |= NFS4_ACCESS_LOOKUP;
2687 } else {
2688 if (mode & MAY_WRITE)
2689 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2690 if (mode & MAY_EXEC)
2691 args.access |= NFS4_ACCESS_EXECUTE;
2692 }
2693
2694 res.fattr = nfs_alloc_fattr();
2695 if (res.fattr == NULL)
2696 return -ENOMEM;
2697
2698 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2699 if (!status) {
2700 entry->mask = 0;
2701 if (res.access & NFS4_ACCESS_READ)
2702 entry->mask |= MAY_READ;
2703 if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2704 entry->mask |= MAY_WRITE;
2705 if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2706 entry->mask |= MAY_EXEC;
2707 nfs_refresh_inode(inode, res.fattr);
2708 }
2709 nfs_free_fattr(res.fattr);
2710 return status;
2711 }
2712
2713 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2714 {
2715 struct nfs4_exception exception = { };
2716 int err;
2717 do {
2718 err = nfs4_handle_exception(NFS_SERVER(inode),
2719 _nfs4_proc_access(inode, entry),
2720 &exception);
2721 } while (exception.retry);
2722 return err;
2723 }
2724
2725 /*
2726 * TODO: For the time being, we don't try to get any attributes
2727 * along with any of the zero-copy operations READ, READDIR,
2728 * READLINK, WRITE.
2729 *
2730 * In the case of the first three, we want to put the GETATTR
2731 * after the read-type operation -- this is because it is hard
2732 * to predict the length of a GETATTR response in v4, and thus
2733 * align the READ data correctly. This means that the GETATTR
2734 * may end up partially falling into the page cache, and we should
2735 * shift it into the 'tail' of the xdr_buf before processing.
2736 * To do this efficiently, we need to know the total length
2737 * of data received, which doesn't seem to be available outside
2738 * of the RPC layer.
2739 *
2740 * In the case of WRITE, we also want to put the GETATTR after
2741 * the operation -- in this case because we want to make sure
2742 * we get the post-operation mtime and size. This means that
2743 * we can't use xdr_encode_pages() as written: we need a variant
2744 * of it which would leave room in the 'tail' iovec.
2745 *
2746 * Both of these changes to the XDR layer would in fact be quite
2747 * minor, but I decided to leave them for a subsequent patch.
2748 */
2749 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2750 unsigned int pgbase, unsigned int pglen)
2751 {
2752 struct nfs4_readlink args = {
2753 .fh = NFS_FH(inode),
2754 .pgbase = pgbase,
2755 .pglen = pglen,
2756 .pages = &page,
2757 };
2758 struct nfs4_readlink_res res;
2759 struct rpc_message msg = {
2760 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2761 .rpc_argp = &args,
2762 .rpc_resp = &res,
2763 };
2764
2765 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2766 }
2767
2768 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2769 unsigned int pgbase, unsigned int pglen)
2770 {
2771 struct nfs4_exception exception = { };
2772 int err;
2773 do {
2774 err = nfs4_handle_exception(NFS_SERVER(inode),
2775 _nfs4_proc_readlink(inode, page, pgbase, pglen),
2776 &exception);
2777 } while (exception.retry);
2778 return err;
2779 }
2780
2781 /*
2782 * Got race?
2783 * We will need to arrange for the VFS layer to provide an atomic open.
2784 * Until then, this create/open method is prone to inefficiency and race
2785 * conditions due to the lookup, create, and open VFS calls from sys_open()
2786 * placed on the wire.
2787 *
2788 * Given the above sorry state of affairs, I'm simply sending an OPEN.
2789 * The file will be opened again in the subsequent VFS open call
2790 * (nfs4_proc_file_open).
2791 *
2792 * The open for read will just hang around to be used by any process that
2793 * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2794 */
2795
2796 static int
2797 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2798 int flags, struct nfs_open_context *ctx)
2799 {
2800 struct dentry *de = dentry;
2801 struct nfs4_state *state;
2802 struct rpc_cred *cred = NULL;
2803 fmode_t fmode = 0;
2804 int status = 0;
2805
2806 if (ctx != NULL) {
2807 cred = ctx->cred;
2808 de = ctx->dentry;
2809 fmode = ctx->mode;
2810 }
2811 sattr->ia_mode &= ~current_umask();
2812 state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
2813 d_drop(dentry);
2814 if (IS_ERR(state)) {
2815 status = PTR_ERR(state);
2816 goto out;
2817 }
2818 d_add(dentry, igrab(state->inode));
2819 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2820 if (ctx != NULL)
2821 ctx->state = state;
2822 else
2823 nfs4_close_sync(state, fmode);
2824 out:
2825 return status;
2826 }
2827
2828 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2829 {
2830 struct nfs_server *server = NFS_SERVER(dir);
2831 struct nfs_removeargs args = {
2832 .fh = NFS_FH(dir),
2833 .name = *name,
2834 };
2835 struct nfs_removeres res = {
2836 .server = server,
2837 };
2838 struct rpc_message msg = {
2839 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2840 .rpc_argp = &args,
2841 .rpc_resp = &res,
2842 };
2843 int status;
2844
2845 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2846 if (status == 0)
2847 update_changeattr(dir, &res.cinfo);
2848 return status;
2849 }
2850
2851 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2852 {
2853 struct nfs4_exception exception = { };
2854 int err;
2855 do {
2856 err = nfs4_handle_exception(NFS_SERVER(dir),
2857 _nfs4_proc_remove(dir, name),
2858 &exception);
2859 } while (exception.retry);
2860 return err;
2861 }
2862
2863 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2864 {
2865 struct nfs_server *server = NFS_SERVER(dir);
2866 struct nfs_removeargs *args = msg->rpc_argp;
2867 struct nfs_removeres *res = msg->rpc_resp;
2868
2869 res->server = server;
2870 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2871 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
2872 }
2873
2874 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
2875 {
2876 if (nfs4_setup_sequence(NFS_SERVER(data->dir),
2877 &data->args.seq_args,
2878 &data->res.seq_res,
2879 task))
2880 return;
2881 rpc_call_start(task);
2882 }
2883
2884 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2885 {
2886 struct nfs_removeres *res = task->tk_msg.rpc_resp;
2887
2888 if (!nfs4_sequence_done(task, &res->seq_res))
2889 return 0;
2890 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2891 return 0;
2892 update_changeattr(dir, &res->cinfo);
2893 return 1;
2894 }
2895
2896 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2897 {
2898 struct nfs_server *server = NFS_SERVER(dir);
2899 struct nfs_renameargs *arg = msg->rpc_argp;
2900 struct nfs_renameres *res = msg->rpc_resp;
2901
2902 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2903 res->server = server;
2904 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2905 }
2906
2907 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
2908 {
2909 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
2910 &data->args.seq_args,
2911 &data->res.seq_res,
2912 task))
2913 return;
2914 rpc_call_start(task);
2915 }
2916
2917 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2918 struct inode *new_dir)
2919 {
2920 struct nfs_renameres *res = task->tk_msg.rpc_resp;
2921
2922 if (!nfs4_sequence_done(task, &res->seq_res))
2923 return 0;
2924 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2925 return 0;
2926
2927 update_changeattr(old_dir, &res->old_cinfo);
2928 update_changeattr(new_dir, &res->new_cinfo);
2929 return 1;
2930 }
2931
2932 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2933 struct inode *new_dir, struct qstr *new_name)
2934 {
2935 struct nfs_server *server = NFS_SERVER(old_dir);
2936 struct nfs_renameargs arg = {
2937 .old_dir = NFS_FH(old_dir),
2938 .new_dir = NFS_FH(new_dir),
2939 .old_name = old_name,
2940 .new_name = new_name,
2941 };
2942 struct nfs_renameres res = {
2943 .server = server,
2944 };
2945 struct rpc_message msg = {
2946 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2947 .rpc_argp = &arg,
2948 .rpc_resp = &res,
2949 };
2950 int status = -ENOMEM;
2951
2952 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2953 if (!status) {
2954 update_changeattr(old_dir, &res.old_cinfo);
2955 update_changeattr(new_dir, &res.new_cinfo);
2956 }
2957 return status;
2958 }
2959
2960 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2961 struct inode *new_dir, struct qstr *new_name)
2962 {
2963 struct nfs4_exception exception = { };
2964 int err;
2965 do {
2966 err = nfs4_handle_exception(NFS_SERVER(old_dir),
2967 _nfs4_proc_rename(old_dir, old_name,
2968 new_dir, new_name),
2969 &exception);
2970 } while (exception.retry);
2971 return err;
2972 }
2973
2974 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2975 {
2976 struct nfs_server *server = NFS_SERVER(inode);
2977 struct nfs4_link_arg arg = {
2978 .fh = NFS_FH(inode),
2979 .dir_fh = NFS_FH(dir),
2980 .name = name,
2981 .bitmask = server->attr_bitmask,
2982 };
2983 struct nfs4_link_res res = {
2984 .server = server,
2985 };
2986 struct rpc_message msg = {
2987 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
2988 .rpc_argp = &arg,
2989 .rpc_resp = &res,
2990 };
2991 int status = -ENOMEM;
2992
2993 res.fattr = nfs_alloc_fattr();
2994 if (res.fattr == NULL)
2995 goto out;
2996
2997 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2998 if (!status) {
2999 update_changeattr(dir, &res.cinfo);
3000 nfs_post_op_update_inode(inode, res.fattr);
3001 }
3002 out:
3003 nfs_free_fattr(res.fattr);
3004 return status;
3005 }
3006
3007 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3008 {
3009 struct nfs4_exception exception = { };
3010 int err;
3011 do {
3012 err = nfs4_handle_exception(NFS_SERVER(inode),
3013 _nfs4_proc_link(inode, dir, name),
3014 &exception);
3015 } while (exception.retry);
3016 return err;
3017 }
3018
3019 struct nfs4_createdata {
3020 struct rpc_message msg;
3021 struct nfs4_create_arg arg;
3022 struct nfs4_create_res res;
3023 struct nfs_fh fh;
3024 struct nfs_fattr fattr;
3025 };
3026
3027 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3028 struct qstr *name, struct iattr *sattr, u32 ftype)
3029 {
3030 struct nfs4_createdata *data;
3031
3032 data = kzalloc(sizeof(*data), GFP_KERNEL);
3033 if (data != NULL) {
3034 struct nfs_server *server = NFS_SERVER(dir);
3035
3036 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3037 data->msg.rpc_argp = &data->arg;
3038 data->msg.rpc_resp = &data->res;
3039 data->arg.dir_fh = NFS_FH(dir);
3040 data->arg.server = server;
3041 data->arg.name = name;
3042 data->arg.attrs = sattr;
3043 data->arg.ftype = ftype;
3044 data->arg.bitmask = server->attr_bitmask;
3045 data->res.server = server;
3046 data->res.fh = &data->fh;
3047 data->res.fattr = &data->fattr;
3048 nfs_fattr_init(data->res.fattr);
3049 }
3050 return data;
3051 }
3052
3053 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3054 {
3055 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3056 &data->arg.seq_args, &data->res.seq_res, 1);
3057 if (status == 0) {
3058 update_changeattr(dir, &data->res.dir_cinfo);
3059 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3060 }
3061 return status;
3062 }
3063
3064 static void nfs4_free_createdata(struct nfs4_createdata *data)
3065 {
3066 kfree(data);
3067 }
3068
3069 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3070 struct page *page, unsigned int len, struct iattr *sattr)
3071 {
3072 struct nfs4_createdata *data;
3073 int status = -ENAMETOOLONG;
3074
3075 if (len > NFS4_MAXPATHLEN)
3076 goto out;
3077
3078 status = -ENOMEM;
3079 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3080 if (data == NULL)
3081 goto out;
3082
3083 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3084 data->arg.u.symlink.pages = &page;
3085 data->arg.u.symlink.len = len;
3086
3087 status = nfs4_do_create(dir, dentry, data);
3088
3089 nfs4_free_createdata(data);
3090 out:
3091 return status;
3092 }
3093
3094 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3095 struct page *page, unsigned int len, struct iattr *sattr)
3096 {
3097 struct nfs4_exception exception = { };
3098 int err;
3099 do {
3100 err = nfs4_handle_exception(NFS_SERVER(dir),
3101 _nfs4_proc_symlink(dir, dentry, page,
3102 len, sattr),
3103 &exception);
3104 } while (exception.retry);
3105 return err;
3106 }
3107
3108 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3109 struct iattr *sattr)
3110 {
3111 struct nfs4_createdata *data;
3112 int status = -ENOMEM;
3113
3114 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3115 if (data == NULL)
3116 goto out;
3117
3118 status = nfs4_do_create(dir, dentry, data);
3119
3120 nfs4_free_createdata(data);
3121 out:
3122 return status;
3123 }
3124
3125 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3126 struct iattr *sattr)
3127 {
3128 struct nfs4_exception exception = { };
3129 int err;
3130
3131 sattr->ia_mode &= ~current_umask();
3132 do {
3133 err = nfs4_handle_exception(NFS_SERVER(dir),
3134 _nfs4_proc_mkdir(dir, dentry, sattr),
3135 &exception);
3136 } while (exception.retry);
3137 return err;
3138 }
3139
3140 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3141 u64 cookie, struct page **pages, unsigned int count, int plus)
3142 {
3143 struct inode *dir = dentry->d_inode;
3144 struct nfs4_readdir_arg args = {
3145 .fh = NFS_FH(dir),
3146 .pages = pages,
3147 .pgbase = 0,
3148 .count = count,
3149 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3150 .plus = plus,
3151 };
3152 struct nfs4_readdir_res res;
3153 struct rpc_message msg = {
3154 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3155 .rpc_argp = &args,
3156 .rpc_resp = &res,
3157 .rpc_cred = cred,
3158 };
3159 int status;
3160
3161 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3162 dentry->d_parent->d_name.name,
3163 dentry->d_name.name,
3164 (unsigned long long)cookie);
3165 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
3166 res.pgbase = args.pgbase;
3167 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3168 if (status >= 0) {
3169 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
3170 status += args.pgbase;
3171 }
3172
3173 nfs_invalidate_atime(dir);
3174
3175 dprintk("%s: returns %d\n", __func__, status);
3176 return status;
3177 }
3178
3179 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3180 u64 cookie, struct page **pages, unsigned int count, int plus)
3181 {
3182 struct nfs4_exception exception = { };
3183 int err;
3184 do {
3185 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3186 _nfs4_proc_readdir(dentry, cred, cookie,
3187 pages, count, plus),
3188 &exception);
3189 } while (exception.retry);
3190 return err;
3191 }
3192
3193 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3194 struct iattr *sattr, dev_t rdev)
3195 {
3196 struct nfs4_createdata *data;
3197 int mode = sattr->ia_mode;
3198 int status = -ENOMEM;
3199
3200 BUG_ON(!(sattr->ia_valid & ATTR_MODE));
3201 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
3202
3203 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3204 if (data == NULL)
3205 goto out;
3206
3207 if (S_ISFIFO(mode))
3208 data->arg.ftype = NF4FIFO;
3209 else if (S_ISBLK(mode)) {
3210 data->arg.ftype = NF4BLK;
3211 data->arg.u.device.specdata1 = MAJOR(rdev);
3212 data->arg.u.device.specdata2 = MINOR(rdev);
3213 }
3214 else if (S_ISCHR(mode)) {
3215 data->arg.ftype = NF4CHR;
3216 data->arg.u.device.specdata1 = MAJOR(rdev);
3217 data->arg.u.device.specdata2 = MINOR(rdev);
3218 }
3219
3220 status = nfs4_do_create(dir, dentry, data);
3221
3222 nfs4_free_createdata(data);
3223 out:
3224 return status;
3225 }
3226
3227 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3228 struct iattr *sattr, dev_t rdev)
3229 {
3230 struct nfs4_exception exception = { };
3231 int err;
3232
3233 sattr->ia_mode &= ~current_umask();
3234 do {
3235 err = nfs4_handle_exception(NFS_SERVER(dir),
3236 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3237 &exception);
3238 } while (exception.retry);
3239 return err;
3240 }
3241
3242 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3243 struct nfs_fsstat *fsstat)
3244 {
3245 struct nfs4_statfs_arg args = {
3246 .fh = fhandle,
3247 .bitmask = server->attr_bitmask,
3248 };
3249 struct nfs4_statfs_res res = {
3250 .fsstat = fsstat,
3251 };
3252 struct rpc_message msg = {
3253 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3254 .rpc_argp = &args,
3255 .rpc_resp = &res,
3256 };
3257
3258 nfs_fattr_init(fsstat->fattr);
3259 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3260 }
3261
3262 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3263 {
3264 struct nfs4_exception exception = { };
3265 int err;
3266 do {
3267 err = nfs4_handle_exception(server,
3268 _nfs4_proc_statfs(server, fhandle, fsstat),
3269 &exception);
3270 } while (exception.retry);
3271 return err;
3272 }
3273
3274 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3275 struct nfs_fsinfo *fsinfo)
3276 {
3277 struct nfs4_fsinfo_arg args = {
3278 .fh = fhandle,
3279 .bitmask = server->attr_bitmask,
3280 };
3281 struct nfs4_fsinfo_res res = {
3282 .fsinfo = fsinfo,
3283 };
3284 struct rpc_message msg = {
3285 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3286 .rpc_argp = &args,
3287 .rpc_resp = &res,
3288 };
3289
3290 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3291 }
3292
3293 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3294 {
3295 struct nfs4_exception exception = { };
3296 int err;
3297
3298 do {
3299 err = nfs4_handle_exception(server,
3300 _nfs4_do_fsinfo(server, fhandle, fsinfo),
3301 &exception);
3302 } while (exception.retry);
3303 return err;
3304 }
3305
3306 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3307 {
3308 nfs_fattr_init(fsinfo->fattr);
3309 return nfs4_do_fsinfo(server, fhandle, fsinfo);
3310 }
3311
3312 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3313 struct nfs_pathconf *pathconf)
3314 {
3315 struct nfs4_pathconf_arg args = {
3316 .fh = fhandle,
3317 .bitmask = server->attr_bitmask,
3318 };
3319 struct nfs4_pathconf_res res = {
3320 .pathconf = pathconf,
3321 };
3322 struct rpc_message msg = {
3323 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3324 .rpc_argp = &args,
3325 .rpc_resp = &res,
3326 };
3327
3328 /* None of the pathconf attributes are mandatory to implement */
3329 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3330 memset(pathconf, 0, sizeof(*pathconf));
3331 return 0;
3332 }
3333
3334 nfs_fattr_init(pathconf->fattr);
3335 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3336 }
3337
3338 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3339 struct nfs_pathconf *pathconf)
3340 {
3341 struct nfs4_exception exception = { };
3342 int err;
3343
3344 do {
3345 err = nfs4_handle_exception(server,
3346 _nfs4_proc_pathconf(server, fhandle, pathconf),
3347 &exception);
3348 } while (exception.retry);
3349 return err;
3350 }
3351
3352 void __nfs4_read_done_cb(struct nfs_read_data *data)
3353 {
3354 nfs_invalidate_atime(data->header->inode);
3355 }
3356
3357 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3358 {
3359 struct nfs_server *server = NFS_SERVER(data->header->inode);
3360
3361 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3362 rpc_restart_call_prepare(task);
3363 return -EAGAIN;
3364 }
3365
3366 __nfs4_read_done_cb(data);
3367 if (task->tk_status > 0)
3368 renew_lease(server, data->timestamp);
3369 return 0;
3370 }
3371
3372 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3373 {
3374
3375 dprintk("--> %s\n", __func__);
3376
3377 if (!nfs4_sequence_done(task, &data->res.seq_res))
3378 return -EAGAIN;
3379
3380 return data->read_done_cb ? data->read_done_cb(task, data) :
3381 nfs4_read_done_cb(task, data);
3382 }
3383
3384 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3385 {
3386 data->timestamp = jiffies;
3387 data->read_done_cb = nfs4_read_done_cb;
3388 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3389 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3390 }
3391
3392 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3393 {
3394 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3395 &data->args.seq_args,
3396 &data->res.seq_res,
3397 task))
3398 return;
3399 rpc_call_start(task);
3400 }
3401
3402 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3403 {
3404 struct inode *inode = data->header->inode;
3405
3406 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3407 rpc_restart_call_prepare(task);
3408 return -EAGAIN;
3409 }
3410 if (task->tk_status >= 0) {
3411 renew_lease(NFS_SERVER(inode), data->timestamp);
3412 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3413 }
3414 return 0;
3415 }
3416
3417 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3418 {
3419 if (!nfs4_sequence_done(task, &data->res.seq_res))
3420 return -EAGAIN;
3421 return data->write_done_cb ? data->write_done_cb(task, data) :
3422 nfs4_write_done_cb(task, data);
3423 }
3424
3425 static
3426 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
3427 {
3428 const struct nfs_pgio_header *hdr = data->header;
3429
3430 /* Don't request attributes for pNFS or O_DIRECT writes */
3431 if (data->ds_clp != NULL || hdr->dreq != NULL)
3432 return false;
3433 /* Otherwise, request attributes if and only if we don't hold
3434 * a delegation
3435 */
3436 return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
3437 }
3438
3439 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3440 {
3441 struct nfs_server *server = NFS_SERVER(data->header->inode);
3442
3443 if (!nfs4_write_need_cache_consistency_data(data)) {
3444 data->args.bitmask = NULL;
3445 data->res.fattr = NULL;
3446 } else
3447 data->args.bitmask = server->cache_consistency_bitmask;
3448
3449 if (!data->write_done_cb)
3450 data->write_done_cb = nfs4_write_done_cb;
3451 data->res.server = server;
3452 data->timestamp = jiffies;
3453
3454 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3455 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3456 }
3457
3458 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3459 {
3460 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3461 &data->args.seq_args,
3462 &data->res.seq_res,
3463 task))
3464 return;
3465 rpc_call_start(task);
3466 }
3467
3468 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
3469 {
3470 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3471 &data->args.seq_args,
3472 &data->res.seq_res,
3473 task))
3474 return;
3475 rpc_call_start(task);
3476 }
3477
3478 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
3479 {
3480 struct inode *inode = data->inode;
3481
3482 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3483 rpc_restart_call_prepare(task);
3484 return -EAGAIN;
3485 }
3486 return 0;
3487 }
3488
3489 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
3490 {
3491 if (!nfs4_sequence_done(task, &data->res.seq_res))
3492 return -EAGAIN;
3493 return data->commit_done_cb(task, data);
3494 }
3495
3496 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
3497 {
3498 struct nfs_server *server = NFS_SERVER(data->inode);
3499
3500 if (data->commit_done_cb == NULL)
3501 data->commit_done_cb = nfs4_commit_done_cb;
3502 data->res.server = server;
3503 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3504 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3505 }
3506
3507 struct nfs4_renewdata {
3508 struct nfs_client *client;
3509 unsigned long timestamp;
3510 };
3511
3512 /*
3513 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3514 * standalone procedure for queueing an asynchronous RENEW.
3515 */
3516 static void nfs4_renew_release(void *calldata)
3517 {
3518 struct nfs4_renewdata *data = calldata;
3519 struct nfs_client *clp = data->client;
3520
3521 if (atomic_read(&clp->cl_count) > 1)
3522 nfs4_schedule_state_renewal(clp);
3523 nfs_put_client(clp);
3524 kfree(data);
3525 }
3526
3527 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3528 {
3529 struct nfs4_renewdata *data = calldata;
3530 struct nfs_client *clp = data->client;
3531 unsigned long timestamp = data->timestamp;
3532
3533 if (task->tk_status < 0) {
3534 /* Unless we're shutting down, schedule state recovery! */
3535 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3536 return;
3537 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3538 nfs4_schedule_lease_recovery(clp);
3539 return;
3540 }
3541 nfs4_schedule_path_down_recovery(clp);
3542 }
3543 do_renew_lease(clp, timestamp);
3544 }
3545
3546 static const struct rpc_call_ops nfs4_renew_ops = {
3547 .rpc_call_done = nfs4_renew_done,
3548 .rpc_release = nfs4_renew_release,
3549 };
3550
3551 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3552 {
3553 struct rpc_message msg = {
3554 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3555 .rpc_argp = clp,
3556 .rpc_cred = cred,
3557 };
3558 struct nfs4_renewdata *data;
3559
3560 if (renew_flags == 0)
3561 return 0;
3562 if (!atomic_inc_not_zero(&clp->cl_count))
3563 return -EIO;
3564 data = kmalloc(sizeof(*data), GFP_NOFS);
3565 if (data == NULL)
3566 return -ENOMEM;
3567 data->client = clp;
3568 data->timestamp = jiffies;
3569 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3570 &nfs4_renew_ops, data);
3571 }
3572
3573 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3574 {
3575 struct rpc_message msg = {
3576 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3577 .rpc_argp = clp,
3578 .rpc_cred = cred,
3579 };
3580 unsigned long now = jiffies;
3581 int status;
3582
3583 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3584 if (status < 0)
3585 return status;
3586 do_renew_lease(clp, now);
3587 return 0;
3588 }
3589
3590 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3591 {
3592 return (server->caps & NFS_CAP_ACLS)
3593 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3594 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3595 }
3596
3597 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3598  * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3599 * the stack.
3600 */
3601 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3602
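/*
 * Copy a flat buffer into freshly allocated pages so that no slab
 * memory ends up in the page vector. Returns the number of pages
 * filled, or -ENOMEM after releasing any pages already allocated.
 */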
3603 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3604 struct page **pages, unsigned int *pgbase)
3605 {
3606 struct page *newpage, **spages;
3607 int rc = 0;
3608 size_t len;
3609 spages = pages;
3610
3611 do {
3612 len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3613 newpage = alloc_page(GFP_KERNEL);
3614
3615 if (newpage == NULL)
3616 goto unwind;
3617 memcpy(page_address(newpage), buf, len);
3618 buf += len;
3619 buflen -= len;
3620 *pages++ = newpage;
3621 rc++;
3622 } while (buflen != 0);
3623
3624 return rc;
3625
3626 unwind:
3627 for(; rc > 0; rc--)
3628 __free_page(spages[rc-1]);
3629 return -ENOMEM;
3630 }
3631
3632 struct nfs4_cached_acl {
3633 int cached;
3634 size_t len;
3635 char data[0];
3636 };
3637
3638 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3639 {
3640 struct nfs_inode *nfsi = NFS_I(inode);
3641
3642 spin_lock(&inode->i_lock);
3643 kfree(nfsi->nfs4_acl);
3644 nfsi->nfs4_acl = acl;
3645 spin_unlock(&inode->i_lock);
3646 }
3647
3648 static void nfs4_zap_acl_attr(struct inode *inode)
3649 {
3650 nfs4_set_cached_acl(inode, NULL);
3651 }
3652
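/*
 * Read the ACL from the per-inode cache. Returns -ENOENT if nothing
 * usable is cached, -ERANGE if the caller's buffer is too small, and
 * the ACL length otherwise. A NULL buf is treated as a length query.
 */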
3653 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3654 {
3655 struct nfs_inode *nfsi = NFS_I(inode);
3656 struct nfs4_cached_acl *acl;
3657 int ret = -ENOENT;
3658
3659 spin_lock(&inode->i_lock);
3660 acl = nfsi->nfs4_acl;
3661 if (acl == NULL)
3662 goto out;
3663 if (buf == NULL) /* user is just asking for length */
3664 goto out_len;
3665 if (acl->cached == 0)
3666 goto out;
3667 ret = -ERANGE; /* see getxattr(2) man page */
3668 if (acl->len > buflen)
3669 goto out;
3670 memcpy(buf, acl->data, acl->len);
3671 out_len:
3672 ret = acl->len;
3673 out:
3674 spin_unlock(&inode->i_lock);
3675 return ret;
3676 }
3677
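/*
 * Cache the ACL returned by the server: keep the data itself when it
 * fits in a single page, otherwise remember only its length so that a
 * later getxattr call can size its buffer correctly.
 */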
3678 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3679 {
3680 struct nfs4_cached_acl *acl;
3681
3682 if (pages && acl_len <= PAGE_SIZE) {
3683 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3684 if (acl == NULL)
3685 goto out;
3686 acl->cached = 1;
3687 _copy_from_pages(acl->data, pages, pgbase, acl_len);
3688 } else {
3689 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3690 if (acl == NULL)
3691 goto out;
3692 acl->cached = 0;
3693 }
3694 acl->len = acl_len;
3695 out:
3696 nfs4_set_cached_acl(inode, acl);
3697 }
3698
3699 /*
3700 * The getxattr API returns the required buffer length when called with a
3701 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3702 * the required buf. On a NULL buf, we send a page of data to the server
3703 * guessing that the ACL request can be serviced by a page. If so, we cache
3704 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3705 * the cache. If not so, we throw away the page, and cache the required
3706 * length. The next getxattr call will then produce another round trip to
3707 * the server, this time with the input buf of the required size.
3708 */
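/*
 * Illustrative userspace sequence that exercises the two-pass behaviour
 * described above (a sketch only; error handling omitted):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	char *buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * The first call triggers the round trip below and populates the cache;
 * the second call is then served from the cache whenever the ACL fit
 * within the single page sent as a guess.
 */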
3709 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3710 {
3711 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3712 struct nfs_getaclargs args = {
3713 .fh = NFS_FH(inode),
3714 .acl_pages = pages,
3715 .acl_len = buflen,
3716 };
3717 struct nfs_getaclres res = {
3718 .acl_len = buflen,
3719 };
3720 struct rpc_message msg = {
3721 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3722 .rpc_argp = &args,
3723 .rpc_resp = &res,
3724 };
3725 int ret = -ENOMEM, npages, i, acl_len = 0;
3726
3727 npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3728 /* As long as we're doing a round trip to the server anyway,
3729 * let's be prepared for a page of acl data. */
3730 if (npages == 0)
3731 npages = 1;
3732
3733 /* Add an extra page to handle the bitmap returned */
3734 npages++;
3735
3736 for (i = 0; i < npages; i++) {
3737 pages[i] = alloc_page(GFP_KERNEL);
3738 if (!pages[i])
3739 goto out_free;
3740 }
3741
3742 /* for decoding across pages */
3743 res.acl_scratch = alloc_page(GFP_KERNEL);
3744 if (!res.acl_scratch)
3745 goto out_free;
3746
3747 args.acl_len = npages * PAGE_SIZE;
3748 args.acl_pgbase = 0;
3749
3750 /* Let decode_getacl know not to fail if the ACL data is larger than
3751 * the page we send as a guess */
3752 if (buf == NULL)
3753 res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3754
3755 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3756 __func__, buf, buflen, npages, args.acl_len);
3757 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3758 &msg, &args.seq_args, &res.seq_res, 0);
3759 if (ret)
3760 goto out_free;
3761
3762 acl_len = res.acl_len - res.acl_data_offset;
3763 if (acl_len > args.acl_len)
3764 nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3765 else
3766 nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
3767 acl_len);
3768 if (buf) {
3769 ret = -ERANGE;
3770 if (acl_len > buflen)
3771 goto out_free;
3772 _copy_from_pages(buf, pages, res.acl_data_offset,
3773 acl_len);
3774 }
3775 ret = acl_len;
3776 out_free:
3777 for (i = 0; i < npages; i++)
3778 if (pages[i])
3779 __free_page(pages[i]);
3780 if (res.acl_scratch)
3781 __free_page(res.acl_scratch);
3782 return ret;
3783 }
3784
3785 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3786 {
3787 struct nfs4_exception exception = { };
3788 ssize_t ret;
3789 do {
3790 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3791 if (ret >= 0)
3792 break;
3793 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3794 } while (exception.retry);
3795 return ret;
3796 }
3797
3798 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3799 {
3800 struct nfs_server *server = NFS_SERVER(inode);
3801 int ret;
3802
3803 if (!nfs4_server_supports_acls(server))
3804 return -EOPNOTSUPP;
3805 ret = nfs_revalidate_inode(server, inode);
3806 if (ret < 0)
3807 return ret;
3808 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3809 nfs_zap_acl_cache(inode);
3810 ret = nfs4_read_cached_acl(inode, buf, buflen);
3811 if (ret != -ENOENT)
3812 /* -ENOENT is returned if there is no ACL or if there is an ACL
3813 * but no cached acl data, just the acl length */
3814 return ret;
3815 return nfs4_get_acl_uncached(inode, buf, buflen);
3816 }
3817
3818 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3819 {
3820 struct nfs_server *server = NFS_SERVER(inode);
3821 struct page *pages[NFS4ACL_MAXPAGES];
3822 struct nfs_setaclargs arg = {
3823 .fh = NFS_FH(inode),
3824 .acl_pages = pages,
3825 .acl_len = buflen,
3826 };
3827 struct nfs_setaclres res;
3828 struct rpc_message msg = {
3829 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3830 .rpc_argp = &arg,
3831 .rpc_resp = &res,
3832 };
3833 int ret, i;
3834
3835 if (!nfs4_server_supports_acls(server))
3836 return -EOPNOTSUPP;
3837 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3838 if (i < 0)
3839 return i;
3840 nfs_inode_return_delegation(inode);
3841 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3842
3843 /*
3844 * Free each page after tx, so the only ref left is
3845 * held by the network stack
3846 */
3847 for (; i > 0; i--)
3848 put_page(pages[i-1]);
3849
3850 /*
3851 * Acl update can result in inode attribute update.
3852 * so mark the attribute cache invalid.
3853 */
3854 spin_lock(&inode->i_lock);
3855 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3856 spin_unlock(&inode->i_lock);
3857 nfs_access_zap_cache(inode);
3858 nfs_zap_acl_cache(inode);
3859 return ret;
3860 }
3861
3862 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3863 {
3864 struct nfs4_exception exception = { };
3865 int err;
3866 do {
3867 err = nfs4_handle_exception(NFS_SERVER(inode),
3868 __nfs4_proc_set_acl(inode, buf, buflen),
3869 &exception);
3870 } while (exception.retry);
3871 return err;
3872 }
3873
3874 static int
3875 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3876 {
3877 struct nfs_client *clp = server->nfs_client;
3878
3879 if (task->tk_status >= 0)
3880 return 0;
3881 switch (task->tk_status) {
3882 case -NFS4ERR_DELEG_REVOKED:
3883 case -NFS4ERR_ADMIN_REVOKED:
3884 case -NFS4ERR_BAD_STATEID:
3885 if (state == NULL)
3886 break;
3887 nfs_remove_bad_delegation(state->inode); /* fall through */
3888 case -NFS4ERR_OPENMODE:
3889 if (state == NULL)
3890 break;
3891 nfs4_schedule_stateid_recovery(server, state);
3892 goto wait_on_recovery;
3893 case -NFS4ERR_EXPIRED:
3894 if (state != NULL)
3895 nfs4_schedule_stateid_recovery(server, state);
3896 case -NFS4ERR_STALE_STATEID:
3897 case -NFS4ERR_STALE_CLIENTID:
3898 nfs4_schedule_lease_recovery(clp);
3899 goto wait_on_recovery;
3900 #if defined(CONFIG_NFS_V4_1)
3901 case -NFS4ERR_BADSESSION:
3902 case -NFS4ERR_BADSLOT:
3903 case -NFS4ERR_BAD_HIGH_SLOT:
3904 case -NFS4ERR_DEADSESSION:
3905 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3906 case -NFS4ERR_SEQ_FALSE_RETRY:
3907 case -NFS4ERR_SEQ_MISORDERED:
3908 dprintk("%s ERROR %d, Reset session\n", __func__,
3909 task->tk_status);
3910 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
3911 task->tk_status = 0;
3912 return -EAGAIN;
3913 #endif /* CONFIG_NFS_V4_1 */
3914 case -NFS4ERR_DELAY:
3915 nfs_inc_server_stats(server, NFSIOS_DELAY); /* fall through */
3916 case -NFS4ERR_GRACE:
3917 case -EKEYEXPIRED:
3918 rpc_delay(task, NFS4_POLL_RETRY_MAX);
3919 task->tk_status = 0;
3920 return -EAGAIN;
3921 case -NFS4ERR_RETRY_UNCACHED_REP:
3922 case -NFS4ERR_OLD_STATEID:
3923 task->tk_status = 0;
3924 return -EAGAIN;
3925 }
3926 task->tk_status = nfs4_map_errors(task->tk_status);
3927 return 0;
3928 wait_on_recovery:
3929 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3930 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3931 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3932 task->tk_status = 0;
3933 return -EAGAIN;
3934 }
3935
3936 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
3937 nfs4_verifier *bootverf)
3938 {
3939 __be32 verf[2];
3940
3941 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
3942 /* An impossible timestamp guarantees this value
3943 * will never match a generated boot time. */
3944 verf[0] = 0;
3945 verf[1] = (__be32)(NSEC_PER_SEC + 1);
3946 } else {
3947 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
3948 verf[0] = (__be32)nn->boot_time.tv_sec;
3949 verf[1] = (__be32)nn->boot_time.tv_nsec;
3950 }
3951 memcpy(bootverf->data, verf, sizeof(bootverf->data));
3952 }
3953
3954 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3955 unsigned short port, struct rpc_cred *cred,
3956 struct nfs4_setclientid_res *res)
3957 {
3958 nfs4_verifier sc_verifier;
3959 struct nfs4_setclientid setclientid = {
3960 .sc_verifier = &sc_verifier,
3961 .sc_prog = program,
3962 .sc_cb_ident = clp->cl_cb_ident,
3963 };
3964 struct rpc_message msg = {
3965 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
3966 .rpc_argp = &setclientid,
3967 .rpc_resp = res,
3968 .rpc_cred = cred,
3969 };
3970 int loop = 0;
3971 int status;
3972
3973 nfs4_init_boot_verifier(clp, &sc_verifier);
3974
3975 for (;;) {
3976 rcu_read_lock();
3977 setclientid.sc_name_len = scnprintf(setclientid.sc_name,
3978 sizeof(setclientid.sc_name), "%s/%s %s %s %u",
3979 clp->cl_ipaddr,
3980 rpc_peeraddr2str(clp->cl_rpcclient,
3981 RPC_DISPLAY_ADDR),
3982 rpc_peeraddr2str(clp->cl_rpcclient,
3983 RPC_DISPLAY_PROTO),
3984 clp->cl_rpcclient->cl_auth->au_ops->au_name,
3985 clp->cl_id_uniquifier);
3986 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
3987 sizeof(setclientid.sc_netid),
3988 rpc_peeraddr2str(clp->cl_rpcclient,
3989 RPC_DISPLAY_NETID));
3990 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
3991 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
3992 clp->cl_ipaddr, port >> 8, port & 255);
3993 rcu_read_unlock();
3994
3995 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
3996 if (status != -NFS4ERR_CLID_INUSE)
3997 break;
3998 if (loop != 0) {
3999 ++clp->cl_id_uniquifier;
4000 break;
4001 }
4002 ++loop;
4003 ssleep(clp->cl_lease_time / HZ + 1);
4004 }
4005 return status;
4006 }
4007
4008 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4009 struct nfs4_setclientid_res *arg,
4010 struct rpc_cred *cred)
4011 {
4012 struct nfs_fsinfo fsinfo;
4013 struct rpc_message msg = {
4014 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4015 .rpc_argp = arg,
4016 .rpc_resp = &fsinfo,
4017 .rpc_cred = cred,
4018 };
4019 unsigned long now;
4020 int status;
4021
4022 now = jiffies;
4023 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4024 if (status == 0) {
4025 spin_lock(&clp->cl_lock);
4026 clp->cl_lease_time = fsinfo.lease_time * HZ;
4027 clp->cl_last_renewal = now;
4028 spin_unlock(&clp->cl_lock);
4029 }
4030 return status;
4031 }
4032
4033 struct nfs4_delegreturndata {
4034 struct nfs4_delegreturnargs args;
4035 struct nfs4_delegreturnres res;
4036 struct nfs_fh fh;
4037 nfs4_stateid stateid;
4038 unsigned long timestamp;
4039 struct nfs_fattr fattr;
4040 int rpc_status;
4041 };
4042
4043 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4044 {
4045 struct nfs4_delegreturndata *data = calldata;
4046
4047 if (!nfs4_sequence_done(task, &data->res.seq_res))
4048 return;
4049
4050 switch (task->tk_status) {
4051 case -NFS4ERR_STALE_STATEID:
4052 case -NFS4ERR_EXPIRED:
4053 case 0:
4054 renew_lease(data->res.server, data->timestamp);
4055 break;
4056 default:
4057 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4058 -EAGAIN) {
4059 rpc_restart_call_prepare(task);
4060 return;
4061 }
4062 }
4063 data->rpc_status = task->tk_status;
4064 }
4065
4066 static void nfs4_delegreturn_release(void *calldata)
4067 {
4068 kfree(calldata);
4069 }
4070
4071 #if defined(CONFIG_NFS_V4_1)
4072 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4073 {
4074 struct nfs4_delegreturndata *d_data;
4075
4076 d_data = (struct nfs4_delegreturndata *)data;
4077
4078 if (nfs4_setup_sequence(d_data->res.server,
4079 &d_data->args.seq_args,
4080 &d_data->res.seq_res, task))
4081 return;
4082 rpc_call_start(task);
4083 }
4084 #endif /* CONFIG_NFS_V4_1 */
4085
4086 static const struct rpc_call_ops nfs4_delegreturn_ops = {
4087 #if defined(CONFIG_NFS_V4_1)
4088 .rpc_call_prepare = nfs4_delegreturn_prepare,
4089 #endif /* CONFIG_NFS_V4_1 */
4090 .rpc_call_done = nfs4_delegreturn_done,
4091 .rpc_release = nfs4_delegreturn_release,
4092 };
4093
4094 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4095 {
4096 struct nfs4_delegreturndata *data;
4097 struct nfs_server *server = NFS_SERVER(inode);
4098 struct rpc_task *task;
4099 struct rpc_message msg = {
4100 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4101 .rpc_cred = cred,
4102 };
4103 struct rpc_task_setup task_setup_data = {
4104 .rpc_client = server->client,
4105 .rpc_message = &msg,
4106 .callback_ops = &nfs4_delegreturn_ops,
4107 .flags = RPC_TASK_ASYNC,
4108 };
4109 int status = 0;
4110
4111 data = kzalloc(sizeof(*data), GFP_NOFS);
4112 if (data == NULL)
4113 return -ENOMEM;
4114 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4115 data->args.fhandle = &data->fh;
4116 data->args.stateid = &data->stateid;
4117 data->args.bitmask = server->cache_consistency_bitmask;
4118 nfs_copy_fh(&data->fh, NFS_FH(inode));
4119 nfs4_stateid_copy(&data->stateid, stateid);
4120 data->res.fattr = &data->fattr;
4121 data->res.server = server;
4122 nfs_fattr_init(data->res.fattr);
4123 data->timestamp = jiffies;
4124 data->rpc_status = 0;
4125
4126 task_setup_data.callback_data = data;
4127 msg.rpc_argp = &data->args;
4128 msg.rpc_resp = &data->res;
4129 task = rpc_run_task(&task_setup_data);
4130 if (IS_ERR(task))
4131 return PTR_ERR(task);
4132 if (!issync)
4133 goto out;
4134 status = nfs4_wait_for_completion_rpc_task(task);
4135 if (status != 0)
4136 goto out;
4137 status = data->rpc_status;
4138 if (status == 0)
4139 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
4140 else
4141 nfs_refresh_inode(inode, &data->fattr);
4142 out:
4143 rpc_put_task(task);
4144 return status;
4145 }
4146
4147 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4148 {
4149 struct nfs_server *server = NFS_SERVER(inode);
4150 struct nfs4_exception exception = { };
4151 int err;
4152 do {
4153 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4154 switch (err) {
4155 case -NFS4ERR_STALE_STATEID:
4156 case -NFS4ERR_EXPIRED:
4157 case 0:
4158 return 0;
4159 }
4160 err = nfs4_handle_exception(server, err, &exception);
4161 } while (exception.retry);
4162 return err;
4163 }
4164
4165 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4166 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4167
4168 /*
4169 * sleep, with exponential backoff, and retry the LOCK operation.
4170 */
4171 static unsigned long
4172 nfs4_set_lock_task_retry(unsigned long timeout)
4173 {
4174 freezable_schedule_timeout_killable(timeout);
4175 timeout <<= 1;
4176 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4177 return NFS4_LOCK_MAXTIMEOUT;
4178 return timeout;
4179 }
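/*
 * Worked example of the backoff above, as driven by nfs4_proc_lock():
 * starting from NFS4_LOCK_MINTIMEOUT the task sleeps roughly 1s, 2s, 4s,
 * 8s, 16s and then 30s (NFS4_LOCK_MAXTIMEOUT) between blocked-lock
 * retries, assuming the wait is not interrupted by a signal.
 */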
4180
4181 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4182 {
4183 struct inode *inode = state->inode;
4184 struct nfs_server *server = NFS_SERVER(inode);
4185 struct nfs_client *clp = server->nfs_client;
4186 struct nfs_lockt_args arg = {
4187 .fh = NFS_FH(inode),
4188 .fl = request,
4189 };
4190 struct nfs_lockt_res res = {
4191 .denied = request,
4192 };
4193 struct rpc_message msg = {
4194 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4195 .rpc_argp = &arg,
4196 .rpc_resp = &res,
4197 .rpc_cred = state->owner->so_cred,
4198 };
4199 struct nfs4_lock_state *lsp;
4200 int status;
4201
4202 arg.lock_owner.clientid = clp->cl_clientid;
4203 status = nfs4_set_lock_state(state, request);
4204 if (status != 0)
4205 goto out;
4206 lsp = request->fl_u.nfs4_fl.owner;
4207 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4208 arg.lock_owner.s_dev = server->s_dev;
4209 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4210 switch (status) {
4211 case 0:
4212 request->fl_type = F_UNLCK;
4213 break;
4214 case -NFS4ERR_DENIED:
4215 status = 0;
4216 }
4217 request->fl_ops->fl_release_private(request);
4218 out:
4219 return status;
4220 }
4221
4222 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4223 {
4224 struct nfs4_exception exception = { };
4225 int err;
4226
4227 do {
4228 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4229 _nfs4_proc_getlk(state, cmd, request),
4230 &exception);
4231 } while (exception.retry);
4232 return err;
4233 }
4234
4235 static int do_vfs_lock(struct file *file, struct file_lock *fl)
4236 {
4237 int res = 0;
4238 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4239 case FL_POSIX:
4240 res = posix_lock_file_wait(file, fl);
4241 break;
4242 case FL_FLOCK:
4243 res = flock_lock_file_wait(file, fl);
4244 break;
4245 default:
4246 BUG();
4247 }
4248 return res;
4249 }
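/*
 * Descriptive note: FL_POSIX covers fcntl()/lockf() byte-range locks and
 * FL_FLOCK covers flock() whole-file locks; a request carrying neither
 * flag is a caller bug, hence the BUG() above.
 */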
4250
4251 struct nfs4_unlockdata {
4252 struct nfs_locku_args arg;
4253 struct nfs_locku_res res;
4254 struct nfs4_lock_state *lsp;
4255 struct nfs_open_context *ctx;
4256 struct file_lock fl;
4257 const struct nfs_server *server;
4258 unsigned long timestamp;
4259 };
4260
4261 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4262 struct nfs_open_context *ctx,
4263 struct nfs4_lock_state *lsp,
4264 struct nfs_seqid *seqid)
4265 {
4266 struct nfs4_unlockdata *p;
4267 struct inode *inode = lsp->ls_state->inode;
4268
4269 p = kzalloc(sizeof(*p), GFP_NOFS);
4270 if (p == NULL)
4271 return NULL;
4272 p->arg.fh = NFS_FH(inode);
4273 p->arg.fl = &p->fl;
4274 p->arg.seqid = seqid;
4275 p->res.seqid = seqid;
4276 p->arg.stateid = &lsp->ls_stateid;
4277 p->lsp = lsp;
4278 atomic_inc(&lsp->ls_count);
4279 /* Ensure we don't close file until we're done freeing locks! */
4280 p->ctx = get_nfs_open_context(ctx);
4281 memcpy(&p->fl, fl, sizeof(p->fl));
4282 p->server = NFS_SERVER(inode);
4283 return p;
4284 }
4285
4286 static void nfs4_locku_release_calldata(void *data)
4287 {
4288 struct nfs4_unlockdata *calldata = data;
4289 nfs_free_seqid(calldata->arg.seqid);
4290 nfs4_put_lock_state(calldata->lsp);
4291 put_nfs_open_context(calldata->ctx);
4292 kfree(calldata);
4293 }
4294
4295 static void nfs4_locku_done(struct rpc_task *task, void *data)
4296 {
4297 struct nfs4_unlockdata *calldata = data;
4298
4299 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4300 return;
4301 switch (task->tk_status) {
4302 case 0:
4303 nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4304 &calldata->res.stateid);
4305 renew_lease(calldata->server, calldata->timestamp);
4306 break;
4307 case -NFS4ERR_BAD_STATEID:
4308 case -NFS4ERR_OLD_STATEID:
4309 case -NFS4ERR_STALE_STATEID:
4310 case -NFS4ERR_EXPIRED:
4311 break;
4312 default:
4313 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4314 rpc_restart_call_prepare(task);
4315 }
4316 }
4317
4318 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4319 {
4320 struct nfs4_unlockdata *calldata = data;
4321
4322 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4323 return;
4324 if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
4325 /* Note: exit _without_ running nfs4_locku_done */
4326 task->tk_action = NULL;
4327 return;
4328 }
4329 calldata->timestamp = jiffies;
4330 if (nfs4_setup_sequence(calldata->server,
4331 &calldata->arg.seq_args,
4332 &calldata->res.seq_res, task))
4333 return;
4334 rpc_call_start(task);
4335 }
4336
4337 static const struct rpc_call_ops nfs4_locku_ops = {
4338 .rpc_call_prepare = nfs4_locku_prepare,
4339 .rpc_call_done = nfs4_locku_done,
4340 .rpc_release = nfs4_locku_release_calldata,
4341 };
4342
4343 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4344 struct nfs_open_context *ctx,
4345 struct nfs4_lock_state *lsp,
4346 struct nfs_seqid *seqid)
4347 {
4348 struct nfs4_unlockdata *data;
4349 struct rpc_message msg = {
4350 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4351 .rpc_cred = ctx->cred,
4352 };
4353 struct rpc_task_setup task_setup_data = {
4354 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4355 .rpc_message = &msg,
4356 .callback_ops = &nfs4_locku_ops,
4357 .workqueue = nfsiod_workqueue,
4358 .flags = RPC_TASK_ASYNC,
4359 };
4360
4361 /* Ensure this is an unlock - when canceling a lock, the
4362 * canceled lock is passed in, and it won't be an unlock.
4363 */
4364 fl->fl_type = F_UNLCK;
4365
4366 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4367 if (data == NULL) {
4368 nfs_free_seqid(seqid);
4369 return ERR_PTR(-ENOMEM);
4370 }
4371
4372 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4373 msg.rpc_argp = &data->arg;
4374 msg.rpc_resp = &data->res;
4375 task_setup_data.callback_data = data;
4376 return rpc_run_task(&task_setup_data);
4377 }
4378
4379 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4380 {
4381 struct nfs_inode *nfsi = NFS_I(state->inode);
4382 struct nfs_seqid *seqid;
4383 struct nfs4_lock_state *lsp;
4384 struct rpc_task *task;
4385 int status = 0;
4386 unsigned char fl_flags = request->fl_flags;
4387
4388 status = nfs4_set_lock_state(state, request);
4389 /* Unlock _before_ we do the RPC call */
4390 request->fl_flags |= FL_EXISTS;
4391 down_read(&nfsi->rwsem);
4392 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4393 up_read(&nfsi->rwsem);
4394 goto out;
4395 }
4396 up_read(&nfsi->rwsem);
4397 if (status != 0)
4398 goto out;
4399 /* Is this a delegated lock? */
4400 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4401 goto out;
4402 lsp = request->fl_u.nfs4_fl.owner;
4403 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4404 status = -ENOMEM;
4405 if (seqid == NULL)
4406 goto out;
4407 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4408 status = PTR_ERR(task);
4409 if (IS_ERR(task))
4410 goto out;
4411 status = nfs4_wait_for_completion_rpc_task(task);
4412 rpc_put_task(task);
4413 out:
4414 request->fl_flags = fl_flags;
4415 return status;
4416 }
4417
4418 struct nfs4_lockdata {
4419 struct nfs_lock_args arg;
4420 struct nfs_lock_res res;
4421 struct nfs4_lock_state *lsp;
4422 struct nfs_open_context *ctx;
4423 struct file_lock fl;
4424 unsigned long timestamp;
4425 int rpc_status;
4426 int cancelled;
4427 struct nfs_server *server;
4428 };
4429
4430 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4431 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4432 gfp_t gfp_mask)
4433 {
4434 struct nfs4_lockdata *p;
4435 struct inode *inode = lsp->ls_state->inode;
4436 struct nfs_server *server = NFS_SERVER(inode);
4437
4438 p = kzalloc(sizeof(*p), gfp_mask);
4439 if (p == NULL)
4440 return NULL;
4441
4442 p->arg.fh = NFS_FH(inode);
4443 p->arg.fl = &p->fl;
4444 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4445 if (p->arg.open_seqid == NULL)
4446 goto out_free;
4447 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4448 if (p->arg.lock_seqid == NULL)
4449 goto out_free_seqid;
4450 p->arg.lock_stateid = &lsp->ls_stateid;
4451 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4452 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4453 p->arg.lock_owner.s_dev = server->s_dev;
4454 p->res.lock_seqid = p->arg.lock_seqid;
4455 p->lsp = lsp;
4456 p->server = server;
4457 atomic_inc(&lsp->ls_count);
4458 p->ctx = get_nfs_open_context(ctx);
4459 memcpy(&p->fl, fl, sizeof(p->fl));
4460 return p;
4461 out_free_seqid:
4462 nfs_free_seqid(p->arg.open_seqid);
4463 out_free:
4464 kfree(p);
4465 return NULL;
4466 }
4467
4468 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4469 {
4470 struct nfs4_lockdata *data = calldata;
4471 struct nfs4_state *state = data->lsp->ls_state;
4472
4473 dprintk("%s: begin!\n", __func__);
4474 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4475 return;
4476 /* Do we need to do an open_to_lock_owner? */
4477 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4478 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4479 return;
4480 data->arg.open_stateid = &state->stateid;
4481 data->arg.new_lock_owner = 1;
4482 data->res.open_seqid = data->arg.open_seqid;
4483 } else
4484 data->arg.new_lock_owner = 0;
4485 data->timestamp = jiffies;
4486 if (nfs4_setup_sequence(data->server,
4487 &data->arg.seq_args,
4488 &data->res.seq_res, task))
4489 return;
4490 rpc_call_start(task);
4491 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4492 }
4493
4494 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4495 {
4496 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4497 nfs4_lock_prepare(task, calldata);
4498 }
4499
4500 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4501 {
4502 struct nfs4_lockdata *data = calldata;
4503
4504 dprintk("%s: begin!\n", __func__);
4505
4506 if (!nfs4_sequence_done(task, &data->res.seq_res))
4507 return;
4508
4509 data->rpc_status = task->tk_status;
4510 if (data->arg.new_lock_owner != 0) {
4511 if (data->rpc_status == 0)
4512 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4513 else
4514 goto out;
4515 }
4516 if (data->rpc_status == 0) {
4517 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4518 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
4519 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4520 }
4521 out:
4522 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4523 }
4524
4525 static void nfs4_lock_release(void *calldata)
4526 {
4527 struct nfs4_lockdata *data = calldata;
4528
4529 dprintk("%s: begin!\n", __func__);
4530 nfs_free_seqid(data->arg.open_seqid);
4531 if (data->cancelled != 0) {
4532 struct rpc_task *task;
4533 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4534 data->arg.lock_seqid);
4535 if (!IS_ERR(task))
4536 rpc_put_task_async(task);
4537 dprintk("%s: cancelling lock!\n", __func__);
4538 } else
4539 nfs_free_seqid(data->arg.lock_seqid);
4540 nfs4_put_lock_state(data->lsp);
4541 put_nfs_open_context(data->ctx);
4542 kfree(data);
4543 dprintk("%s: done!\n", __func__);
4544 }
4545
4546 static const struct rpc_call_ops nfs4_lock_ops = {
4547 .rpc_call_prepare = nfs4_lock_prepare,
4548 .rpc_call_done = nfs4_lock_done,
4549 .rpc_release = nfs4_lock_release,
4550 };
4551
4552 static const struct rpc_call_ops nfs4_recover_lock_ops = {
4553 .rpc_call_prepare = nfs4_recover_lock_prepare,
4554 .rpc_call_done = nfs4_lock_done,
4555 .rpc_release = nfs4_lock_release,
4556 };
4557
4558 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4559 {
4560 switch (error) {
4561 case -NFS4ERR_ADMIN_REVOKED:
4562 case -NFS4ERR_BAD_STATEID:
4563 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4564 if (new_lock_owner != 0 ||
4565 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4566 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4567 break;
4568 case -NFS4ERR_STALE_STATEID:
4569 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; /* fall through */
4570 case -NFS4ERR_EXPIRED:
4571 nfs4_schedule_lease_recovery(server->nfs_client);
4572 }
4573 }
4574
4575 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4576 {
4577 struct nfs4_lockdata *data;
4578 struct rpc_task *task;
4579 struct rpc_message msg = {
4580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4581 .rpc_cred = state->owner->so_cred,
4582 };
4583 struct rpc_task_setup task_setup_data = {
4584 .rpc_client = NFS_CLIENT(state->inode),
4585 .rpc_message = &msg,
4586 .callback_ops = &nfs4_lock_ops,
4587 .workqueue = nfsiod_workqueue,
4588 .flags = RPC_TASK_ASYNC,
4589 };
4590 int ret;
4591
4592 dprintk("%s: begin!\n", __func__);
4593 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4594 fl->fl_u.nfs4_fl.owner,
4595 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4596 if (data == NULL)
4597 return -ENOMEM;
4598 if (IS_SETLKW(cmd))
4599 data->arg.block = 1;
4600 if (recovery_type > NFS_LOCK_NEW) {
4601 if (recovery_type == NFS_LOCK_RECLAIM)
4602 data->arg.reclaim = NFS_LOCK_RECLAIM;
4603 task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4604 }
4605 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4606 msg.rpc_argp = &data->arg;
4607 msg.rpc_resp = &data->res;
4608 task_setup_data.callback_data = data;
4609 task = rpc_run_task(&task_setup_data);
4610 if (IS_ERR(task))
4611 return PTR_ERR(task);
4612 ret = nfs4_wait_for_completion_rpc_task(task);
4613 if (ret == 0) {
4614 ret = data->rpc_status;
4615 if (ret)
4616 nfs4_handle_setlk_error(data->server, data->lsp,
4617 data->arg.new_lock_owner, ret);
4618 } else
4619 data->cancelled = 1;
4620 rpc_put_task(task);
4621 dprintk("%s: done, ret = %d!\n", __func__, ret);
4622 return ret;
4623 }
4624
4625 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4626 {
4627 struct nfs_server *server = NFS_SERVER(state->inode);
4628 struct nfs4_exception exception = {
4629 .inode = state->inode,
4630 };
4631 int err;
4632
4633 do {
4634 /* Cache the lock if possible... */
4635 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4636 return 0;
4637 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4638 if (err != -NFS4ERR_DELAY)
4639 break;
4640 nfs4_handle_exception(server, err, &exception);
4641 } while (exception.retry);
4642 return err;
4643 }
4644
4645 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4646 {
4647 struct nfs_server *server = NFS_SERVER(state->inode);
4648 struct nfs4_exception exception = {
4649 .inode = state->inode,
4650 };
4651 int err;
4652
4653 err = nfs4_set_lock_state(state, request);
4654 if (err != 0)
4655 return err;
4656 do {
4657 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4658 return 0;
4659 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4660 switch (err) {
4661 default:
4662 goto out;
4663 case -NFS4ERR_GRACE:
4664 case -NFS4ERR_DELAY:
4665 nfs4_handle_exception(server, err, &exception);
4666 err = 0;
4667 }
4668 } while (exception.retry);
4669 out:
4670 return err;
4671 }
4672
4673 #if defined(CONFIG_NFS_V4_1)
4674 static int nfs41_check_expired_locks(struct nfs4_state *state)
4675 {
4676 int status, ret = NFS_OK;
4677 struct nfs4_lock_state *lsp;
4678 struct nfs_server *server = NFS_SERVER(state->inode);
4679
4680 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4681 if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
4682 status = nfs41_test_stateid(server, &lsp->ls_stateid);
4683 if (status != NFS_OK) {
4684 nfs41_free_stateid(server, &lsp->ls_stateid);
4685 lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
4686 ret = status;
4687 }
4688 }
4689 }
4690
4691 return ret;
4692 }
4693
4694 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4695 {
4696 int status = NFS_OK;
4697
4698 if (test_bit(LK_STATE_IN_USE, &state->flags))
4699 status = nfs41_check_expired_locks(state);
4700 if (status == NFS_OK)
4701 return status;
4702 return nfs4_lock_expired(state, request);
4703 }
4704 #endif
4705
4706 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4707 {
4708 struct nfs_inode *nfsi = NFS_I(state->inode);
4709 unsigned char fl_flags = request->fl_flags;
4710 int status = -ENOLCK;
4711
4712 if ((fl_flags & FL_POSIX) &&
4713 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4714 goto out;
4715 /* Is this a delegated open? */
4716 status = nfs4_set_lock_state(state, request);
4717 if (status != 0)
4718 goto out;
4719 request->fl_flags |= FL_ACCESS;
4720 status = do_vfs_lock(request->fl_file, request);
4721 if (status < 0)
4722 goto out;
4723 down_read(&nfsi->rwsem);
4724 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4725 /* Yes: cache locks! */
4726 /* ...but avoid races with delegation recall... */
4727 request->fl_flags = fl_flags & ~FL_SLEEP;
4728 status = do_vfs_lock(request->fl_file, request);
4729 goto out_unlock;
4730 }
4731 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4732 if (status != 0)
4733 goto out_unlock;
4734 /* Note: we always want to sleep here! */
4735 request->fl_flags = fl_flags | FL_SLEEP;
4736 if (do_vfs_lock(request->fl_file, request) < 0)
4737 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
4738 "manager!\n", __func__);
4739 out_unlock:
4740 up_read(&nfsi->rwsem);
4741 out:
4742 request->fl_flags = fl_flags;
4743 return status;
4744 }
4745
4746 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4747 {
4748 struct nfs4_exception exception = {
4749 .state = state,
4750 .inode = state->inode,
4751 };
4752 int err;
4753
4754 do {
4755 err = _nfs4_proc_setlk(state, cmd, request);
4756 if (err == -NFS4ERR_DENIED)
4757 err = -EAGAIN;
4758 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4759 err, &exception);
4760 } while (exception.retry);
4761 return err;
4762 }
4763
4764 static int
4765 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4766 {
4767 struct nfs_open_context *ctx;
4768 struct nfs4_state *state;
4769 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4770 int status;
4771
4772 /* verify open state */
4773 ctx = nfs_file_open_context(filp);
4774 state = ctx->state;
4775
4776 if (request->fl_start < 0 || request->fl_end < 0)
4777 return -EINVAL;
4778
4779 if (IS_GETLK(cmd)) {
4780 if (state != NULL)
4781 return nfs4_proc_getlk(state, F_GETLK, request);
4782 return 0;
4783 }
4784
4785 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4786 return -EINVAL;
4787
4788 if (request->fl_type == F_UNLCK) {
4789 if (state != NULL)
4790 return nfs4_proc_unlck(state, cmd, request);
4791 return 0;
4792 }
4793
4794 if (state == NULL)
4795 return -ENOLCK;
4796 /*
4797 * Don't rely on the VFS having checked the file open mode,
4798 * since it won't do this for flock() locks.
4799 */
4800 switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
4801 case F_RDLCK:
4802 if (!(filp->f_mode & FMODE_READ))
4803 return -EBADF;
4804 break;
4805 case F_WRLCK:
4806 if (!(filp->f_mode & FMODE_WRITE))
4807 return -EBADF;
4808 }
4809
4810 do {
4811 status = nfs4_proc_setlk(state, cmd, request);
4812 if ((status != -EAGAIN) || IS_SETLK(cmd))
4813 break;
4814 timeout = nfs4_set_lock_task_retry(timeout);
4815 status = -ERESTARTSYS;
4816 if (signalled())
4817 break;
4818 } while (status < 0);
4819 return status;
4820 }
4821
4822 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4823 {
4824 struct nfs_server *server = NFS_SERVER(state->inode);
4825 struct nfs4_exception exception = { };
4826 int err;
4827
4828 err = nfs4_set_lock_state(state, fl);
4829 if (err != 0)
4830 goto out;
4831 do {
4832 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4833 switch (err) {
4834 default:
4835 printk(KERN_ERR "NFS: %s: unhandled error "
4836 "%d.\n", __func__, err);
4837 case 0:
4838 case -ESTALE:
4839 goto out;
4840 case -NFS4ERR_EXPIRED:
4841 nfs4_schedule_stateid_recovery(server, state); /* fall through */
4842 case -NFS4ERR_STALE_CLIENTID:
4843 case -NFS4ERR_STALE_STATEID:
4844 nfs4_schedule_lease_recovery(server->nfs_client);
4845 goto out;
4846 case -NFS4ERR_BADSESSION:
4847 case -NFS4ERR_BADSLOT:
4848 case -NFS4ERR_BAD_HIGH_SLOT:
4849 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4850 case -NFS4ERR_DEADSESSION:
4851 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
4852 goto out;
4853 case -ERESTARTSYS:
4854 /*
4855 * The show must go on: exit, but mark the
4856 * stateid as needing recovery.
4857 */
4858 case -NFS4ERR_DELEG_REVOKED:
4859 case -NFS4ERR_ADMIN_REVOKED:
4860 case -NFS4ERR_BAD_STATEID:
4861 case -NFS4ERR_OPENMODE:
4862 nfs4_schedule_stateid_recovery(server, state);
4863 err = 0;
4864 goto out;
4865 case -EKEYEXPIRED:
4866 /*
4867 * User RPCSEC_GSS context has expired.
4868 * We cannot recover this stateid now, so
4869 * skip it and allow recovery thread to
4870 * proceed.
4871 */
4872 err = 0;
4873 goto out;
4874 case -ENOMEM:
4875 case -NFS4ERR_DENIED:
4876 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
4877 err = 0;
4878 goto out;
4879 case -NFS4ERR_DELAY:
4880 break;
4881 }
4882 err = nfs4_handle_exception(server, err, &exception);
4883 } while (exception.retry);
4884 out:
4885 return err;
4886 }
4887
4888 struct nfs_release_lockowner_data {
4889 struct nfs4_lock_state *lsp;
4890 struct nfs_server *server;
4891 struct nfs_release_lockowner_args args;
4892 };
4893
4894 static void nfs4_release_lockowner_release(void *calldata)
4895 {
4896 struct nfs_release_lockowner_data *data = calldata;
4897 nfs4_free_lock_state(data->server, data->lsp);
4898 kfree(calldata);
4899 }
4900
4901 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
4902 .rpc_release = nfs4_release_lockowner_release,
4903 };
4904
4905 int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
4906 {
4907 struct nfs_server *server = lsp->ls_state->owner->so_server;
4908 struct nfs_release_lockowner_data *data;
4909 struct rpc_message msg = {
4910 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4911 };
4912
4913 if (server->nfs_client->cl_mvops->minor_version != 0)
4914 return -EINVAL;
4915 data = kmalloc(sizeof(*data), GFP_NOFS);
4916 if (!data)
4917 return -ENOMEM;
4918 data->lsp = lsp;
4919 data->server = server;
4920 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
4921 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
4922 data->args.lock_owner.s_dev = server->s_dev;
4923 msg.rpc_argp = &data->args;
4924 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
4925 return 0;
4926 }
4927
4928 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4929
4930 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4931 const void *buf, size_t buflen,
4932 int flags, int type)
4933 {
4934 if (strcmp(key, "") != 0)
4935 return -EINVAL;
4936
4937 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
4938 }
4939
4940 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
4941 void *buf, size_t buflen, int type)
4942 {
4943 if (strcmp(key, "") != 0)
4944 return -EINVAL;
4945
4946 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
4947 }
4948
4949 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4950 size_t list_len, const char *name,
4951 size_t name_len, int type)
4952 {
4953 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
4954
4955 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4956 return 0;
4957
4958 if (list && len <= list_len)
4959 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
4960 return len;
4961 }
4962
4963 /*
4964 * nfs_fhget will use either the mounted_on_fileid or the fileid
4965 */
4966 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4967 {
4968 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
4969 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
4970 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
4971 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
4972 return;
4973
4974 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4975 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
4976 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4977 fattr->nlink = 2;
4978 }
4979
4980 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
4981 const struct qstr *name,
4982 struct nfs4_fs_locations *fs_locations,
4983 struct page *page)
4984 {
4985 struct nfs_server *server = NFS_SERVER(dir);
4986 u32 bitmask[2] = {
4987 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4988 };
4989 struct nfs4_fs_locations_arg args = {
4990 .dir_fh = NFS_FH(dir),
4991 .name = name,
4992 .page = page,
4993 .bitmask = bitmask,
4994 };
4995 struct nfs4_fs_locations_res res = {
4996 .fs_locations = fs_locations,
4997 };
4998 struct rpc_message msg = {
4999 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
5000 .rpc_argp = &args,
5001 .rpc_resp = &res,
5002 };
5003 int status;
5004
5005 dprintk("%s: start\n", __func__);
5006
5007 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5008 * is not supported */
5009 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5010 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5011 else
5012 bitmask[0] |= FATTR4_WORD0_FILEID;
5013
5014 nfs_fattr_init(&fs_locations->fattr);
5015 fs_locations->server = server;
5016 fs_locations->nlocations = 0;
5017 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5018 dprintk("%s: returned status = %d\n", __func__, status);
5019 return status;
5020 }
5021
5022 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5023 const struct qstr *name,
5024 struct nfs4_fs_locations *fs_locations,
5025 struct page *page)
5026 {
5027 struct nfs4_exception exception = { };
5028 int err;
5029 do {
5030 err = nfs4_handle_exception(NFS_SERVER(dir),
5031 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5032 &exception);
5033 } while (exception.retry);
5034 return err;
5035 }
5036
5037 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5038 {
5039 int status;
5040 struct nfs4_secinfo_arg args = {
5041 .dir_fh = NFS_FH(dir),
5042 .name = name,
5043 };
5044 struct nfs4_secinfo_res res = {
5045 .flavors = flavors,
5046 };
5047 struct rpc_message msg = {
5048 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5049 .rpc_argp = &args,
5050 .rpc_resp = &res,
5051 };
5052
5053 dprintk("NFS call secinfo %s\n", name->name);
5054 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5055 dprintk("NFS reply secinfo: %d\n", status);
5056 return status;
5057 }
5058
5059 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5060 struct nfs4_secinfo_flavors *flavors)
5061 {
5062 struct nfs4_exception exception = { };
5063 int err;
5064 do {
5065 err = nfs4_handle_exception(NFS_SERVER(dir),
5066 _nfs4_proc_secinfo(dir, name, flavors),
5067 &exception);
5068 } while (exception.retry);
5069 return err;
5070 }
5071
5072 #ifdef CONFIG_NFS_V4_1
5073 /*
5074 * Check the exchange flags returned by the server for invalid flags: any
5075 * flag outside the valid mask, both the PNFS and NON_PNFS flags set, or
5076 * none of the NON_PNFS, PNFS, or DS flags set.
5077 */
5078 static int nfs4_check_cl_exchange_flags(u32 flags)
5079 {
5080 if (flags & ~EXCHGID4_FLAG_MASK_R)
5081 goto out_inval;
5082 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5083 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5084 goto out_inval;
5085 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5086 goto out_inval;
5087 return NFS_OK;
5088 out_inval:
5089 return -NFS4ERR_INVAL;
5090 }
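/*
 * Example (a sketch): a reply carrying only EXCHGID4_FLAG_USE_NON_PNFS
 * passes the checks above, while a reply setting both
 * EXCHGID4_FLAG_USE_PNFS_MDS and EXCHGID4_FLAG_USE_NON_PNFS, or none of
 * the pNFS role flags, is rejected with -NFS4ERR_INVAL.
 */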
5091
5092 static bool
5093 nfs41_same_server_scope(struct nfs41_server_scope *a,
5094 struct nfs41_server_scope *b)
5095 {
5096 if (a->server_scope_sz == b->server_scope_sz &&
5097 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5098 return true;
5099
5100 return false;
5101 }
5102
5103 /*
5104 * nfs4_proc_bind_conn_to_session()
5105 *
5106 * The 4.1 client currently uses the same TCP connection for the
5107 * fore and backchannel.
5108 */
5109 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
5110 {
5111 int status;
5112 struct nfs41_bind_conn_to_session_res res;
5113 struct rpc_message msg = {
5114 .rpc_proc =
5115 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
5116 .rpc_argp = clp,
5117 .rpc_resp = &res,
5118 .rpc_cred = cred,
5119 };
5120
5121 dprintk("--> %s\n", __func__);
5122 BUG_ON(clp == NULL);
5123
5124 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5125 if (unlikely(res.session == NULL)) {
5126 status = -ENOMEM;
5127 goto out;
5128 }
5129
5130 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5131 if (status == 0) {
5132 if (memcmp(res.session->sess_id.data,
5133 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
5134 dprintk("NFS: %s: Session ID mismatch\n", __func__);
5135 status = -EIO;
5136 goto out_session;
5137 }
5138 if (res.dir != NFS4_CDFS4_BOTH) {
5139 dprintk("NFS: %s: Unexpected direction from server\n",
5140 __func__);
5141 status = -EIO;
5142 goto out_session;
5143 }
5144 if (res.use_conn_in_rdma_mode) {
5145 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5146 __func__);
5147 status = -EIO;
5148 goto out_session;
5149 }
5150 }
5151 out_session:
5152 kfree(res.session);
5153 out:
5154 dprintk("<-- %s status= %d\n", __func__, status);
5155 return status;
5156 }
5157
5158 /*
5159 * nfs4_proc_exchange_id()
5160 *
5161 * Since the clientid has expired, all compounds using sessions
5162 * associated with the stale clientid will be returning
5163 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5164 * be in some phase of session reset.
5165 */
5166 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5167 {
5168 nfs4_verifier verifier;
5169 struct nfs41_exchange_id_args args = {
5170 .verifier = &verifier,
5171 .client = clp,
5172 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5173 };
5174 struct nfs41_exchange_id_res res = {
5175 0
5176 };
5177 int status;
5178 struct rpc_message msg = {
5179 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5180 .rpc_argp = &args,
5181 .rpc_resp = &res,
5182 .rpc_cred = cred,
5183 };
5184
5185 dprintk("--> %s\n", __func__);
5186 BUG_ON(clp == NULL);
5187
5188 nfs4_init_boot_verifier(clp, &verifier);
5189
5190 args.id_len = scnprintf(args.id, sizeof(args.id),
5191 "%s/%s/%u",
5192 clp->cl_ipaddr,
5193 clp->cl_rpcclient->cl_nodename,
5194 clp->cl_rpcclient->cl_auth->au_flavor);
5195
5196 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
5197 GFP_NOFS);
5198 if (unlikely(res.server_owner == NULL)) {
5199 status = -ENOMEM;
5200 goto out;
5201 }
5202
5203 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
5204 GFP_NOFS);
5205 if (unlikely(res.server_scope == NULL)) {
5206 status = -ENOMEM;
5207 goto out_server_owner;
5208 }
5209
5210 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
5211 if (unlikely(res.impl_id == NULL)) {
5212 status = -ENOMEM;
5213 goto out_server_scope;
5214 }
5215
5216 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5217 if (status == 0)
5218 status = nfs4_check_cl_exchange_flags(res.flags);
5219
5220 if (status == 0) {
5221 clp->cl_clientid = res.clientid;
5222 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
5223 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
5224 clp->cl_seqid = res.seqid;
5225
5226 kfree(clp->cl_serverowner);
5227 clp->cl_serverowner = res.server_owner;
5228 res.server_owner = NULL;
5229
5230 /* use the most recent implementation id */
5231 kfree(clp->cl_implid);
5232 clp->cl_implid = res.impl_id;
5233
5234 if (clp->cl_serverscope != NULL &&
5235 !nfs41_same_server_scope(clp->cl_serverscope,
5236 res.server_scope)) {
5237 dprintk("%s: server_scope mismatch detected\n",
5238 __func__);
5239 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5240 kfree(clp->cl_serverscope);
5241 clp->cl_serverscope = NULL;
5242 }
5243
5244 if (clp->cl_serverscope == NULL) {
5245 clp->cl_serverscope = res.server_scope;
5246 goto out;
5247 }
5248 } else
5249 kfree(res.impl_id);
5250
5251 out_server_owner:
5252 kfree(res.server_owner);
5253 out_server_scope:
5254 kfree(res.server_scope);
5255 out:
5256 if (clp->cl_implid != NULL)
5257 dprintk("%s: Server Implementation ID: "
5258 "domain: %s, name: %s, date: %llu,%u\n",
5259 __func__, clp->cl_implid->domain, clp->cl_implid->name,
5260 clp->cl_implid->date.seconds,
5261 clp->cl_implid->date.nseconds);
5262 dprintk("<-- %s status= %d\n", __func__, status);
5263 return status;
5264 }
5265
5266 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5267 struct rpc_cred *cred)
5268 {
5269 struct rpc_message msg = {
5270 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
5271 .rpc_argp = clp,
5272 .rpc_cred = cred,
5273 };
5274 int status;
5275
5276 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5277 if (status)
5278 pr_warn("NFS: Got error %d from the server %s on "
5279 "DESTROY_CLIENTID.", status, clp->cl_hostname);
5280 return status;
5281 }
5282
5283 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
5284 struct rpc_cred *cred)
5285 {
5286 unsigned int loop;
5287 int ret;
5288
5289 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
5290 ret = _nfs4_proc_destroy_clientid(clp, cred);
5291 switch (ret) {
5292 case -NFS4ERR_DELAY:
5293 case -NFS4ERR_CLIENTID_BUSY:
5294 ssleep(1);
5295 break;
5296 default:
5297 return ret;
5298 }
5299 }
5300 return 0;
5301 }
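/*
 * Note on the loop above: DESTROY_CLIENTID is retried for up to
 * NFS4_MAX_LOOP_ON_RECOVER iterations while the server answers
 * NFS4ERR_DELAY or NFS4ERR_CLIENTID_BUSY, sleeping one second between
 * attempts; if the retries are exhausted the function returns 0 and the
 * client simply carries on.
 */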
5302
5303 int nfs4_destroy_clientid(struct nfs_client *clp)
5304 {
5305 struct rpc_cred *cred;
5306 int ret = 0;
5307
5308 if (clp->cl_mvops->minor_version < 1)
5309 goto out;
5310 if (clp->cl_exchange_flags == 0)
5311 goto out;
5312 cred = nfs4_get_exchange_id_cred(clp);
5313 ret = nfs4_proc_destroy_clientid(clp, cred);
5314 if (cred)
5315 put_rpccred(cred);
5316 switch (ret) {
5317 case 0:
5318 case -NFS4ERR_STALE_CLIENTID:
5319 clp->cl_exchange_flags = 0;
5320 }
5321 out:
5322 return ret;
5323 }
5324
5325 struct nfs4_get_lease_time_data {
5326 struct nfs4_get_lease_time_args *args;
5327 struct nfs4_get_lease_time_res *res;
5328 struct nfs_client *clp;
5329 };
5330
5331 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5332 void *calldata)
5333 {
5334 int ret;
5335 struct nfs4_get_lease_time_data *data =
5336 (struct nfs4_get_lease_time_data *)calldata;
5337
5338 dprintk("--> %s\n", __func__);
5339 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5340 /* just set up the sequence; do not trigger session recovery,
5341 since we're invoked from within one */
5342 ret = nfs41_setup_sequence(data->clp->cl_session,
5343 &data->args->la_seq_args,
5344 &data->res->lr_seq_res, task);
5345
5346 BUG_ON(ret == -EAGAIN);
5347 rpc_call_start(task);
5348 dprintk("<-- %s\n", __func__);
5349 }
5350
5351 /*
5352 * Called from nfs4_state_manager thread for session setup, so don't recover
5353 * from sequence operation or clientid errors.
5354 */
5355 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5356 {
5357 struct nfs4_get_lease_time_data *data =
5358 (struct nfs4_get_lease_time_data *)calldata;
5359
5360 dprintk("--> %s\n", __func__);
5361 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5362 return;
5363 switch (task->tk_status) {
5364 case -NFS4ERR_DELAY:
5365 case -NFS4ERR_GRACE:
5366 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5367 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5368 task->tk_status = 0;
5369 /* fall through */
5370 case -NFS4ERR_RETRY_UNCACHED_REP:
5371 rpc_restart_call_prepare(task);
5372 return;
5373 }
5374 dprintk("<-- %s\n", __func__);
5375 }
5376
5377 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5378 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5379 .rpc_call_done = nfs4_get_lease_time_done,
5380 };
5381
5382 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5383 {
5384 struct rpc_task *task;
5385 struct nfs4_get_lease_time_args args;
5386 struct nfs4_get_lease_time_res res = {
5387 .lr_fsinfo = fsinfo,
5388 };
5389 struct nfs4_get_lease_time_data data = {
5390 .args = &args,
5391 .res = &res,
5392 .clp = clp,
5393 };
5394 struct rpc_message msg = {
5395 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5396 .rpc_argp = &args,
5397 .rpc_resp = &res,
5398 };
5399 struct rpc_task_setup task_setup = {
5400 .rpc_client = clp->cl_rpcclient,
5401 .rpc_message = &msg,
5402 .callback_ops = &nfs4_get_lease_time_ops,
5403 .callback_data = &data,
5404 .flags = RPC_TASK_TIMEOUT,
5405 };
5406 int status;
5407
5408 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5409 dprintk("--> %s\n", __func__);
5410 task = rpc_run_task(&task_setup);
5411
5412 if (IS_ERR(task))
5413 status = PTR_ERR(task);
5414 else {
5415 status = task->tk_status;
5416 rpc_put_task(task);
5417 }
5418 dprintk("<-- %s return %d\n", __func__, status);
5419
5420 return status;
5421 }
5422
5423 static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
5424 {
5425 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
5426 }
5427
5428 static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
5429 struct nfs4_slot *new,
5430 u32 max_slots,
5431 u32 ivalue)
5432 {
5433 struct nfs4_slot *old = NULL;
5434 u32 i;
5435
5436 spin_lock(&tbl->slot_tbl_lock);
5437 if (new) {
5438 old = tbl->slots;
5439 tbl->slots = new;
5440 tbl->max_slots = max_slots;
5441 }
5442 tbl->highest_used_slotid = -1; /* no slot is currently used */
5443 for (i = 0; i < tbl->max_slots; i++)
5444 tbl->slots[i].seq_nr = ivalue;
5445 spin_unlock(&tbl->slot_tbl_lock);
5446 kfree(old);
5447 }
5448
5449 /*
5450 * (re)Initialise a slot table
5451 */
5452 static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
5453 u32 ivalue)
5454 {
5455 struct nfs4_slot *new = NULL;
5456 int ret = -ENOMEM;
5457
5458 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5459 max_reqs, tbl->max_slots);
5460
5461 /* Does the newly negotiated max_reqs match the existing slot table? */
5462 if (max_reqs != tbl->max_slots) {
5463 new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
5464 if (!new)
5465 goto out;
5466 }
5467 ret = 0;
5468
5469 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
5470 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5471 tbl, tbl->slots, tbl->max_slots);
5472 out:
5473 dprintk("<-- %s: return %d\n", __func__, ret);
5474 return ret;
5475 }
5476
5477 /* Destroy the slot table */
5478 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5479 {
5480 if (session->fc_slot_table.slots != NULL) {
5481 kfree(session->fc_slot_table.slots);
5482 session->fc_slot_table.slots = NULL;
5483 }
5484 if (session->bc_slot_table.slots != NULL) {
5485 kfree(session->bc_slot_table.slots);
5486 session->bc_slot_table.slots = NULL;
5487 }
5488 return;
5489 }
5490
5491 /*
5492 * Initialize or reset the forechannel and backchannel tables
5493 */
5494 static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
5495 {
5496 struct nfs4_slot_table *tbl;
5497 int status;
5498
5499 dprintk("--> %s\n", __func__);
5500 /* Fore channel */
5501 tbl = &ses->fc_slot_table;
5502 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
5503 if (status) /* -ENOMEM */
5504 return status;
5505 /* Back channel */
5506 tbl = &ses->bc_slot_table;
5507 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
5508 if (status && tbl->slots == NULL)
5509 /* Fore and back channel share a connection so get
5510 * both slot tables or neither */
5511 nfs4_destroy_slot_tables(ses);
5512 return status;
5513 }
5514
5515 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5516 {
5517 struct nfs4_session *session;
5518 struct nfs4_slot_table *tbl;
5519
5520 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5521 if (!session)
5522 return NULL;
5523
5524 tbl = &session->fc_slot_table;
5525 tbl->highest_used_slotid = NFS4_NO_SLOT;
5526 spin_lock_init(&tbl->slot_tbl_lock);
5527 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5528 init_completion(&tbl->complete);
5529
5530 tbl = &session->bc_slot_table;
5531 tbl->highest_used_slotid = NFS4_NO_SLOT;
5532 spin_lock_init(&tbl->slot_tbl_lock);
5533 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5534 init_completion(&tbl->complete);
5535
5536 session->session_state = 1<<NFS4_SESSION_INITING;
5537
5538 session->clp = clp;
5539 return session;
5540 }
5541
5542 void nfs4_destroy_session(struct nfs4_session *session)
5543 {
5544 struct rpc_xprt *xprt;
5545 struct rpc_cred *cred;
5546
5547 cred = nfs4_get_exchange_id_cred(session->clp);
5548 nfs4_proc_destroy_session(session, cred);
5549 if (cred)
5550 put_rpccred(cred);
5551
5552 rcu_read_lock();
5553 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
5554 rcu_read_unlock();
5555 dprintk("%s Destroy backchannel for xprt %p\n",
5556 __func__, xprt);
5557 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
5558 nfs4_destroy_slot_tables(session);
5559 kfree(session);
5560 }
5561
5562 /*
5563 * Initialize the values to be used by the client in CREATE_SESSION.
5564 * If nfs4_init_session set the fore channel request and response sizes,
5565 * use them.
5566 *
5567 * Set the back channel max_resp_sz_cached to zero to force the client to
5568 * always set csa_cachethis to FALSE because the current implementation
5569 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5570 */
5571 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5572 {
5573 struct nfs4_session *session = args->client->cl_session;
5574 unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
5575 mxresp_sz = session->fc_attrs.max_resp_sz;
5576
5577 if (mxrqst_sz == 0)
5578 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5579 if (mxresp_sz == 0)
5580 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5581 /* Fore channel attributes */
5582 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5583 args->fc_attrs.max_resp_sz = mxresp_sz;
5584 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5585 args->fc_attrs.max_reqs = max_session_slots;
5586
5587 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5588 "max_ops=%u max_reqs=%u\n",
5589 __func__,
5590 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5591 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5592
5593 /* Back channel attributes */
5594 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5595 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5596 args->bc_attrs.max_resp_sz_cached = 0;
5597 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5598 args->bc_attrs.max_reqs = 1;
5599
5600 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5601 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5602 __func__,
5603 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5604 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5605 args->bc_attrs.max_reqs);
5606 }
5607
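/*
 * Check the fore channel attributes the server returned against the ones
 * we requested; values we cannot work with are reported as -EINVAL, and
 * max_reqs is clamped to NFS4_MAX_SLOT_TABLE.
 */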
5608 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5609 {
5610 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5611 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5612
5613 if (rcvd->max_resp_sz > sent->max_resp_sz)
5614 return -EINVAL;
5615 /*
5616 * Our requested max_ops is the minimum we need; we're not
5617 * prepared to break up compounds into smaller pieces than that.
5618 * So, no point even trying to continue if the server won't
5619 * cooperate:
5620 */
5621 if (rcvd->max_ops < sent->max_ops)
5622 return -EINVAL;
5623 if (rcvd->max_reqs == 0)
5624 return -EINVAL;
5625 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5626 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5627 return 0;
5628 }
5629
5630 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5631 {
5632 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5633 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5634
5635 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5636 return -EINVAL;
5637 if (rcvd->max_resp_sz < sent->max_resp_sz)
5638 return -EINVAL;
5639 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5640 return -EINVAL;
5641 /* These would render the backchannel useless: */
5642 if (rcvd->max_ops != sent->max_ops)
5643 return -EINVAL;
5644 if (rcvd->max_reqs != sent->max_reqs)
5645 return -EINVAL;
5646 return 0;
5647 }
5648
5649 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5650 struct nfs4_session *session)
5651 {
5652 int ret;
5653
5654 ret = nfs4_verify_fore_channel_attrs(args, session);
5655 if (ret)
5656 return ret;
5657 return nfs4_verify_back_channel_attrs(args, session);
5658 }
5659
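/*
 * Send a single CREATE_SESSION RPC.  On success the negotiated channel
 * attributes are verified and the clientid sequence id is incremented.
 */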
5660 static int _nfs4_proc_create_session(struct nfs_client *clp,
5661 struct rpc_cred *cred)
5662 {
5663 struct nfs4_session *session = clp->cl_session;
5664 struct nfs41_create_session_args args = {
5665 .client = clp,
5666 .cb_program = NFS4_CALLBACK,
5667 };
5668 struct nfs41_create_session_res res = {
5669 .client = clp,
5670 };
5671 struct rpc_message msg = {
5672 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5673 .rpc_argp = &args,
5674 .rpc_resp = &res,
5675 .rpc_cred = cred,
5676 };
5677 int status;
5678
5679 nfs4_init_channel_attrs(&args);
5680 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5681
5682 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5683
5684 if (!status)
5685 /* Verify the session's negotiated channel_attrs values */
5686 status = nfs4_verify_channel_attrs(&args, session);
5687 if (!status) {
5688 /* Increment the clientid slot sequence id */
5689 clp->cl_seqid++;
5690 }
5691
5692 return status;
5693 }
5694
5695 /*
5696 * Issues a CREATE_SESSION operation to the server.
5697 * It is the responsibility of the caller to verify the session is
5698 * expired before calling this routine.
5699 */
5700 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
5701 {
5702 int status;
5703 unsigned *ptr;
5704 struct nfs4_session *session = clp->cl_session;
5705
5706 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5707
5708 status = _nfs4_proc_create_session(clp, cred);
5709 if (status)
5710 goto out;
5711
5712 /* Init or reset the session slot tables */
5713 status = nfs4_setup_session_slot_tables(session);
5714 dprintk("slot table setup returned %d\n", status);
5715 if (status)
5716 goto out;
5717
5718 ptr = (unsigned *)&session->sess_id.data[0];
5719 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
5720 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5721 out:
5722 dprintk("<-- %s\n", __func__);
5723 return status;
5724 }
5725
5726 /*
5727 * Issue the over-the-wire RPC DESTROY_SESSION.
5728 * The caller must serialize access to this routine.
5729 */
5730 int nfs4_proc_destroy_session(struct nfs4_session *session,
5731 struct rpc_cred *cred)
5732 {
5733 struct rpc_message msg = {
5734 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
5735 .rpc_argp = session,
5736 .rpc_cred = cred,
5737 };
5738 int status = 0;
5739
5740 dprintk("--> nfs4_proc_destroy_session\n");
5741
5742 /* session is still being setup */
5743 if (session->clp->cl_cons_state != NFS_CS_READY)
5744 return status;
5745
5746 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5747
5748 if (status)
5749 printk(KERN_WARNING
5750 "NFS: Got error %d from the server on DESTROY_SESSION. "
5751 "Session has been destroyed regardless...\n", status);
5752
5753 dprintk("<-- nfs4_proc_destroy_session\n");
5754 return status;
5755 }
5756
5757 /*
5758 * With sessions, the client is not marked ready until after a
5759 * successful EXCHANGE_ID and CREATE_SESSION.
5760 *
5761  * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
5762  * other versions of NFS can be tried.
5763 */
5764 static int nfs41_check_session_ready(struct nfs_client *clp)
5765 {
5766 int ret;
5767
5768 if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
5769 ret = nfs4_client_recover_expired_lease(clp);
5770 if (ret)
5771 return ret;
5772 }
5773 if (clp->cl_cons_state < NFS_CS_READY)
5774 return -EPROTONOSUPPORT;
5775 smp_rmb();
5776 return 0;
5777 }
5778
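/*
 * Complete session setup for a regular (MDS) mount: while the session is
 * still initializing, size the fore channel request/response limits from
 * the mount's wsize/rsize plus the NFSv4.1 compound overhead, then check
 * that the session has become ready.
 */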
5779 int nfs4_init_session(struct nfs_server *server)
5780 {
5781 struct nfs_client *clp = server->nfs_client;
5782 struct nfs4_session *session;
5783 unsigned int rsize, wsize;
5784
5785 if (!nfs4_has_session(clp))
5786 return 0;
5787
5788 session = clp->cl_session;
5789 spin_lock(&clp->cl_lock);
5790 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
5791
5792 rsize = server->rsize;
5793 if (rsize == 0)
5794 rsize = NFS_MAX_FILE_IO_SIZE;
5795 wsize = server->wsize;
5796 if (wsize == 0)
5797 wsize = NFS_MAX_FILE_IO_SIZE;
5798
5799 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5800 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5801 }
5802 spin_unlock(&clp->cl_lock);
5803
5804 return nfs41_check_session_ready(clp);
5805 }
5806
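/*
 * Complete session setup for a pNFS data server connection: inherit the
 * MDS lease time instead of probing it, check that the session is ready,
 * and fail with -ENODEV if the server does not advertise the DS role.
 */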
5807 int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
5808 {
5809 struct nfs4_session *session = clp->cl_session;
5810 int ret;
5811
5812 spin_lock(&clp->cl_lock);
5813 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
5814 /*
5815 		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
5816 		 * DS lease to be equal to the MDS lease.
5817 */
5818 clp->cl_lease_time = lease_time;
5819 clp->cl_last_renewal = jiffies;
5820 }
5821 spin_unlock(&clp->cl_lock);
5822
5823 ret = nfs41_check_session_ready(clp);
5824 if (ret)
5825 return ret;
5826 /* Test for the DS role */
5827 if (!is_ds_client(clp))
5828 return -ENODEV;
5829 return 0;
5830 }
5831 EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5832
5833
5834 /*
5835 * Renew the cl_session lease.
5836 */
5837 struct nfs4_sequence_data {
5838 struct nfs_client *clp;
5839 struct nfs4_sequence_args args;
5840 struct nfs4_sequence_res res;
5841 };
5842
5843 static void nfs41_sequence_release(void *data)
5844 {
5845 struct nfs4_sequence_data *calldata = data;
5846 struct nfs_client *clp = calldata->clp;
5847
5848 if (atomic_read(&clp->cl_count) > 1)
5849 nfs4_schedule_state_renewal(clp);
5850 nfs_put_client(clp);
5851 kfree(calldata);
5852 }
5853
5854 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5855 {
5856 	switch (task->tk_status) {
5857 case -NFS4ERR_DELAY:
5858 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5859 return -EAGAIN;
5860 default:
5861 nfs4_schedule_lease_recovery(clp);
5862 }
5863 return 0;
5864 }
5865
5866 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5867 {
5868 struct nfs4_sequence_data *calldata = data;
5869 struct nfs_client *clp = calldata->clp;
5870
5871 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5872 return;
5873
5874 if (task->tk_status < 0) {
5875 dprintk("%s ERROR %d\n", __func__, task->tk_status);
5876 if (atomic_read(&clp->cl_count) == 1)
5877 goto out;
5878
5879 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5880 rpc_restart_call_prepare(task);
5881 return;
5882 }
5883 }
5884 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5885 out:
5886 dprintk("<-- %s\n", __func__);
5887 }
5888
5889 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5890 {
5891 struct nfs4_sequence_data *calldata = data;
5892 struct nfs_client *clp = calldata->clp;
5893 struct nfs4_sequence_args *args;
5894 struct nfs4_sequence_res *res;
5895
5896 args = task->tk_msg.rpc_argp;
5897 res = task->tk_msg.rpc_resp;
5898
5899 if (nfs41_setup_sequence(clp->cl_session, args, res, task))
5900 return;
5901 rpc_call_start(task);
5902 }
5903
5904 static const struct rpc_call_ops nfs41_sequence_ops = {
5905 .rpc_call_done = nfs41_sequence_call_done,
5906 .rpc_call_prepare = nfs41_sequence_prepare,
5907 .rpc_release = nfs41_sequence_release,
5908 };
5909
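/*
 * Start an asynchronous SEQUENCE RPC to renew the lease.  A reference to
 * the nfs_client is taken here and released, together with the calldata,
 * in nfs41_sequence_release().
 */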
5910 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5911 {
5912 struct nfs4_sequence_data *calldata;
5913 struct rpc_message msg = {
5914 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5915 .rpc_cred = cred,
5916 };
5917 struct rpc_task_setup task_setup_data = {
5918 .rpc_client = clp->cl_rpcclient,
5919 .rpc_message = &msg,
5920 .callback_ops = &nfs41_sequence_ops,
5921 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5922 };
5923
5924 if (!atomic_inc_not_zero(&clp->cl_count))
5925 return ERR_PTR(-EIO);
5926 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5927 if (calldata == NULL) {
5928 nfs_put_client(clp);
5929 return ERR_PTR(-ENOMEM);
5930 }
5931 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
5932 msg.rpc_argp = &calldata->args;
5933 msg.rpc_resp = &calldata->res;
5934 calldata->clp = clp;
5935 task_setup_data.callback_data = calldata;
5936
5937 return rpc_run_task(&task_setup_data);
5938 }
5939
5940 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5941 {
5942 struct rpc_task *task;
5943 int ret = 0;
5944
5945 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
5946 return 0;
5947 task = _nfs41_proc_sequence(clp, cred);
5948 if (IS_ERR(task))
5949 ret = PTR_ERR(task);
5950 else
5951 rpc_put_task_async(task);
5952 dprintk("<-- %s status=%d\n", __func__, ret);
5953 return ret;
5954 }
5955
5956 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5957 {
5958 struct rpc_task *task;
5959 int ret;
5960
5961 task = _nfs41_proc_sequence(clp, cred);
5962 if (IS_ERR(task)) {
5963 ret = PTR_ERR(task);
5964 goto out;
5965 }
5966 ret = rpc_wait_for_completion_task(task);
5967 if (!ret) {
5968 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5969
5970 if (task->tk_status == 0)
5971 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5972 ret = task->tk_status;
5973 }
5974 rpc_put_task(task);
5975 out:
5976 dprintk("<-- %s status=%d\n", __func__, ret);
5977 return ret;
5978 }
5979
5980 struct nfs4_reclaim_complete_data {
5981 struct nfs_client *clp;
5982 struct nfs41_reclaim_complete_args arg;
5983 struct nfs41_reclaim_complete_res res;
5984 };
5985
5986 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5987 {
5988 struct nfs4_reclaim_complete_data *calldata = data;
5989
5990 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5991 if (nfs41_setup_sequence(calldata->clp->cl_session,
5992 &calldata->arg.seq_args,
5993 &calldata->res.seq_res, task))
5994 return;
5995
5996 rpc_call_start(task);
5997 }
5998
5999 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
6000 {
6001 	switch (task->tk_status) {
6002 case 0:
6003 case -NFS4ERR_COMPLETE_ALREADY:
6004 case -NFS4ERR_WRONG_CRED: /* What to do here? */
6005 break;
6006 case -NFS4ERR_DELAY:
6007 rpc_delay(task, NFS4_POLL_RETRY_MAX);
6008 /* fall through */
6009 case -NFS4ERR_RETRY_UNCACHED_REP:
6010 return -EAGAIN;
6011 default:
6012 nfs4_schedule_lease_recovery(clp);
6013 }
6014 return 0;
6015 }
6016
6017 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
6018 {
6019 struct nfs4_reclaim_complete_data *calldata = data;
6020 struct nfs_client *clp = calldata->clp;
6021 struct nfs4_sequence_res *res = &calldata->res.seq_res;
6022
6023 dprintk("--> %s\n", __func__);
6024 if (!nfs41_sequence_done(task, res))
6025 return;
6026
6027 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
6028 rpc_restart_call_prepare(task);
6029 return;
6030 }
6031 dprintk("<-- %s\n", __func__);
6032 }
6033
6034 static void nfs4_free_reclaim_complete_data(void *data)
6035 {
6036 struct nfs4_reclaim_complete_data *calldata = data;
6037
6038 kfree(calldata);
6039 }
6040
6041 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
6042 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
6043 .rpc_call_done = nfs4_reclaim_complete_done,
6044 .rpc_release = nfs4_free_reclaim_complete_data,
6045 };
6046
6047 /*
6048 * Issue a global reclaim complete.
6049 */
6050 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
6051 {
6052 struct nfs4_reclaim_complete_data *calldata;
6053 struct rpc_task *task;
6054 struct rpc_message msg = {
6055 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
6056 };
6057 struct rpc_task_setup task_setup_data = {
6058 .rpc_client = clp->cl_rpcclient,
6059 .rpc_message = &msg,
6060 .callback_ops = &nfs4_reclaim_complete_call_ops,
6061 .flags = RPC_TASK_ASYNC,
6062 };
6063 int status = -ENOMEM;
6064
6065 dprintk("--> %s\n", __func__);
6066 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6067 if (calldata == NULL)
6068 goto out;
6069 calldata->clp = clp;
6070 calldata->arg.one_fs = 0;
6071
6072 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
6073 msg.rpc_argp = &calldata->arg;
6074 msg.rpc_resp = &calldata->res;
6075 task_setup_data.callback_data = calldata;
6076 task = rpc_run_task(&task_setup_data);
6077 if (IS_ERR(task)) {
6078 status = PTR_ERR(task);
6079 goto out;
6080 }
6081 status = nfs4_wait_for_completion_rpc_task(task);
6082 if (status == 0)
6083 status = task->tk_status;
6084 rpc_put_task(task);
6085 return 0;
6086 out:
6087 dprintk("<-- %s status=%d\n", __func__, status);
6088 return status;
6089 }
6090
6091 static void
6092 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
6093 {
6094 struct nfs4_layoutget *lgp = calldata;
6095 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6096
6097 dprintk("--> %s\n", __func__);
6098 	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
6099 * right now covering the LAYOUTGET we are about to send.
6100 * However, that is not so catastrophic, and there seems
6101 * to be no way to prevent it completely.
6102 */
6103 if (nfs4_setup_sequence(server, &lgp->args.seq_args,
6104 &lgp->res.seq_res, task))
6105 return;
6106 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
6107 NFS_I(lgp->args.inode)->layout,
6108 lgp->args.ctx->state)) {
6109 rpc_exit(task, NFS4_OK);
6110 return;
6111 }
6112 rpc_call_start(task);
6113 }
6114
6115 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6116 {
6117 struct nfs4_layoutget *lgp = calldata;
6118 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6119
6120 dprintk("--> %s\n", __func__);
6121
6122 if (!nfs4_sequence_done(task, &lgp->res.seq_res))
6123 return;
6124
6125 switch (task->tk_status) {
6126 case 0:
6127 break;
6128 case -NFS4ERR_LAYOUTTRYLATER:
6129 case -NFS4ERR_RECALLCONFLICT:
6130 task->tk_status = -NFS4ERR_DELAY;
6131 /* Fall through */
6132 default:
6133 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6134 rpc_restart_call_prepare(task);
6135 return;
6136 }
6137 }
6138 dprintk("<-- %s\n", __func__);
6139 }
6140
6141 static void nfs4_layoutget_release(void *calldata)
6142 {
6143 struct nfs4_layoutget *lgp = calldata;
6144
6145 dprintk("--> %s\n", __func__);
6146 put_nfs_open_context(lgp->args.ctx);
6147 kfree(calldata);
6148 dprintk("<-- %s\n", __func__);
6149 }
6150
6151 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6152 .rpc_call_prepare = nfs4_layoutget_prepare,
6153 .rpc_call_done = nfs4_layoutget_done,
6154 .rpc_release = nfs4_layoutget_release,
6155 };
6156
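/*
 * Run LAYOUTGET and, if the server grants a layout, hand the result to
 * pnfs_layout_process().
 */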
6157 int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
6158 {
6159 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6160 struct rpc_task *task;
6161 struct rpc_message msg = {
6162 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
6163 .rpc_argp = &lgp->args,
6164 .rpc_resp = &lgp->res,
6165 };
6166 struct rpc_task_setup task_setup_data = {
6167 .rpc_client = server->client,
6168 .rpc_message = &msg,
6169 .callback_ops = &nfs4_layoutget_call_ops,
6170 .callback_data = lgp,
6171 .flags = RPC_TASK_ASYNC,
6172 };
6173 int status = 0;
6174
6175 dprintk("--> %s\n", __func__);
6176
6177 lgp->res.layoutp = &lgp->args.layout;
6178 lgp->res.seq_res.sr_slot = NULL;
6179 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6180 task = rpc_run_task(&task_setup_data);
6181 if (IS_ERR(task))
6182 return PTR_ERR(task);
6183 status = nfs4_wait_for_completion_rpc_task(task);
6184 if (status == 0)
6185 status = task->tk_status;
6186 if (status == 0)
6187 status = pnfs_layout_process(lgp);
6188 rpc_put_task(task);
6189 dprintk("<-- %s status=%d\n", __func__, status);
6190 return status;
6191 }
6192
6193 static void
6194 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6195 {
6196 struct nfs4_layoutreturn *lrp = calldata;
6197
6198 dprintk("--> %s\n", __func__);
6199 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
6200 &lrp->res.seq_res, task))
6201 return;
6202 rpc_call_start(task);
6203 }
6204
6205 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6206 {
6207 struct nfs4_layoutreturn *lrp = calldata;
6208 struct nfs_server *server;
6209 struct pnfs_layout_hdr *lo = lrp->args.layout;
6210
6211 dprintk("--> %s\n", __func__);
6212
6213 if (!nfs4_sequence_done(task, &lrp->res.seq_res))
6214 return;
6215
6216 server = NFS_SERVER(lrp->args.inode);
6217 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6218 rpc_restart_call_prepare(task);
6219 return;
6220 }
6221 spin_lock(&lo->plh_inode->i_lock);
6222 if (task->tk_status == 0) {
6223 if (lrp->res.lrs_present) {
6224 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6225 } else
6226 BUG_ON(!list_empty(&lo->plh_segs));
6227 }
6228 lo->plh_block_lgets--;
6229 spin_unlock(&lo->plh_inode->i_lock);
6230 dprintk("<-- %s\n", __func__);
6231 }
6232
6233 static void nfs4_layoutreturn_release(void *calldata)
6234 {
6235 struct nfs4_layoutreturn *lrp = calldata;
6236
6237 dprintk("--> %s\n", __func__);
6238 put_layout_hdr(lrp->args.layout);
6239 kfree(calldata);
6240 dprintk("<-- %s\n", __func__);
6241 }
6242
6243 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6244 .rpc_call_prepare = nfs4_layoutreturn_prepare,
6245 .rpc_call_done = nfs4_layoutreturn_done,
6246 .rpc_release = nfs4_layoutreturn_release,
6247 };
6248
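/*
 * Run LAYOUTRETURN synchronously; nfs4_layoutreturn_done() updates the
 * layout stateid under the inode lock.
 */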
6249 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6250 {
6251 struct rpc_task *task;
6252 struct rpc_message msg = {
6253 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6254 .rpc_argp = &lrp->args,
6255 .rpc_resp = &lrp->res,
6256 };
6257 struct rpc_task_setup task_setup_data = {
6258 .rpc_client = lrp->clp->cl_rpcclient,
6259 .rpc_message = &msg,
6260 .callback_ops = &nfs4_layoutreturn_call_ops,
6261 .callback_data = lrp,
6262 };
6263 int status;
6264
6265 dprintk("--> %s\n", __func__);
6266 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6267 task = rpc_run_task(&task_setup_data);
6268 if (IS_ERR(task))
6269 return PTR_ERR(task);
6270 status = task->tk_status;
6271 dprintk("<-- %s status=%d\n", __func__, status);
6272 rpc_put_task(task);
6273 return status;
6274 }
6275
6276 /*
6277 * Retrieve the list of Data Server devices from the MDS.
6278 */
6279 static int _nfs4_getdevicelist(struct nfs_server *server,
6280 const struct nfs_fh *fh,
6281 struct pnfs_devicelist *devlist)
6282 {
6283 struct nfs4_getdevicelist_args args = {
6284 .fh = fh,
6285 .layoutclass = server->pnfs_curr_ld->id,
6286 };
6287 struct nfs4_getdevicelist_res res = {
6288 .devlist = devlist,
6289 };
6290 struct rpc_message msg = {
6291 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6292 .rpc_argp = &args,
6293 .rpc_resp = &res,
6294 };
6295 int status;
6296
6297 dprintk("--> %s\n", __func__);
6298 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6299 &res.seq_res, 0);
6300 dprintk("<-- %s status=%d\n", __func__, status);
6301 return status;
6302 }
6303
6304 int nfs4_proc_getdevicelist(struct nfs_server *server,
6305 const struct nfs_fh *fh,
6306 struct pnfs_devicelist *devlist)
6307 {
6308 struct nfs4_exception exception = { };
6309 int err;
6310
6311 do {
6312 err = nfs4_handle_exception(server,
6313 _nfs4_getdevicelist(server, fh, devlist),
6314 &exception);
6315 } while (exception.retry);
6316
6317 dprintk("%s: err=%d, num_devs=%u\n", __func__,
6318 err, devlist->num_devs);
6319
6320 return err;
6321 }
6322 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
6323
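/*
 * Fetch the description of a single pNFS device (GETDEVICEINFO)
 * from the MDS.
 */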
6324 static int
6325 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6326 {
6327 struct nfs4_getdeviceinfo_args args = {
6328 .pdev = pdev,
6329 };
6330 struct nfs4_getdeviceinfo_res res = {
6331 .pdev = pdev,
6332 };
6333 struct rpc_message msg = {
6334 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6335 .rpc_argp = &args,
6336 .rpc_resp = &res,
6337 };
6338 int status;
6339
6340 dprintk("--> %s\n", __func__);
6341 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6342 dprintk("<-- %s status=%d\n", __func__, status);
6343
6344 return status;
6345 }
6346
6347 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6348 {
6349 struct nfs4_exception exception = { };
6350 int err;
6351
6352 do {
6353 err = nfs4_handle_exception(server,
6354 _nfs4_proc_getdeviceinfo(server, pdev),
6355 &exception);
6356 } while (exception.retry);
6357 return err;
6358 }
6359 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6360
6361 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6362 {
6363 struct nfs4_layoutcommit_data *data = calldata;
6364 struct nfs_server *server = NFS_SERVER(data->args.inode);
6365
6366 if (nfs4_setup_sequence(server, &data->args.seq_args,
6367 &data->res.seq_res, task))
6368 return;
6369 rpc_call_start(task);
6370 }
6371
6372 static void
6373 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6374 {
6375 struct nfs4_layoutcommit_data *data = calldata;
6376 struct nfs_server *server = NFS_SERVER(data->args.inode);
6377
6378 if (!nfs4_sequence_done(task, &data->res.seq_res))
6379 return;
6380
6381 switch (task->tk_status) { /* Just ignore these failures */
6382 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6383 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
6384 case -NFS4ERR_BADLAYOUT: /* no layout */
6385 	case -NFS4ERR_GRACE:		/* loca_reclaim always false */
6386 task->tk_status = 0;
6387 break;
6388 case 0:
6389 nfs_post_op_update_inode_force_wcc(data->args.inode,
6390 data->res.fattr);
6391 break;
6392 default:
6393 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6394 rpc_restart_call_prepare(task);
6395 return;
6396 }
6397 }
6398 }
6399
6400 static void nfs4_layoutcommit_release(void *calldata)
6401 {
6402 struct nfs4_layoutcommit_data *data = calldata;
6403 struct pnfs_layout_segment *lseg, *tmp;
6404 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6405
6406 pnfs_cleanup_layoutcommit(data);
6407 /* Matched by references in pnfs_set_layoutcommit */
6408 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6409 list_del_init(&lseg->pls_lc_list);
6410 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6411 &lseg->pls_flags))
6412 put_lseg(lseg);
6413 }
6414
6415 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6416 smp_mb__after_clear_bit();
6417 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6418
6419 put_rpccred(data->cred);
6420 kfree(data);
6421 }
6422
6423 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6424 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6425 .rpc_call_done = nfs4_layoutcommit_done,
6426 .rpc_release = nfs4_layoutcommit_release,
6427 };
6428
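/*
 * Send LAYOUTCOMMIT.  When @sync is true, wait for the RPC to complete
 * and return its status; otherwise return as soon as the task has been
 * started.
 */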
6429 int
6430 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6431 {
6432 struct rpc_message msg = {
6433 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6434 .rpc_argp = &data->args,
6435 .rpc_resp = &data->res,
6436 .rpc_cred = data->cred,
6437 };
6438 struct rpc_task_setup task_setup_data = {
6439 .task = &data->task,
6440 .rpc_client = NFS_CLIENT(data->args.inode),
6441 .rpc_message = &msg,
6442 .callback_ops = &nfs4_layoutcommit_ops,
6443 .callback_data = data,
6444 .flags = RPC_TASK_ASYNC,
6445 };
6446 struct rpc_task *task;
6447 int status = 0;
6448
6449 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6450 "lbw: %llu inode %lu\n",
6451 data->task.tk_pid, sync,
6452 data->args.lastbytewritten,
6453 data->args.inode->i_ino);
6454
6455 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6456 task = rpc_run_task(&task_setup_data);
6457 if (IS_ERR(task))
6458 return PTR_ERR(task);
6459 	if (!sync)
6460 goto out;
6461 status = nfs4_wait_for_completion_rpc_task(task);
6462 if (status != 0)
6463 goto out;
6464 status = task->tk_status;
6465 out:
6466 dprintk("%s: status %d\n", __func__, status);
6467 rpc_put_task(task);
6468 return status;
6469 }
6470
6471 static int
6472 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6473 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6474 {
6475 struct nfs41_secinfo_no_name_args args = {
6476 .style = SECINFO_STYLE_CURRENT_FH,
6477 };
6478 struct nfs4_secinfo_res res = {
6479 .flavors = flavors,
6480 };
6481 struct rpc_message msg = {
6482 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6483 .rpc_argp = &args,
6484 .rpc_resp = &res,
6485 };
6486 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6487 }
6488
6489 static int
6490 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6491 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6492 {
6493 struct nfs4_exception exception = { };
6494 int err;
6495 do {
6496 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6497 switch (err) {
6498 case 0:
6499 case -NFS4ERR_WRONGSEC:
6500 case -NFS4ERR_NOTSUPP:
6501 goto out;
6502 default:
6503 err = nfs4_handle_exception(server, err, &exception);
6504 }
6505 } while (exception.retry);
6506 out:
6507 return err;
6508 }
6509
6510 static int
6511 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6512 struct nfs_fsinfo *info)
6513 {
6514 int err;
6515 struct page *page;
6516 rpc_authflavor_t flavor;
6517 struct nfs4_secinfo_flavors *flavors;
6518
6519 page = alloc_page(GFP_KERNEL);
6520 if (!page) {
6521 err = -ENOMEM;
6522 goto out;
6523 }
6524
6525 flavors = page_address(page);
6526 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6527
6528 /*
6529 * Fall back on "guess and check" method if
6530 * the server doesn't support SECINFO_NO_NAME
6531 */
6532 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6533 err = nfs4_find_root_sec(server, fhandle, info);
6534 goto out_freepage;
6535 }
6536 if (err)
6537 goto out_freepage;
6538
6539 flavor = nfs_find_best_sec(flavors);
6540 if (err == 0)
6541 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6542
6543 out_freepage:
6544 put_page(page);
6545 if (err == -EACCES)
6546 return -EPERM;
6547 out:
6548 return err;
6549 }
6550
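/*
 * Ask the server whether @stateid is still valid (TEST_STATEID); on a
 * successful RPC the per-stateid status is returned.
 */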
6551 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6552 {
6553 int status;
6554 struct nfs41_test_stateid_args args = {
6555 .stateid = stateid,
6556 };
6557 struct nfs41_test_stateid_res res;
6558 struct rpc_message msg = {
6559 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6560 .rpc_argp = &args,
6561 .rpc_resp = &res,
6562 };
6563
6564 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6565 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6566
6567 if (status == NFS_OK)
6568 return res.status;
6569 return status;
6570 }
6571
6572 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6573 {
6574 struct nfs4_exception exception = { };
6575 int err;
6576 do {
6577 err = nfs4_handle_exception(server,
6578 _nfs41_test_stateid(server, stateid),
6579 &exception);
6580 } while (exception.retry);
6581 return err;
6582 }
6583
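/*
 * Tell the server it may release the state associated with @stateid
 * (FREE_STATEID).
 */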
6584 static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6585 {
6586 struct nfs41_free_stateid_args args = {
6587 .stateid = stateid,
6588 };
6589 struct nfs41_free_stateid_res res;
6590 struct rpc_message msg = {
6591 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6592 .rpc_argp = &args,
6593 .rpc_resp = &res,
6594 };
6595
6596 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6597 return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6598 }
6599
6600 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6601 {
6602 struct nfs4_exception exception = { };
6603 int err;
6604 do {
6605 err = nfs4_handle_exception(server,
6606 _nfs4_free_stateid(server, stateid),
6607 &exception);
6608 } while (exception.retry);
6609 return err;
6610 }
6611
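/*
 * NFSv4.1 stateid comparison: two stateids match when their "other"
 * fields agree and either the seqids are equal or one of them is zero,
 * which acts as a wildcard for the current sequence number.
 */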
6612 static bool nfs41_match_stateid(const nfs4_stateid *s1,
6613 const nfs4_stateid *s2)
6614 {
6615 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6616 return false;
6617
6618 if (s1->seqid == s2->seqid)
6619 return true;
6620 if (s1->seqid == 0 || s2->seqid == 0)
6621 return true;
6622
6623 return false;
6624 }
6625
6626 #endif /* CONFIG_NFS_V4_1 */
6627
6628 static bool nfs4_match_stateid(const nfs4_stateid *s1,
6629 const nfs4_stateid *s2)
6630 {
6631 return nfs4_stateid_match(s1, s2);
6632 }
6633
6634
6635 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6636 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6637 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6638 .recover_open = nfs4_open_reclaim,
6639 .recover_lock = nfs4_lock_reclaim,
6640 .establish_clid = nfs4_init_clientid,
6641 .get_clid_cred = nfs4_get_setclientid_cred,
6642 };
6643
6644 #if defined(CONFIG_NFS_V4_1)
6645 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6646 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6647 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6648 .recover_open = nfs4_open_reclaim,
6649 .recover_lock = nfs4_lock_reclaim,
6650 .establish_clid = nfs41_init_clientid,
6651 .get_clid_cred = nfs4_get_exchange_id_cred,
6652 .reclaim_complete = nfs41_proc_reclaim_complete,
6653 };
6654 #endif /* CONFIG_NFS_V4_1 */
6655
6656 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
6657 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6658 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6659 .recover_open = nfs4_open_expired,
6660 .recover_lock = nfs4_lock_expired,
6661 .establish_clid = nfs4_init_clientid,
6662 .get_clid_cred = nfs4_get_setclientid_cred,
6663 };
6664
6665 #if defined(CONFIG_NFS_V4_1)
6666 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
6667 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6668 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6669 .recover_open = nfs41_open_expired,
6670 .recover_lock = nfs41_lock_expired,
6671 .establish_clid = nfs41_init_clientid,
6672 .get_clid_cred = nfs4_get_exchange_id_cred,
6673 };
6674 #endif /* CONFIG_NFS_V4_1 */
6675
6676 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
6677 .sched_state_renewal = nfs4_proc_async_renew,
6678 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
6679 .renew_lease = nfs4_proc_renew,
6680 };
6681
6682 #if defined(CONFIG_NFS_V4_1)
6683 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
6684 .sched_state_renewal = nfs41_proc_async_sequence,
6685 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
6686 .renew_lease = nfs4_proc_sequence,
6687 };
6688 #endif
6689
6690 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
6691 .minor_version = 0,
6692 .call_sync = _nfs4_call_sync,
6693 .match_stateid = nfs4_match_stateid,
6694 .find_root_sec = nfs4_find_root_sec,
6695 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
6696 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
6697 .state_renewal_ops = &nfs40_state_renewal_ops,
6698 };
6699
6700 #if defined(CONFIG_NFS_V4_1)
6701 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
6702 .minor_version = 1,
6703 .call_sync = _nfs4_call_sync_session,
6704 .match_stateid = nfs41_match_stateid,
6705 .find_root_sec = nfs41_find_root_sec,
6706 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
6707 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
6708 .state_renewal_ops = &nfs41_state_renewal_ops,
6709 };
6710 #endif
6711
6712 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
6713 [0] = &nfs_v4_0_minor_ops,
6714 #if defined(CONFIG_NFS_V4_1)
6715 [1] = &nfs_v4_1_minor_ops,
6716 #endif
6717 };
6718
6719 static const struct inode_operations nfs4_file_inode_operations = {
6720 .permission = nfs_permission,
6721 .getattr = nfs_getattr,
6722 .setattr = nfs_setattr,
6723 .getxattr = generic_getxattr,
6724 .setxattr = generic_setxattr,
6725 .listxattr = generic_listxattr,
6726 .removexattr = generic_removexattr,
6727 };
6728
6729 const struct nfs_rpc_ops nfs_v4_clientops = {
6730 .version = 4, /* protocol version */
6731 .dentry_ops = &nfs4_dentry_operations,
6732 .dir_inode_ops = &nfs4_dir_inode_operations,
6733 .file_inode_ops = &nfs4_file_inode_operations,
6734 .file_ops = &nfs4_file_operations,
6735 .getroot = nfs4_proc_get_root,
6736 .submount = nfs4_submount,
6737 .getattr = nfs4_proc_getattr,
6738 .setattr = nfs4_proc_setattr,
6739 .lookup = nfs4_proc_lookup,
6740 .access = nfs4_proc_access,
6741 .readlink = nfs4_proc_readlink,
6742 .create = nfs4_proc_create,
6743 .remove = nfs4_proc_remove,
6744 .unlink_setup = nfs4_proc_unlink_setup,
6745 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
6746 .unlink_done = nfs4_proc_unlink_done,
6747 .rename = nfs4_proc_rename,
6748 .rename_setup = nfs4_proc_rename_setup,
6749 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
6750 .rename_done = nfs4_proc_rename_done,
6751 .link = nfs4_proc_link,
6752 .symlink = nfs4_proc_symlink,
6753 .mkdir = nfs4_proc_mkdir,
6754 .rmdir = nfs4_proc_remove,
6755 .readdir = nfs4_proc_readdir,
6756 .mknod = nfs4_proc_mknod,
6757 .statfs = nfs4_proc_statfs,
6758 .fsinfo = nfs4_proc_fsinfo,
6759 .pathconf = nfs4_proc_pathconf,
6760 .set_capabilities = nfs4_server_capabilities,
6761 .decode_dirent = nfs4_decode_dirent,
6762 .read_setup = nfs4_proc_read_setup,
6763 .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
6764 .read_done = nfs4_read_done,
6765 .write_setup = nfs4_proc_write_setup,
6766 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
6767 .write_done = nfs4_write_done,
6768 .commit_setup = nfs4_proc_commit_setup,
6769 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
6770 .commit_done = nfs4_commit_done,
6771 .lock = nfs4_proc_lock,
6772 .clear_acl_cache = nfs4_zap_acl_attr,
6773 .close_context = nfs4_close_context,
6774 .open_context = nfs4_atomic_open,
6775 .init_client = nfs4_init_client,
6776 };
6777
6778 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
6779 .prefix = XATTR_NAME_NFSV4_ACL,
6780 .list = nfs4_xattr_list_nfs4_acl,
6781 .get = nfs4_xattr_get_nfs4_acl,
6782 .set = nfs4_xattr_set_nfs4_acl,
6783 };
6784
6785 const struct xattr_handler *nfs4_xattr_handlers[] = {
6786 &nfs4_xattr_nfs4_acl_handler,
6787 NULL
6788 };
6789
6790 module_param(max_session_slots, ushort, 0644);
6791 MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
6792 "requests the client will negotiate");
6793
6794 /*
6795 * Local variables:
6796 * c-basic-offset: 8
6797 * End:
6798 */