SUNRPC: constify rpc_clnt fields cl_server and cl_protname
1da177e4 1/*
55aa4f58 2 * linux/net/sunrpc/clnt.c
1da177e4
LT
3 *
4 * This file contains the high-level RPC interface.
5 * It is modeled as a finite state machine to support both synchronous
6 * and asynchronous requests.
7 *
8 * - RPC header generation and argument serialization.
9 * - Credential refresh.
10 * - TCP connect handling.
11 * - Retry of operation when it is suspected the operation failed because
12 * of uid squashing on the server, or when the credentials were stale
13 * and need to be refreshed, or when a packet was damaged in transit.
14 * This may have to be moved to the VFS layer.
15 *
1da177e4
LT
16 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
17 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
18 */
19
20#include <asm/system.h>
21
22#include <linux/module.h>
23#include <linux/types.h>
cb3997b5 24#include <linux/kallsyms.h>
1da177e4 25#include <linux/mm.h>
23ac6581
TM
26#include <linux/namei.h>
27#include <linux/mount.h>
1da177e4 28#include <linux/slab.h>
1da177e4 29#include <linux/utsname.h>
11c556b3 30#include <linux/workqueue.h>
176e21ee 31#include <linux/in.h>
510deb0d 32#include <linux/in6.h>
176e21ee 33#include <linux/un.h>
1da177e4
LT
34
35#include <linux/sunrpc/clnt.h>
1da177e4 36#include <linux/sunrpc/rpc_pipe_fs.h>
11c556b3 37#include <linux/sunrpc/metrics.h>
55ae1aab 38#include <linux/sunrpc/bc_xprt.h>
1da177e4 39
55ae1aab 40#include "sunrpc.h"
70abc49b 41#include "netns.h"
1da177e4 42
1da177e4
LT
43#ifdef RPC_DEBUG
44# define RPCDBG_FACILITY RPCDBG_CALL
45#endif
46
46121cf7
CL
47#define dprint_status(t) \
48 dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \
0dc47877 49 __func__, t->tk_status)
46121cf7 50
188fef11
TM
51/*
52 * All RPC clients are linked into their network namespace's all_clients list
53 */
188fef11 54
1da177e4
LT
55static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
56
57
58static void call_start(struct rpc_task *task);
59static void call_reserve(struct rpc_task *task);
60static void call_reserveresult(struct rpc_task *task);
61static void call_allocate(struct rpc_task *task);
1da177e4
LT
62static void call_decode(struct rpc_task *task);
63static void call_bind(struct rpc_task *task);
da351878 64static void call_bind_status(struct rpc_task *task);
1da177e4 65static void call_transmit(struct rpc_task *task);
9e00abc3 66#if defined(CONFIG_SUNRPC_BACKCHANNEL)
55ae1aab 67static void call_bc_transmit(struct rpc_task *task);
9e00abc3 68#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1da177e4 69static void call_status(struct rpc_task *task);
940e3318 70static void call_transmit_status(struct rpc_task *task);
1da177e4
LT
71static void call_refresh(struct rpc_task *task);
72static void call_refreshresult(struct rpc_task *task);
73static void call_timeout(struct rpc_task *task);
74static void call_connect(struct rpc_task *task);
75static void call_connect_status(struct rpc_task *task);
1da177e4 76
b0e1c57e
CL
77static __be32 *rpc_encode_header(struct rpc_task *task);
78static __be32 *rpc_verify_header(struct rpc_task *task);
caabea8a 79static int rpc_ping(struct rpc_clnt *clnt);
64c91a1f 80
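
For orientation, the following editorial comment (not part of the original clnt.c) summarizes the order in which the call_* states declared above are visited; it is reconstructed from the numbered state comments later in this file.

/*
 * RPC call state machine, in the usual order of traversal:
 *
 *   call_start -> call_reserve -> call_reserveresult -> call_refresh ->
 *   call_refreshresult -> call_allocate -> call_bind -> call_bind_status ->
 *   call_connect -> call_connect_status -> call_transmit (encoding via
 *   rpc_xdr_encode) -> call_transmit_status -> call_status -> call_decode ->
 *   rpc_exit_task
 *
 * call_timeout re-enters the machine at call_bind, and call_status may
 * loop back to call_bind or call_transmit on transient errors.
 */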
188fef11
TM
81static void rpc_register_client(struct rpc_clnt *clnt)
82{
70abc49b
SK
83 struct sunrpc_net *sn = net_generic(clnt->cl_xprt->xprt_net, sunrpc_net_id);
84
85 spin_lock(&sn->rpc_client_lock);
86 list_add(&clnt->cl_clients, &sn->all_clients);
87 spin_unlock(&sn->rpc_client_lock);
188fef11
TM
88}
89
90static void rpc_unregister_client(struct rpc_clnt *clnt)
91{
70abc49b
SK
92 struct sunrpc_net *sn = net_generic(clnt->cl_xprt->xprt_net, sunrpc_net_id);
93
94 spin_lock(&sn->rpc_client_lock);
188fef11 95 list_del(&clnt->cl_clients);
70abc49b 96 spin_unlock(&sn->rpc_client_lock);
188fef11 97}
1da177e4 98
0157d021
SK
99static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
100{
30507f58 101 if (clnt->cl_dentry) {
80df9d20
SK
102 if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy)
103 clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth);
30507f58 104 rpc_remove_client_dir(clnt->cl_dentry);
80df9d20 105 }
30507f58 106 clnt->cl_dentry = NULL;
0157d021
SK
107}
108
109static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
110{
111 struct super_block *pipefs_sb;
0157d021
SK
112
113 pipefs_sb = rpc_get_sb_net(clnt->cl_xprt->xprt_net);
114 if (pipefs_sb) {
0157d021
SK
115 __rpc_clnt_remove_pipedir(clnt);
116 rpc_put_sb_net(clnt->cl_xprt->xprt_net);
117 }
0157d021
SK
118}
119
120static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
121 struct rpc_clnt *clnt, char *dir_name)
1da177e4 122{
f134585a 123 static uint32_t clntid;
23ac6581
TM
124 char name[15];
125 struct qstr q = {
126 .name = name,
127 };
0157d021 128 struct dentry *dir, *dentry;
1da177e4
LT
129 int error;
130
0157d021
SK
131 dir = rpc_d_lookup_sb(sb, dir_name);
132 if (dir == NULL)
133 return dir;
f134585a 134 for (;;) {
23ac6581
TM
135 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
136 name[sizeof(name) - 1] = '\0';
137 q.hash = full_name_hash(q.name, q.len);
0157d021
SK
138 dentry = rpc_create_client_dir(dir, &q, clnt);
139 if (!IS_ERR(dentry))
23ac6581 140 break;
0157d021 141 error = PTR_ERR(dentry);
f134585a 142 if (error != -EEXIST) {
23ac6581
TM
143 printk(KERN_INFO "RPC: Couldn't create pipefs entry"
144 " %s/%s, error %d\n",
145 dir_name, name, error);
0157d021 146 break;
f134585a 147 }
1da177e4 148 }
0157d021
SK
149 dput(dir);
150 return dentry;
151}
152
153static int
154rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
155{
156 struct super_block *pipefs_sb;
30507f58 157 struct dentry *dentry;
0157d021 158
30507f58 159 clnt->cl_dentry = NULL;
0157d021
SK
160 if (dir_name == NULL)
161 return 0;
0157d021 162 pipefs_sb = rpc_get_sb_net(clnt->cl_xprt->xprt_net);
70fe25b6
SK
163 if (!pipefs_sb)
164 return 0;
30507f58 165 dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name);
0157d021 166 rpc_put_sb_net(clnt->cl_xprt->xprt_net);
30507f58
SK
167 if (IS_ERR(dentry))
168 return PTR_ERR(dentry);
169 clnt->cl_dentry = dentry;
23ac6581 170 return 0;
1da177e4
LT
171}
172
80df9d20
SK
173static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
174 struct super_block *sb)
175{
176 struct dentry *dentry;
177 int err = 0;
178
179 switch (event) {
180 case RPC_PIPEFS_MOUNT:
181 if (clnt->cl_program->pipe_dir_name == NULL)
182 break;
183 dentry = rpc_setup_pipedir_sb(sb, clnt,
184 clnt->cl_program->pipe_dir_name);
185 BUG_ON(dentry == NULL);
186 if (IS_ERR(dentry))
187 return PTR_ERR(dentry);
30507f58 188 clnt->cl_dentry = dentry;
80df9d20
SK
189 if (clnt->cl_auth->au_ops->pipes_create) {
190 err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth);
191 if (err)
192 __rpc_clnt_remove_pipedir(clnt);
193 }
194 break;
195 case RPC_PIPEFS_UMOUNT:
196 __rpc_clnt_remove_pipedir(clnt);
197 break;
198 default:
199 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
200 return -ENOTSUPP;
201 }
202 return err;
203}
204
205static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
206 void *ptr)
207{
208 struct super_block *sb = ptr;
209 struct rpc_clnt *clnt;
210 int error = 0;
211 struct sunrpc_net *sn = net_generic(sb->s_fs_info, sunrpc_net_id);
212
213 spin_lock(&sn->rpc_client_lock);
214 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
215 error = __rpc_pipefs_event(clnt, event, sb);
216 if (error)
217 break;
218 }
219 spin_unlock(&sn->rpc_client_lock);
220 return error;
221}
222
223static struct notifier_block rpc_clients_block = {
224 .notifier_call = rpc_pipefs_event,
eee17325 225 .priority = SUNRPC_PIPEFS_RPC_PRIO,
80df9d20
SK
226};
227
228int rpc_clients_notifier_register(void)
229{
230 return rpc_pipefs_notifier_register(&rpc_clients_block);
231}
232
233void rpc_clients_notifier_unregister(void)
234{
235 return rpc_pipefs_notifier_unregister(&rpc_clients_block);
236}
237
698b6d08 238static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
1da177e4 239{
698b6d08 240 struct rpc_program *program = args->program;
1da177e4
LT
241 struct rpc_version *version;
242 struct rpc_clnt *clnt = NULL;
6a19275a 243 struct rpc_auth *auth;
1da177e4 244 int err;
06b8d255
CL
245 size_t len;
246
247 /* sanity check the name before trying to print it */
248 err = -EINVAL;
698b6d08 249 len = strlen(args->servername);
06b8d255
CL
250 if (len > RPC_MAXNETNAMELEN)
251 goto out_no_rpciod;
252 len++;
1da177e4 253
46121cf7 254 dprintk("RPC: creating %s client for %s (xprt %p)\n",
698b6d08 255 program->name, args->servername, xprt);
1da177e4 256
4ada539e
TM
257 err = rpciod_up();
258 if (err)
259 goto out_no_rpciod;
1da177e4
LT
260 err = -EINVAL;
261 if (!xprt)
712917d1 262 goto out_no_xprt;
698b6d08
TM
263
264 if (args->version >= program->nrvers)
265 goto out_err;
266 version = program->version[args->version];
267 if (version == NULL)
1da177e4
LT
268 goto out_err;
269
270 err = -ENOMEM;
0da974f4 271 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
1da177e4
LT
272 if (!clnt)
273 goto out_err;
1da177e4
LT
274 clnt->cl_parent = clnt;
275
6eac7d3f
TM
276 clnt->cl_server = kstrdup(args->servername, GFP_KERNEL);
277 if (clnt->cl_server == NULL)
278 goto out_no_server;
1da177e4
LT
279
280 clnt->cl_xprt = xprt;
281 clnt->cl_procinfo = version->procs;
282 clnt->cl_maxproc = version->nrprocs;
283 clnt->cl_protname = program->name;
d5b337b4 284 clnt->cl_prog = args->prognumber ? : program->number;
1da177e4 285 clnt->cl_vers = version->number;
1da177e4 286 clnt->cl_stats = program->stats;
11c556b3 287 clnt->cl_metrics = rpc_alloc_iostats(clnt);
23bf85ba
TM
288 err = -ENOMEM;
289 if (clnt->cl_metrics == NULL)
290 goto out_no_stats;
3e32a5d9 291 clnt->cl_program = program;
6529eba0 292 INIT_LIST_HEAD(&clnt->cl_tasks);
4bef61ff 293 spin_lock_init(&clnt->cl_lock);
1da177e4 294
ec739ef0 295 if (!xprt_bound(clnt->cl_xprt))
1da177e4
LT
296 clnt->cl_autobind = 1;
297
ba7392bb
TM
298 clnt->cl_timeout = xprt->timeout;
299 if (args->timeout != NULL) {
300 memcpy(&clnt->cl_timeout_default, args->timeout,
301 sizeof(clnt->cl_timeout_default));
302 clnt->cl_timeout = &clnt->cl_timeout_default;
303 }
304
1da177e4 305 clnt->cl_rtt = &clnt->cl_rtt_default;
ba7392bb 306 rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
608207e8
OK
307 clnt->cl_principal = NULL;
308 if (args->client_name) {
309 clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
310 if (!clnt->cl_principal)
311 goto out_no_principal;
312 }
1da177e4 313
006abe88 314 atomic_set(&clnt->cl_count, 1);
34f52e35 315
1da177e4
LT
316 err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
317 if (err < 0)
318 goto out_no_path;
319
698b6d08 320 auth = rpcauth_create(args->authflavor, clnt);
6a19275a 321 if (IS_ERR(auth)) {
1da177e4 322 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
698b6d08 323 args->authflavor);
6a19275a 324 err = PTR_ERR(auth);
1da177e4
LT
325 goto out_no_auth;
326 }
327
328 /* save the nodename */
63ffc23d 329 clnt->cl_nodelen = strlen(init_utsname()->nodename);
1da177e4
LT
330 if (clnt->cl_nodelen > UNX_MAXNODENAME)
331 clnt->cl_nodelen = UNX_MAXNODENAME;
63ffc23d 332 memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
6529eba0 333 rpc_register_client(clnt);
1da177e4
LT
334 return clnt;
335
336out_no_auth:
0157d021 337 rpc_clnt_remove_pipedir(clnt);
1da177e4 338out_no_path:
608207e8
OK
339 kfree(clnt->cl_principal);
340out_no_principal:
23bf85ba
TM
341 rpc_free_iostats(clnt->cl_metrics);
342out_no_stats:
6eac7d3f
TM
343 kfree(clnt->cl_server);
344out_no_server:
1da177e4
LT
345 kfree(clnt);
346out_err:
6b6ca86b 347 xprt_put(xprt);
712917d1 348out_no_xprt:
4ada539e
TM
349 rpciod_down();
350out_no_rpciod:
1da177e4
LT
351 return ERR_PTR(err);
352}
353
c2866763
CL
354/*
355 * rpc_create - create an RPC client and transport with one call
356 * @args: rpc_clnt create argument structure
357 *
358 * Creates and initializes an RPC transport and an RPC client.
359 *
360 * It can ping the server in order to determine if it is up, and to see if
361 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables
362 * this behavior so asynchronous tasks can also use rpc_create.
363 */
364struct rpc_clnt *rpc_create(struct rpc_create_args *args)
365{
366 struct rpc_xprt *xprt;
367 struct rpc_clnt *clnt;
3c341b0b 368 struct xprt_create xprtargs = {
9a23e332 369 .net = args->net,
4fa016eb 370 .ident = args->protocol,
d3bc9a1d 371 .srcaddr = args->saddress,
96802a09
FM
372 .dstaddr = args->address,
373 .addrlen = args->addrsize,
f300baba 374 .bc_xprt = args->bc_xprt,
96802a09 375 };
510deb0d 376 char servername[48];
c2866763 377
43780b87
CL
378 /*
379 * If the caller chooses not to specify a hostname, whip
380 * up a string representation of the passed-in address.
381 */
382 if (args->servername == NULL) {
176e21ee
CL
383 struct sockaddr_un *sun =
384 (struct sockaddr_un *)args->address;
da09eb93
CL
385 struct sockaddr_in *sin =
386 (struct sockaddr_in *)args->address;
387 struct sockaddr_in6 *sin6 =
388 (struct sockaddr_in6 *)args->address;
389
510deb0d
CL
390 servername[0] = '\0';
391 switch (args->address->sa_family) {
176e21ee
CL
392 case AF_LOCAL:
393 snprintf(servername, sizeof(servername), "%s",
394 sun->sun_path);
395 break;
da09eb93 396 case AF_INET:
21454aaa
HH
397 snprintf(servername, sizeof(servername), "%pI4",
398 &sin->sin_addr.s_addr);
510deb0d 399 break;
da09eb93 400 case AF_INET6:
5b095d98 401 snprintf(servername, sizeof(servername), "%pI6",
da09eb93 402 &sin6->sin6_addr);
510deb0d 403 break;
510deb0d
CL
404 default:
405 /* caller wants default server name, but
406 * address family isn't recognized. */
407 return ERR_PTR(-EINVAL);
408 }
43780b87
CL
409 args->servername = servername;
410 }
411
510deb0d
CL
412 xprt = xprt_create_transport(&xprtargs);
413 if (IS_ERR(xprt))
414 return (struct rpc_clnt *)xprt;
415
c2866763
CL
416 /*
417 * By default, kernel RPC client connects from a reserved port.
418 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
419 * but it is always enabled for rpciod, which handles the connect
420 * operation.
421 */
422 xprt->resvport = 1;
423 if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
424 xprt->resvport = 0;
425
698b6d08 426 clnt = rpc_new_client(args, xprt);
c2866763
CL
427 if (IS_ERR(clnt))
428 return clnt;
429
430 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
caabea8a 431 int err = rpc_ping(clnt);
c2866763
CL
432 if (err != 0) {
433 rpc_shutdown_client(clnt);
434 return ERR_PTR(err);
435 }
436 }
437
438 clnt->cl_softrtry = 1;
439 if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
440 clnt->cl_softrtry = 0;
441
c2866763
CL
442 if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
443 clnt->cl_autobind = 1;
43d78ef2
CL
444 if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
445 clnt->cl_discrtry = 1;
b6b6152c
OK
446 if (!(args->flags & RPC_CLNT_CREATE_QUIET))
447 clnt->cl_chatty = 1;
c2866763
CL
448
449 return clnt;
450}
b86acd50 451EXPORT_SYMBOL_GPL(rpc_create);
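
To illustrate the interface documented above, here is a minimal sketch (not part of clnt.c) of how a kernel caller might create and later shut down a client. The struct rpc_create_args field names, XPRT_TRANSPORT_TCP, RPC_AUTH_UNIX and RPC_CLNT_CREATE_NOPING are assumed to match this tree's headers; my_program is a hypothetical placeholder for a real rpc_program definition.

#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

/* Hypothetical placeholder; a real caller supplies a populated rpc_program
 * with its version and procedure tables. */
static struct rpc_program my_program;

static struct rpc_clnt *example_create_client(struct net *net,
					      struct sockaddr *addr, size_t addrlen)
{
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= addr,
		.addrsize	= addrlen,
		.servername	= NULL,		/* rpc_create() builds one from addr */
		.program	= &my_program,
		.version	= 1,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,
	};
	struct rpc_clnt *clnt;

	clnt = rpc_create(&args);		/* ERR_PTR() on failure */
	if (IS_ERR(clnt))
		return clnt;
	/* ... use clnt, then release it with rpc_shutdown_client(clnt) ... */
	return clnt;
}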
c2866763 452
1da177e4
LT
453/*
454 * This function clones the RPC client structure. It allows us to share the
455 * same transport while varying parameters such as the authentication
456 * flavour.
457 */
458struct rpc_clnt *
459rpc_clone_client(struct rpc_clnt *clnt)
460{
461 struct rpc_clnt *new;
3e32a5d9 462 int err = -ENOMEM;
1da177e4 463
e69062b4 464 new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
1da177e4
LT
465 if (!new)
466 goto out_no_clnt;
6eac7d3f
TM
467 new->cl_server = kstrdup(clnt->cl_server, GFP_KERNEL);
468 if (new->cl_server == NULL)
469 goto out_no_server;
d431a555
TM
470 new->cl_parent = clnt;
471 /* Turn off autobind on clones */
472 new->cl_autobind = 0;
473 INIT_LIST_HEAD(&new->cl_tasks);
474 spin_lock_init(&new->cl_lock);
ba7392bb 475 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
23bf85ba
TM
476 new->cl_metrics = rpc_alloc_iostats(clnt);
477 if (new->cl_metrics == NULL)
478 goto out_no_stats;
608207e8
OK
479 if (clnt->cl_principal) {
480 new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
481 if (new->cl_principal == NULL)
482 goto out_no_principal;
483 }
006abe88 484 atomic_set(&new->cl_count, 1);
3e32a5d9
TM
485 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
486 if (err != 0)
487 goto out_no_path;
1da177e4
LT
488 if (new->cl_auth)
489 atomic_inc(&new->cl_auth->au_count);
d431a555 490 xprt_get(clnt->cl_xprt);
006abe88 491 atomic_inc(&clnt->cl_count);
6529eba0 492 rpc_register_client(new);
4ada539e 493 rpciod_up();
1da177e4 494 return new;
3e32a5d9 495out_no_path:
608207e8
OK
496 kfree(new->cl_principal);
497out_no_principal:
3e32a5d9 498 rpc_free_iostats(new->cl_metrics);
23bf85ba 499out_no_stats:
6eac7d3f
TM
500 kfree(new->cl_server);
501out_no_server:
23bf85ba 502 kfree(new);
1da177e4 503out_no_clnt:
0dc47877 504 dprintk("RPC: %s: returned error %d\n", __func__, err);
3e32a5d9 505 return ERR_PTR(err);
1da177e4 506}
e8914c65 507EXPORT_SYMBOL_GPL(rpc_clone_client);
1da177e4 508
58f9612c
TM
509/*
510 * Kill all tasks for the given client.
511 * XXX: kill their descendants as well?
512 */
513void rpc_killall_tasks(struct rpc_clnt *clnt)
514{
515 struct rpc_task *rovr;
516
517
518 if (list_empty(&clnt->cl_tasks))
519 return;
520 dprintk("RPC: killing all tasks for client %p\n", clnt);
521 /*
522 * Spin lock all_tasks to prevent changes...
523 */
524 spin_lock(&clnt->cl_lock);
525 list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
526 if (!RPC_IS_ACTIVATED(rovr))
527 continue;
528 if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
529 rovr->tk_flags |= RPC_TASK_KILLED;
530 rpc_exit(rovr, -EIO);
8e26de23
SK
531 if (RPC_IS_QUEUED(rovr))
532 rpc_wake_up_queued_task(rovr->tk_waitqueue,
533 rovr);
58f9612c
TM
534 }
535 }
536 spin_unlock(&clnt->cl_lock);
537}
538EXPORT_SYMBOL_GPL(rpc_killall_tasks);
539
1da177e4
LT
540/*
541 * Properly shut down an RPC client, terminating all outstanding
90c5755f 542 * requests.
1da177e4 543 */
4c402b40 544void rpc_shutdown_client(struct rpc_clnt *clnt)
1da177e4 545{
34f52e35
TM
546 dprintk("RPC: shutting down %s client for %s\n",
547 clnt->cl_protname, clnt->cl_server);
1da177e4 548
34f52e35 549 while (!list_empty(&clnt->cl_tasks)) {
1da177e4 550 rpc_killall_tasks(clnt);
532347e2 551 wait_event_timeout(destroy_wait,
34f52e35 552 list_empty(&clnt->cl_tasks), 1*HZ);
1da177e4
LT
553 }
554
4c402b40 555 rpc_release_client(clnt);
1da177e4 556}
e8914c65 557EXPORT_SYMBOL_GPL(rpc_shutdown_client);
1da177e4
LT
558
559/*
34f52e35 560 * Free an RPC client
1da177e4 561 */
34f52e35 562static void
006abe88 563rpc_free_client(struct rpc_clnt *clnt)
1da177e4 564{
46121cf7 565 dprintk("RPC: destroying %s client for %s\n",
1da177e4 566 clnt->cl_protname, clnt->cl_server);
6eac7d3f 567 if (clnt->cl_parent != clnt)
8ad7c892 568 rpc_release_client(clnt->cl_parent);
6eac7d3f 569 kfree(clnt->cl_server);
6529eba0 570 rpc_unregister_client(clnt);
f5131257 571 rpc_clnt_remove_pipedir(clnt);
11c556b3 572 rpc_free_iostats(clnt->cl_metrics);
608207e8 573 kfree(clnt->cl_principal);
11c556b3 574 clnt->cl_metrics = NULL;
6b6ca86b 575 xprt_put(clnt->cl_xprt);
4ada539e 576 rpciod_down();
1da177e4 577 kfree(clnt);
1da177e4
LT
578}
579
1dd17ec6
TM
580/*
581 * Release the client's RPC auth handle, then free the client itself once its last reference is dropped
582 */
583static void
006abe88 584rpc_free_auth(struct rpc_clnt *clnt)
1dd17ec6 585{
1dd17ec6 586 if (clnt->cl_auth == NULL) {
006abe88 587 rpc_free_client(clnt);
1dd17ec6
TM
588 return;
589 }
590
591 /*
592 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
593 * release remaining GSS contexts. This mechanism ensures
594 * that it can do so safely.
595 */
006abe88 596 atomic_inc(&clnt->cl_count);
1dd17ec6
TM
597 rpcauth_release(clnt->cl_auth);
598 clnt->cl_auth = NULL;
006abe88
TM
599 if (atomic_dec_and_test(&clnt->cl_count))
600 rpc_free_client(clnt);
1dd17ec6
TM
601}
602
1da177e4 603/*
34f52e35 604 * Release reference to the RPC client
1da177e4
LT
605 */
606void
607rpc_release_client(struct rpc_clnt *clnt)
608{
34f52e35 609 dprintk("RPC: rpc_release_client(%p)\n", clnt);
1da177e4 610
34f52e35
TM
611 if (list_empty(&clnt->cl_tasks))
612 wake_up(&destroy_wait);
006abe88
TM
613 if (atomic_dec_and_test(&clnt->cl_count))
614 rpc_free_auth(clnt);
34f52e35
TM
615}
616
007e251f
AG
617/**
618 * rpc_bind_new_program - bind a new RPC program to an existing client
65b6e42c
RD
619 * @old: old rpc_client
620 * @program: rpc program to set
621 * @vers: rpc program version
007e251f
AG
622 *
623 * Clones the rpc client and sets up a new RPC program. This is mainly
624 * of use for enabling different RPC programs to share the same transport.
625 * The Sun NFSv2/v3 ACL protocol can do this.
626 */
627struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
628 struct rpc_program *program,
89eb21c3 629 u32 vers)
007e251f
AG
630{
631 struct rpc_clnt *clnt;
632 struct rpc_version *version;
633 int err;
634
635 BUG_ON(vers >= program->nrvers || !program->version[vers]);
636 version = program->version[vers];
637 clnt = rpc_clone_client(old);
638 if (IS_ERR(clnt))
639 goto out;
640 clnt->cl_procinfo = version->procs;
641 clnt->cl_maxproc = version->nrprocs;
642 clnt->cl_protname = program->name;
643 clnt->cl_prog = program->number;
644 clnt->cl_vers = version->number;
645 clnt->cl_stats = program->stats;
caabea8a 646 err = rpc_ping(clnt);
007e251f
AG
647 if (err != 0) {
648 rpc_shutdown_client(clnt);
649 clnt = ERR_PTR(err);
650 }
cca5172a 651out:
007e251f
AG
652 return clnt;
653}
e8914c65 654EXPORT_SYMBOL_GPL(rpc_bind_new_program);
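
A hedged sketch of the pattern described above, with two RPC programs sharing one transport as the NFSv2/v3 ACL client does; it is not part of the original file, and nfsacl_program is a hypothetical placeholder.

/* Hypothetical placeholder for the ACL program definition. */
static struct rpc_program nfsacl_program;

static int example_bind_acl(struct rpc_clnt *nfs_clnt)
{
	struct rpc_clnt *acl_clnt;

	/* Clone nfs_clnt and rebind the clone to the ACL program, version 3.
	 * rpc_bind_new_program() pings the server, so failure usually means
	 * the program/version is not supported there. */
	acl_clnt = rpc_bind_new_program(nfs_clnt, &nfsacl_program, 3);
	if (IS_ERR(acl_clnt))
		return PTR_ERR(acl_clnt);

	/* ... issue ACL calls on acl_clnt, then rpc_shutdown_client(acl_clnt) ... */
	return 0;
}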
007e251f 655
58f9612c
TM
656void rpc_task_release_client(struct rpc_task *task)
657{
658 struct rpc_clnt *clnt = task->tk_client;
659
660 if (clnt != NULL) {
661 /* Remove from client task list */
662 spin_lock(&clnt->cl_lock);
663 list_del(&task->tk_task);
664 spin_unlock(&clnt->cl_lock);
665 task->tk_client = NULL;
666
667 rpc_release_client(clnt);
668 }
669}
670
671static
672void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
673{
674 if (clnt != NULL) {
675 rpc_task_release_client(task);
676 task->tk_client = clnt;
006abe88 677 atomic_inc(&clnt->cl_count);
58f9612c
TM
678 if (clnt->cl_softrtry)
679 task->tk_flags |= RPC_TASK_SOFT;
680 /* Add to the client's list of all tasks */
681 spin_lock(&clnt->cl_lock);
682 list_add_tail(&task->tk_task, &clnt->cl_tasks);
683 spin_unlock(&clnt->cl_lock);
684 }
685}
686
cbdabc7f
AA
687void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
688{
689 rpc_task_release_client(task);
690 rpc_task_set_client(task, clnt);
691}
692EXPORT_SYMBOL_GPL(rpc_task_reset_client);
693
694
58f9612c
TM
695static void
696rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
697{
698 if (msg != NULL) {
699 task->tk_msg.rpc_proc = msg->rpc_proc;
700 task->tk_msg.rpc_argp = msg->rpc_argp;
701 task->tk_msg.rpc_resp = msg->rpc_resp;
a17c2153
TM
702 if (msg->rpc_cred != NULL)
703 task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
58f9612c
TM
704 }
705}
706
1da177e4
LT
707/*
708 * Default callback for async RPC calls
709 */
710static void
963d8fe5 711rpc_default_callback(struct rpc_task *task, void *data)
1da177e4
LT
712{
713}
714
963d8fe5
TM
715static const struct rpc_call_ops rpc_default_ops = {
716 .rpc_call_done = rpc_default_callback,
717};
718
c970aa85
TM
719/**
720 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
721 * @task_setup_data: pointer to task initialisation data
722 */
723struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
6e5b70e9 724{
19445b99 725 struct rpc_task *task;
6e5b70e9 726
84115e1c 727 task = rpc_new_task(task_setup_data);
19445b99 728 if (IS_ERR(task))
50859259 729 goto out;
6e5b70e9 730
58f9612c
TM
731 rpc_task_set_client(task, task_setup_data->rpc_client);
732 rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
733
58f9612c
TM
734 if (task->tk_action == NULL)
735 rpc_call_start(task);
736
6e5b70e9
TM
737 atomic_inc(&task->tk_count);
738 rpc_execute(task);
6e5b70e9 739out:
19445b99 740 return task;
6e5b70e9 741}
c970aa85 742EXPORT_SYMBOL_GPL(rpc_run_task);
6e5b70e9
TM
743
744/**
745 * rpc_call_sync - Perform a synchronous RPC call
746 * @clnt: pointer to RPC client
747 * @msg: RPC call parameters
748 * @flags: RPC call flags
1da177e4 749 */
cbc20059 750int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
1da177e4
LT
751{
752 struct rpc_task *task;
84115e1c
TM
753 struct rpc_task_setup task_setup_data = {
754 .rpc_client = clnt,
755 .rpc_message = msg,
756 .callback_ops = &rpc_default_ops,
757 .flags = flags,
758 };
6e5b70e9 759 int status;
1da177e4 760
1da177e4
LT
761 BUG_ON(flags & RPC_TASK_ASYNC);
762
c970aa85 763 task = rpc_run_task(&task_setup_data);
6e5b70e9
TM
764 if (IS_ERR(task))
765 return PTR_ERR(task);
e60859ac 766 status = task->tk_status;
bde8f00c 767 rpc_put_task(task);
1da177e4
LT
768 return status;
769}
e8914c65 770EXPORT_SYMBOL_GPL(rpc_call_sync);
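
A minimal sketch of a synchronous caller, assuming a hypothetical procedure table example_procs[]; the rpc_message fields and the RPC_TASK_SOFT flag are used exactly as elsewhere in this file.

/* Hypothetical procedure table; real callers index into their program's
 * rpc_procinfo array, with p_encode/p_decode filled in. */
static struct rpc_procinfo example_procs[1];

static int example_sync_call(struct rpc_clnt *clnt, void *argp, void *resp)
{
	struct rpc_message msg = {
		.rpc_proc = &example_procs[0],
		.rpc_argp = argp,
		.rpc_resp = resp,
	};

	/* Blocks until the reply has been decoded or the call fails;
	 * RPC_TASK_SOFT turns a major timeout into an error return
	 * instead of retrying forever. */
	return rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
}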
1da177e4 771
6e5b70e9
TM
772/**
773 * rpc_call_async - Perform an asynchronous RPC call
774 * @clnt: pointer to RPC client
775 * @msg: RPC call parameters
776 * @flags: RPC call flags
65b6e42c 777 * @tk_ops: RPC call ops
6e5b70e9 778 * @data: user call data
1da177e4
LT
779 */
780int
cbc20059 781rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
963d8fe5 782 const struct rpc_call_ops *tk_ops, void *data)
1da177e4
LT
783{
784 struct rpc_task *task;
84115e1c
TM
785 struct rpc_task_setup task_setup_data = {
786 .rpc_client = clnt,
787 .rpc_message = msg,
788 .callback_ops = tk_ops,
789 .callback_data = data,
790 .flags = flags|RPC_TASK_ASYNC,
791 };
1da177e4 792
c970aa85 793 task = rpc_run_task(&task_setup_data);
6e5b70e9
TM
794 if (IS_ERR(task))
795 return PTR_ERR(task);
796 rpc_put_task(task);
797 return 0;
1da177e4 798}
e8914c65 799EXPORT_SYMBOL_GPL(rpc_call_async);
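
For contrast with the synchronous path, a sketch of an asynchronous caller; the example_* names are hypothetical, while rpc_call_ops and rpc_call_async() are used with the signatures visible in this file.

static void example_call_done(struct rpc_task *task, void *calldata)
{
	/* Runs in rpciod context once the reply has been decoded
	 * (or the call has failed). */
	if (task->tk_status < 0)
		printk(KERN_WARNING "example RPC failed: %d\n", task->tk_status);
}

static const struct rpc_call_ops example_call_ops = {
	.rpc_call_done	= example_call_done,
};

static int example_async_call(struct rpc_clnt *clnt,
			      const struct rpc_message *msg, void *calldata)
{
	/* Returns as soon as the task is queued; completion is reported
	 * through example_call_ops. */
	return rpc_call_async(clnt, msg, RPC_TASK_SOFT, &example_call_ops, calldata);
}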
1da177e4 800
9e00abc3 801#if defined(CONFIG_SUNRPC_BACKCHANNEL)
55ae1aab
RL
802/**
803 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
804 * rpc_execute against it
7a73fdde
JSR
805 * @req: RPC request
806 * @tk_ops: RPC call ops
55ae1aab
RL
807 */
808struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
7a73fdde 809 const struct rpc_call_ops *tk_ops)
55ae1aab
RL
810{
811 struct rpc_task *task;
812 struct xdr_buf *xbufp = &req->rq_snd_buf;
813 struct rpc_task_setup task_setup_data = {
814 .callback_ops = tk_ops,
815 };
816
817 dprintk("RPC: rpc_run_bc_task req= %p\n", req);
818 /*
819 * Create an rpc_task to send the data
820 */
821 task = rpc_new_task(&task_setup_data);
19445b99 822 if (IS_ERR(task)) {
55ae1aab
RL
823 xprt_free_bc_request(req);
824 goto out;
825 }
826 task->tk_rqstp = req;
827
828 /*
829 * Set up the xdr_buf length.
830 * This also indicates that the buffer is XDR encoded already.
831 */
832 xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
833 xbufp->tail[0].iov_len;
834
835 task->tk_action = call_bc_transmit;
836 atomic_inc(&task->tk_count);
837 BUG_ON(atomic_read(&task->tk_count) != 2);
838 rpc_execute(task);
839
840out:
841 dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
842 return task;
843}
9e00abc3 844#endif /* CONFIG_SUNRPC_BACKCHANNEL */
55ae1aab 845
77de2c59
TM
846void
847rpc_call_start(struct rpc_task *task)
848{
849 task->tk_action = call_start;
850}
851EXPORT_SYMBOL_GPL(rpc_call_start);
852
ed39440a
CL
853/**
854 * rpc_peeraddr - extract remote peer address from clnt's xprt
855 * @clnt: RPC client structure
856 * @buf: target buffer
65b6e42c 857 * @bufsize: length of target buffer
ed39440a
CL
858 *
859 * Returns the number of bytes that are actually in the stored address.
860 */
861size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
862{
863 size_t bytes;
864 struct rpc_xprt *xprt = clnt->cl_xprt;
865
866 bytes = sizeof(xprt->addr);
867 if (bytes > bufsize)
868 bytes = bufsize;
869 memcpy(buf, &clnt->cl_xprt->addr, bytes);
c4efcb1d 870 return xprt->addrlen;
ed39440a 871}
b86acd50 872EXPORT_SYMBOL_GPL(rpc_peeraddr);
ed39440a 873
f425eba4
CL
874/**
875 * rpc_peeraddr2str - return remote peer address in printable format
876 * @clnt: RPC client structure
877 * @format: address format
878 *
879 */
b454ae90
CL
880const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
881 enum rpc_display_format_t format)
f425eba4
CL
882{
883 struct rpc_xprt *xprt = clnt->cl_xprt;
7559c7a2
CL
884
885 if (xprt->address_strings[format] != NULL)
886 return xprt->address_strings[format];
887 else
888 return "unprintable";
f425eba4 889}
b86acd50 890EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
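
A small usage sketch for the helper above; RPC_DISPLAY_ADDR and RPC_DISPLAY_PORT are assumed to be the enum rpc_display_format_t values defined in this tree's headers.

static void example_log_peer(struct rpc_clnt *clnt)
{
	/* The returned strings are owned by the transport; do not free them. */
	printk(KERN_INFO "RPC server %s, port %s\n",
	       rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR),
	       rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
}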
f425eba4 891
1da177e4
LT
892void
893rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
894{
895 struct rpc_xprt *xprt = clnt->cl_xprt;
470056c2
CL
896 if (xprt->ops->set_buffer_size)
897 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1da177e4 898}
e8914c65 899EXPORT_SYMBOL_GPL(rpc_setbufsize);
1da177e4
LT
900
901/*
902 * Return size of largest payload RPC client can support, in bytes
903 *
904 * For stream transports, this is one RPC record fragment (see RFC
905 * 1831), as we don't support multi-record requests yet. For datagram
906 * transports, this is the size of an IP packet minus the IP, UDP, and
907 * RPC header sizes.
908 */
909size_t rpc_max_payload(struct rpc_clnt *clnt)
910{
911 return clnt->cl_xprt->max_payload;
912}
b86acd50 913EXPORT_SYMBOL_GPL(rpc_max_payload);
1da177e4 914
35f5a422
CL
915/**
916 * rpc_force_rebind - force transport to check that remote port is unchanged
917 * @clnt: client to rebind
918 *
919 */
920void rpc_force_rebind(struct rpc_clnt *clnt)
921{
922 if (clnt->cl_autobind)
ec739ef0 923 xprt_clear_bound(clnt->cl_xprt);
35f5a422 924}
b86acd50 925EXPORT_SYMBOL_GPL(rpc_force_rebind);
35f5a422 926
aae2006e
AA
927/*
928 * Restart an (async) RPC call from the call_prepare state.
929 * Usually called from within the exit handler.
930 */
f1f88fc7 931int
aae2006e
AA
932rpc_restart_call_prepare(struct rpc_task *task)
933{
934 if (RPC_ASSASSINATED(task))
f1f88fc7 935 return 0;
d00c5d43
TM
936 task->tk_action = call_start;
937 if (task->tk_ops->rpc_call_prepare != NULL)
938 task->tk_action = rpc_prepare_task;
f1f88fc7 939 return 1;
aae2006e
AA
940}
941EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
942
1da177e4
LT
943/*
944 * Restart an (async) RPC call. Usually called from within the
945 * exit handler.
946 */
f1f88fc7 947int
1da177e4
LT
948rpc_restart_call(struct rpc_task *task)
949{
950 if (RPC_ASSASSINATED(task))
f1f88fc7 951 return 0;
1da177e4 952 task->tk_action = call_start;
f1f88fc7 953 return 1;
1da177e4 954}
e8914c65 955EXPORT_SYMBOL_GPL(rpc_restart_call);
1da177e4 956
3748f1e4
CL
957#ifdef RPC_DEBUG
958static const char *rpc_proc_name(const struct rpc_task *task)
959{
960 const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
961
962 if (proc) {
963 if (proc->p_name)
964 return proc->p_name;
965 else
966 return "NULL";
967 } else
968 return "no proc";
969}
970#endif
971
1da177e4
LT
972/*
973 * 0. Initial state
974 *
975 * Other FSM states can be visited zero or more times, but
976 * this state is visited exactly once for each RPC.
977 */
978static void
979call_start(struct rpc_task *task)
980{
981 struct rpc_clnt *clnt = task->tk_client;
982
3748f1e4 983 dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
46121cf7 984 clnt->cl_protname, clnt->cl_vers,
3748f1e4 985 rpc_proc_name(task),
46121cf7 986 (RPC_IS_ASYNC(task) ? "async" : "sync"));
1da177e4
LT
987
988 /* Increment call count */
989 task->tk_msg.rpc_proc->p_count++;
990 clnt->cl_stats->rpccnt++;
991 task->tk_action = call_reserve;
992}
993
994/*
995 * 1. Reserve an RPC call slot
996 */
997static void
998call_reserve(struct rpc_task *task)
999{
46121cf7 1000 dprint_status(task);
1da177e4 1001
1da177e4
LT
1002 task->tk_status = 0;
1003 task->tk_action = call_reserveresult;
1004 xprt_reserve(task);
1005}
1006
1007/*
1008 * 1b. Grok the result of xprt_reserve()
1009 */
1010static void
1011call_reserveresult(struct rpc_task *task)
1012{
1013 int status = task->tk_status;
1014
46121cf7 1015 dprint_status(task);
1da177e4
LT
1016
1017 /*
1018 * After a call to xprt_reserve(), we must have either
1019 * a request slot or else an error status.
1020 */
1021 task->tk_status = 0;
1022 if (status >= 0) {
1023 if (task->tk_rqstp) {
f2d47d02 1024 task->tk_action = call_refresh;
1da177e4
LT
1025 return;
1026 }
1027
1028 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
0dc47877 1029 __func__, status);
1da177e4
LT
1030 rpc_exit(task, -EIO);
1031 return;
1032 }
1033
1034 /*
1035 * Even though there was an error, we may have acquired
1036 * a request slot somehow. Make sure not to leak it.
1037 */
1038 if (task->tk_rqstp) {
1039 printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
0dc47877 1040 __func__, status);
1da177e4
LT
1041 xprt_release(task);
1042 }
1043
1044 switch (status) {
1045 case -EAGAIN: /* woken up; retry */
1046 task->tk_action = call_reserve;
1047 return;
1048 case -EIO: /* probably a shutdown */
1049 break;
1050 default:
1051 printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
0dc47877 1052 __func__, status);
1da177e4
LT
1053 break;
1054 }
1055 rpc_exit(task, status);
1056}
1057
1058/*
55576244
BF
1059 * 2. Bind and/or refresh the credentials
1060 */
1061static void
1062call_refresh(struct rpc_task *task)
1063{
1064 dprint_status(task);
1065
1066 task->tk_action = call_refreshresult;
1067 task->tk_status = 0;
1068 task->tk_client->cl_stats->rpcauthrefresh++;
1069 rpcauth_refreshcred(task);
1070}
1071
1072/*
1073 * 2a. Process the results of a credential refresh
1074 */
1075static void
1076call_refreshresult(struct rpc_task *task)
1077{
1078 int status = task->tk_status;
1079
1080 dprint_status(task);
1081
1082 task->tk_status = 0;
5fc43978 1083 task->tk_action = call_refresh;
55576244 1084 switch (status) {
5fc43978
TM
1085 case 0:
1086 if (rpcauth_uptodatecred(task))
1087 task->tk_action = call_allocate;
55576244
BF
1088 return;
1089 case -ETIMEDOUT:
1090 rpc_delay(task, 3*HZ);
5fc43978
TM
1091 case -EAGAIN:
1092 status = -EACCES;
1093 if (!task->tk_cred_retry)
1094 break;
1095 task->tk_cred_retry--;
1096 dprintk("RPC: %5u %s: retry refresh creds\n",
1097 task->tk_pid, __func__);
1098 return;
55576244 1099 }
5fc43978
TM
1100 dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1101 task->tk_pid, __func__, status);
1102 rpc_exit(task, status);
55576244
BF
1103}
1104
1105/*
1106 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
02107148 1107 * (Note: buffer memory is freed in xprt_release).
1da177e4
LT
1108 */
1109static void
1110call_allocate(struct rpc_task *task)
1111{
f2d47d02 1112 unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
02107148
CL
1113 struct rpc_rqst *req = task->tk_rqstp;
1114 struct rpc_xprt *xprt = task->tk_xprt;
2bea90d4 1115 struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1da177e4 1116
46121cf7
CL
1117 dprint_status(task);
1118
2bea90d4 1119 task->tk_status = 0;
f2d47d02 1120 task->tk_action = call_bind;
2bea90d4 1121
02107148 1122 if (req->rq_buffer)
1da177e4
LT
1123 return;
1124
2bea90d4
CL
1125 if (proc->p_proc != 0) {
1126 BUG_ON(proc->p_arglen == 0);
1127 if (proc->p_decode != NULL)
1128 BUG_ON(proc->p_replen == 0);
1129 }
1da177e4 1130
2bea90d4
CL
1131 /*
1132 * Calculate the size (in quads) of the RPC call
1133 * and reply headers, and convert both values
1134 * to byte sizes.
1135 */
1136 req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1137 req->rq_callsize <<= 2;
1138 req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1139 req->rq_rcvsize <<= 2;
1140
c5a4dd8b
CL
1141 req->rq_buffer = xprt->ops->buf_alloc(task,
1142 req->rq_callsize + req->rq_rcvsize);
2bea90d4 1143 if (req->rq_buffer != NULL)
1da177e4 1144 return;
46121cf7
CL
1145
1146 dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1da177e4 1147
5afa9133 1148 if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
b6e9c713 1149 task->tk_action = call_allocate;
1da177e4
LT
1150 rpc_delay(task, HZ>>4);
1151 return;
1152 }
1153
1154 rpc_exit(task, -ERESTARTSYS);
1155}
1156
940e3318
TM
1157static inline int
1158rpc_task_need_encode(struct rpc_task *task)
1159{
1160 return task->tk_rqstp->rq_snd_buf.len == 0;
1161}
1162
1163static inline void
1164rpc_task_force_reencode(struct rpc_task *task)
1165{
1166 task->tk_rqstp->rq_snd_buf.len = 0;
2574cc9f 1167 task->tk_rqstp->rq_bytes_sent = 0;
940e3318
TM
1168}
1169
2bea90d4
CL
1170static inline void
1171rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1172{
1173 buf->head[0].iov_base = start;
1174 buf->head[0].iov_len = len;
1175 buf->tail[0].iov_len = 0;
1176 buf->page_len = 0;
4f22ccc3 1177 buf->flags = 0;
2bea90d4
CL
1178 buf->len = 0;
1179 buf->buflen = len;
1180}
1181
1da177e4
LT
1182/*
1183 * 3. Encode arguments of an RPC call
1184 */
1185static void
b0e1c57e 1186rpc_xdr_encode(struct rpc_task *task)
1da177e4 1187{
1da177e4 1188 struct rpc_rqst *req = task->tk_rqstp;
9f06c719 1189 kxdreproc_t encode;
d8ed029d 1190 __be32 *p;
1da177e4 1191
46121cf7 1192 dprint_status(task);
1da177e4 1193
2bea90d4
CL
1194 rpc_xdr_buf_init(&req->rq_snd_buf,
1195 req->rq_buffer,
1196 req->rq_callsize);
1197 rpc_xdr_buf_init(&req->rq_rcv_buf,
1198 (char *)req->rq_buffer + req->rq_callsize,
1199 req->rq_rcvsize);
1da177e4 1200
b0e1c57e
CL
1201 p = rpc_encode_header(task);
1202 if (p == NULL) {
1203 printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1da177e4
LT
1204 rpc_exit(task, -EIO);
1205 return;
1206 }
b0e1c57e
CL
1207
1208 encode = task->tk_msg.rpc_proc->p_encode;
f3680312
BF
1209 if (encode == NULL)
1210 return;
1211
1212 task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1213 task->tk_msg.rpc_argp);
1da177e4
LT
1214}
1215
1216/*
1217 * 4. Get the server port number if not yet set
1218 */
1219static void
1220call_bind(struct rpc_task *task)
1221{
ec739ef0 1222 struct rpc_xprt *xprt = task->tk_xprt;
1da177e4 1223
46121cf7 1224 dprint_status(task);
1da177e4 1225
da351878 1226 task->tk_action = call_connect;
ec739ef0 1227 if (!xprt_bound(xprt)) {
da351878 1228 task->tk_action = call_bind_status;
ec739ef0 1229 task->tk_timeout = xprt->bind_timeout;
bbf7c1dd 1230 xprt->ops->rpcbind(task);
1da177e4
LT
1231 }
1232}
1233
1234/*
da351878
CL
1235 * 4a. Sort out bind result
1236 */
1237static void
1238call_bind_status(struct rpc_task *task)
1239{
906462af 1240 int status = -EIO;
da351878
CL
1241
1242 if (task->tk_status >= 0) {
46121cf7 1243 dprint_status(task);
da351878
CL
1244 task->tk_status = 0;
1245 task->tk_action = call_connect;
1246 return;
1247 }
1248
1249 switch (task->tk_status) {
381ba74a
TM
1250 case -ENOMEM:
1251 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1252 rpc_delay(task, HZ >> 2);
2429cbf6 1253 goto retry_timeout;
da351878 1254 case -EACCES:
46121cf7
CL
1255 dprintk("RPC: %5u remote rpcbind: RPC program/version "
1256 "unavailable\n", task->tk_pid);
b79dc8ce
CL
1257 /* fail immediately if this is an RPC ping */
1258 if (task->tk_msg.rpc_proc->p_proc == 0) {
1259 status = -EOPNOTSUPP;
1260 break;
1261 }
0b760113
TM
1262 if (task->tk_rebind_retry == 0)
1263 break;
1264 task->tk_rebind_retry--;
ea635a51 1265 rpc_delay(task, 3*HZ);
da45828e 1266 goto retry_timeout;
da351878 1267 case -ETIMEDOUT:
46121cf7 1268 dprintk("RPC: %5u rpcbind request timed out\n",
da351878 1269 task->tk_pid);
da45828e 1270 goto retry_timeout;
da351878 1271 case -EPFNOSUPPORT:
906462af 1272 /* server doesn't support any rpcbind version we know of */
012da158 1273 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
da351878
CL
1274 task->tk_pid);
1275 break;
1276 case -EPROTONOSUPPORT:
00a6e7bb 1277 dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
da351878 1278 task->tk_pid);
00a6e7bb
CL
1279 task->tk_status = 0;
1280 task->tk_action = call_bind;
1281 return;
012da158
CL
1282 case -ECONNREFUSED: /* connection problems */
1283 case -ECONNRESET:
1284 case -ENOTCONN:
1285 case -EHOSTDOWN:
1286 case -EHOSTUNREACH:
1287 case -ENETUNREACH:
1288 case -EPIPE:
1289 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1290 task->tk_pid, task->tk_status);
1291 if (!RPC_IS_SOFTCONN(task)) {
1292 rpc_delay(task, 5*HZ);
1293 goto retry_timeout;
1294 }
1295 status = task->tk_status;
1296 break;
da351878 1297 default:
46121cf7 1298 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
da351878 1299 task->tk_pid, -task->tk_status);
da351878
CL
1300 }
1301
1302 rpc_exit(task, status);
1303 return;
1304
da45828e
TM
1305retry_timeout:
1306 task->tk_action = call_timeout;
da351878
CL
1307}
1308
1309/*
1310 * 4b. Connect to the RPC server
1da177e4
LT
1311 */
1312static void
1313call_connect(struct rpc_task *task)
1314{
da351878 1315 struct rpc_xprt *xprt = task->tk_xprt;
1da177e4 1316
46121cf7 1317 dprintk("RPC: %5u call_connect xprt %p %s connected\n",
da351878
CL
1318 task->tk_pid, xprt,
1319 (xprt_connected(xprt) ? "is" : "is not"));
1da177e4 1320
da351878
CL
1321 task->tk_action = call_transmit;
1322 if (!xprt_connected(xprt)) {
1323 task->tk_action = call_connect_status;
1324 if (task->tk_status < 0)
1325 return;
1326 xprt_connect(task);
1da177e4 1327 }
1da177e4
LT
1328}
1329
1330/*
da351878 1331 * 4c. Sort out connect result
1da177e4
LT
1332 */
1333static void
1334call_connect_status(struct rpc_task *task)
1335{
1336 struct rpc_clnt *clnt = task->tk_client;
1337 int status = task->tk_status;
1338
46121cf7 1339 dprint_status(task);
da351878 1340
1da177e4 1341 task->tk_status = 0;
2a491991 1342 if (status >= 0 || status == -EAGAIN) {
1da177e4
LT
1343 clnt->cl_stats->netreconn++;
1344 task->tk_action = call_transmit;
1345 return;
1346 }
1347
1da177e4 1348 switch (status) {
da45828e
TM
1349 /* if soft mounted, test if we've timed out */
1350 case -ETIMEDOUT:
1351 task->tk_action = call_timeout;
2a491991
TM
1352 break;
1353 default:
1354 rpc_exit(task, -EIO);
1da177e4
LT
1355 }
1356}
1357
1358/*
1359 * 5. Transmit the RPC request, and wait for reply
1360 */
1361static void
1362call_transmit(struct rpc_task *task)
1363{
46121cf7 1364 dprint_status(task);
1da177e4
LT
1365
1366 task->tk_action = call_status;
1367 if (task->tk_status < 0)
1368 return;
1369 task->tk_status = xprt_prepare_transmit(task);
1370 if (task->tk_status != 0)
1371 return;
e0ab53de 1372 task->tk_action = call_transmit_status;
1da177e4 1373 /* Encode here so that rpcsec_gss can use correct sequence number. */
940e3318 1374 if (rpc_task_need_encode(task)) {
e0ab53de 1375 BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
b0e1c57e 1376 rpc_xdr_encode(task);
5e5ce5be 1377 /* Did the encode result in an error condition? */
8b39f2b4
TM
1378 if (task->tk_status != 0) {
1379 /* Was the error nonfatal? */
1380 if (task->tk_status == -EAGAIN)
1381 rpc_delay(task, HZ >> 4);
1382 else
1383 rpc_exit(task, task->tk_status);
e0ab53de 1384 return;
8b39f2b4 1385 }
5e5ce5be 1386 }
1da177e4
LT
1387 xprt_transmit(task);
1388 if (task->tk_status < 0)
1389 return;
e0ab53de
TM
1390 /*
1391 * On success, ensure that we call xprt_end_transmit() before sleeping
1392 * in order to allow access to the socket to other RPC requests.
1393 */
1394 call_transmit_status(task);
55ae1aab 1395 if (rpc_reply_expected(task))
e0ab53de
TM
1396 return;
1397 task->tk_action = rpc_exit_task;
fda13939 1398 rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
e0ab53de
TM
1399}
1400
1401/*
1402 * 5a. Handle cleanup after a transmission
1403 */
1404static void
1405call_transmit_status(struct rpc_task *task)
1406{
1407 task->tk_action = call_status;
206a134b
CL
1408
1409 /*
1410 * Common case: success. Force the compiler to put this
1411 * test first.
1412 */
1413 if (task->tk_status == 0) {
1414 xprt_end_transmit(task);
1415 rpc_task_force_reencode(task);
1416 return;
1417 }
1418
15f081ca
TM
1419 switch (task->tk_status) {
1420 case -EAGAIN:
1421 break;
1422 default:
206a134b 1423 dprint_status(task);
15f081ca 1424 xprt_end_transmit(task);
09a21c41
CL
1425 rpc_task_force_reencode(task);
1426 break;
15f081ca
TM
1427 /*
1428 * Special cases: if we've been waiting on the
1429 * socket's write_space() callback, or if the
1430 * socket just returned a connection error,
1431 * then hold onto the transport lock.
1432 */
1433 case -ECONNREFUSED:
15f081ca
TM
1434 case -EHOSTDOWN:
1435 case -EHOSTUNREACH:
1436 case -ENETUNREACH:
09a21c41
CL
1437 if (RPC_IS_SOFTCONN(task)) {
1438 xprt_end_transmit(task);
1439 rpc_exit(task, task->tk_status);
1440 break;
1441 }
1442 case -ECONNRESET:
1443 case -ENOTCONN:
c8485e4d 1444 case -EPIPE:
15f081ca
TM
1445 rpc_task_force_reencode(task);
1446 }
1da177e4
LT
1447}
1448
9e00abc3 1449#if defined(CONFIG_SUNRPC_BACKCHANNEL)
55ae1aab
RL
1450/*
1451 * 5b. Send the backchannel RPC reply. On error, drop the reply. In
1452 * addition, disconnect on connectivity errors.
1453 */
1454static void
1455call_bc_transmit(struct rpc_task *task)
1456{
1457 struct rpc_rqst *req = task->tk_rqstp;
1458
1459 BUG_ON(task->tk_status != 0);
1460 task->tk_status = xprt_prepare_transmit(task);
1461 if (task->tk_status == -EAGAIN) {
1462 /*
1463 * Could not reserve the transport. Try again after the
1464 * transport is released.
1465 */
1466 task->tk_status = 0;
1467 task->tk_action = call_bc_transmit;
1468 return;
1469 }
1470
1471 task->tk_action = rpc_exit_task;
1472 if (task->tk_status < 0) {
1473 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1474 "error: %d\n", task->tk_status);
1475 return;
1476 }
1477
1478 xprt_transmit(task);
1479 xprt_end_transmit(task);
1480 dprint_status(task);
1481 switch (task->tk_status) {
1482 case 0:
1483 /* Success */
1484 break;
1485 case -EHOSTDOWN:
1486 case -EHOSTUNREACH:
1487 case -ENETUNREACH:
1488 case -ETIMEDOUT:
1489 /*
1490 * Problem reaching the server. Disconnect and let the
1491 * forechannel reestablish the connection. The server will
1492 * have to retransmit the backchannel request and we'll
1493 * reprocess it. Since these ops are idempotent, there's no
1494 * need to cache our reply at this time.
1495 */
1496 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1497 "error: %d\n", task->tk_status);
1498 xprt_conditional_disconnect(task->tk_xprt,
1499 req->rq_connect_cookie);
1500 break;
1501 default:
1502 /*
1503 * We were unable to reply and will have to drop the
1504 * request. The server should reconnect and retransmit.
1505 */
1506 BUG_ON(task->tk_status == -EAGAIN);
1507 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1508 "error: %d\n", task->tk_status);
1509 break;
1510 }
1511 rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1512}
9e00abc3 1513#endif /* CONFIG_SUNRPC_BACKCHANNEL */
55ae1aab 1514
1da177e4
LT
1515/*
1516 * 6. Sort out the RPC call status
1517 */
1518static void
1519call_status(struct rpc_task *task)
1520{
1521 struct rpc_clnt *clnt = task->tk_client;
1522 struct rpc_rqst *req = task->tk_rqstp;
1523 int status;
1524
dd2b63d0
RL
1525 if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1526 task->tk_status = req->rq_reply_bytes_recvd;
1da177e4 1527
46121cf7 1528 dprint_status(task);
1da177e4
LT
1529
1530 status = task->tk_status;
1531 if (status >= 0) {
1532 task->tk_action = call_decode;
1533 return;
1534 }
1535
1536 task->tk_status = 0;
1537 switch(status) {
76303992
TM
1538 case -EHOSTDOWN:
1539 case -EHOSTUNREACH:
1540 case -ENETUNREACH:
1541 /*
1542 * Delay any retries for 3 seconds, then handle as if it
1543 * were a timeout.
1544 */
1545 rpc_delay(task, 3*HZ);
1da177e4
LT
1546 case -ETIMEDOUT:
1547 task->tk_action = call_timeout;
241c39b9 1548 if (task->tk_client->cl_discrtry)
7c1d71cf
TM
1549 xprt_conditional_disconnect(task->tk_xprt,
1550 req->rq_connect_cookie);
1da177e4 1551 break;
c8485e4d 1552 case -ECONNRESET:
1da177e4 1553 case -ECONNREFUSED:
35f5a422 1554 rpc_force_rebind(clnt);
c8485e4d
TM
1555 rpc_delay(task, 3*HZ);
1556 case -EPIPE:
1557 case -ENOTCONN:
1da177e4
LT
1558 task->tk_action = call_bind;
1559 break;
1560 case -EAGAIN:
1561 task->tk_action = call_transmit;
1562 break;
1563 case -EIO:
1564 /* shutdown or soft timeout */
1565 rpc_exit(task, status);
1566 break;
1567 default:
b6b6152c
OK
1568 if (clnt->cl_chatty)
1569 printk("%s: RPC call returned error %d\n",
1da177e4
LT
1570 clnt->cl_protname, -status);
1571 rpc_exit(task, status);
1da177e4
LT
1572 }
1573}
1574
1575/*
e0ab53de 1576 * 6a. Handle RPC timeout
1da177e4
LT
1577 * We do not release the request slot, so we keep using the
1578 * same XID for all retransmits.
1579 */
1580static void
1581call_timeout(struct rpc_task *task)
1582{
1583 struct rpc_clnt *clnt = task->tk_client;
1584
1585 if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
46121cf7 1586 dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1da177e4
LT
1587 goto retry;
1588 }
1589
46121cf7 1590 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
ef759a2e
CL
1591 task->tk_timeouts++;
1592
3a28becc
CL
1593 if (RPC_IS_SOFTCONN(task)) {
1594 rpc_exit(task, -ETIMEDOUT);
1595 return;
1596 }
1da177e4 1597 if (RPC_IS_SOFT(task)) {
b6b6152c
OK
1598 if (clnt->cl_chatty)
1599 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1da177e4 1600 clnt->cl_protname, clnt->cl_server);
7494d00c
TM
1601 if (task->tk_flags & RPC_TASK_TIMEOUT)
1602 rpc_exit(task, -ETIMEDOUT);
1603 else
1604 rpc_exit(task, -EIO);
1da177e4
LT
1605 return;
1606 }
1607
f518e35a 1608 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1da177e4 1609 task->tk_flags |= RPC_CALL_MAJORSEEN;
b6b6152c
OK
1610 if (clnt->cl_chatty)
1611 printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1da177e4
LT
1612 clnt->cl_protname, clnt->cl_server);
1613 }
35f5a422 1614 rpc_force_rebind(clnt);
b48633bd
TM
1615 /*
1616 * Did our request time out due to an RPCSEC_GSS out-of-sequence
1617 * event? RFC2203 requires the server to drop all such requests.
1618 */
1619 rpcauth_invalcred(task);
1da177e4
LT
1620
1621retry:
1622 clnt->cl_stats->rpcretrans++;
1623 task->tk_action = call_bind;
1624 task->tk_status = 0;
1625}
1626
1627/*
1628 * 7. Decode the RPC reply
1629 */
1630static void
1631call_decode(struct rpc_task *task)
1632{
1633 struct rpc_clnt *clnt = task->tk_client;
1634 struct rpc_rqst *req = task->tk_rqstp;
bf269551 1635 kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
d8ed029d 1636 __be32 *p;
1da177e4 1637
726fd6ad 1638 dprint_status(task);
1da177e4 1639
f518e35a 1640 if (task->tk_flags & RPC_CALL_MAJORSEEN) {
b6b6152c
OK
1641 if (clnt->cl_chatty)
1642 printk(KERN_NOTICE "%s: server %s OK\n",
1643 clnt->cl_protname, clnt->cl_server);
1da177e4
LT
1644 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1645 }
1646
43ac3f29
TM
1647 /*
1648 * Ensure that we see all writes made by xprt_complete_rqst()
dd2b63d0 1649 * before it changed req->rq_reply_bytes_recvd.
43ac3f29
TM
1650 */
1651 smp_rmb();
1da177e4
LT
1652 req->rq_rcv_buf.len = req->rq_private_buf.len;
1653
1654 /* Check that the softirq receive buffer is valid */
1655 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
1656 sizeof(req->rq_rcv_buf)) != 0);
1657
1e799b67
TM
1658 if (req->rq_rcv_buf.len < 12) {
1659 if (!RPC_IS_SOFT(task)) {
1660 task->tk_action = call_bind;
1661 clnt->cl_stats->rpcretrans++;
1662 goto out_retry;
1663 }
1664 dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
1665 clnt->cl_protname, task->tk_status);
1666 task->tk_action = call_timeout;
1667 goto out_retry;
1668 }
1669
b0e1c57e 1670 p = rpc_verify_header(task);
abbcf28f
TM
1671 if (IS_ERR(p)) {
1672 if (p == ERR_PTR(-EAGAIN))
1673 goto out_retry;
1674 return;
1da177e4
LT
1675 }
1676
abbcf28f 1677 task->tk_action = rpc_exit_task;
1da177e4 1678
6d5fcb5a 1679 if (decode) {
1da177e4
LT
1680 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
1681 task->tk_msg.rpc_resp);
6d5fcb5a 1682 }
46121cf7
CL
1683 dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
1684 task->tk_status);
1da177e4
LT
1685 return;
1686out_retry:
1da177e4 1687 task->tk_status = 0;
b0e1c57e 1688 /* Note: rpc_verify_header() may have freed the RPC slot */
24b74bf0 1689 if (task->tk_rqstp == req) {
dd2b63d0 1690 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
24b74bf0 1691 if (task->tk_client->cl_discrtry)
7c1d71cf
TM
1692 xprt_conditional_disconnect(task->tk_xprt,
1693 req->rq_connect_cookie);
24b74bf0 1694 }
1da177e4
LT
1695}
1696
d8ed029d 1697static __be32 *
b0e1c57e 1698rpc_encode_header(struct rpc_task *task)
1da177e4
LT
1699{
1700 struct rpc_clnt *clnt = task->tk_client;
1da177e4 1701 struct rpc_rqst *req = task->tk_rqstp;
d8ed029d 1702 __be32 *p = req->rq_svec[0].iov_base;
1da177e4
LT
1703
1704 /* FIXME: check buffer size? */
808012fb
CL
1705
1706 p = xprt_skip_transport_header(task->tk_xprt, p);
1da177e4
LT
1707 *p++ = req->rq_xid; /* XID */
1708 *p++ = htonl(RPC_CALL); /* CALL */
1709 *p++ = htonl(RPC_VERSION); /* RPC version */
1710 *p++ = htonl(clnt->cl_prog); /* program number */
1711 *p++ = htonl(clnt->cl_vers); /* program version */
1712 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
334ccfd5
TM
1713 p = rpcauth_marshcred(task, p);
1714 req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
1715 return p;
1da177e4
LT
1716}
1717
d8ed029d 1718static __be32 *
b0e1c57e 1719rpc_verify_header(struct rpc_task *task)
1da177e4
LT
1720{
1721 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
1722 int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
d8ed029d
AD
1723 __be32 *p = iov->iov_base;
1724 u32 n;
1da177e4
LT
1725 int error = -EACCES;
1726
e8896495
DH
1727 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1728 /* RFC-1014 says that the representation of XDR data must be a
1729 * multiple of four bytes
1730 * - if it isn't pointer subtraction in the NFS client may give
1731 * undefined results
1732 */
8a702bbb 1733 dprintk("RPC: %5u %s: XDR representation not a multiple of"
0dc47877 1734 " 4 bytes: 0x%x\n", task->tk_pid, __func__,
8a702bbb 1735 task->tk_rqstp->rq_rcv_buf.len);
e8896495
DH
1736 goto out_eio;
1737 }
1da177e4
LT
1738 if ((len -= 3) < 0)
1739 goto out_overflow;
1da177e4 1740
f4a2e418 1741 p += 1; /* skip XID */
1da177e4 1742 if ((n = ntohl(*p++)) != RPC_REPLY) {
8a702bbb 1743 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
f4a2e418 1744 task->tk_pid, __func__, n);
abbcf28f 1745 goto out_garbage;
1da177e4 1746 }
f4a2e418 1747
1da177e4
LT
1748 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
1749 if (--len < 0)
1750 goto out_overflow;
1751 switch ((n = ntohl(*p++))) {
89f0e4fe
JP
1752 case RPC_AUTH_ERROR:
1753 break;
1754 case RPC_MISMATCH:
1755 dprintk("RPC: %5u %s: RPC call version mismatch!\n",
1756 task->tk_pid, __func__);
1757 error = -EPROTONOSUPPORT;
1758 goto out_err;
1759 default:
1760 dprintk("RPC: %5u %s: RPC call rejected, "
1761 "unknown error: %x\n",
1762 task->tk_pid, __func__, n);
1763 goto out_eio;
1da177e4
LT
1764 }
1765 if (--len < 0)
1766 goto out_overflow;
1767 switch ((n = ntohl(*p++))) {
1768 case RPC_AUTH_REJECTEDCRED:
1769 case RPC_AUTH_REJECTEDVERF:
1770 case RPCSEC_GSS_CREDPROBLEM:
1771 case RPCSEC_GSS_CTXPROBLEM:
1772 if (!task->tk_cred_retry)
1773 break;
1774 task->tk_cred_retry--;
46121cf7 1775 dprintk("RPC: %5u %s: retry stale creds\n",
0dc47877 1776 task->tk_pid, __func__);
1da177e4 1777 rpcauth_invalcred(task);
220bcc2a
TM
1778 /* Ensure we obtain a new XID! */
1779 xprt_release(task);
118df3d1 1780 task->tk_action = call_reserve;
abbcf28f 1781 goto out_retry;
1da177e4
LT
1782 case RPC_AUTH_BADCRED:
1783 case RPC_AUTH_BADVERF:
1784 /* possibly garbled cred/verf? */
1785 if (!task->tk_garb_retry)
1786 break;
1787 task->tk_garb_retry--;
46121cf7 1788 dprintk("RPC: %5u %s: retry garbled creds\n",
0dc47877 1789 task->tk_pid, __func__);
1da177e4 1790 task->tk_action = call_bind;
abbcf28f 1791 goto out_retry;
1da177e4 1792 case RPC_AUTH_TOOWEAK:
b0e1c57e 1793 printk(KERN_NOTICE "RPC: server %s requires stronger "
1356b8c2 1794 "authentication.\n", task->tk_client->cl_server);
1da177e4
LT
1795 break;
1796 default:
8a702bbb 1797 dprintk("RPC: %5u %s: unknown auth error: %x\n",
0dc47877 1798 task->tk_pid, __func__, n);
1da177e4
LT
1799 error = -EIO;
1800 }
46121cf7 1801 dprintk("RPC: %5u %s: call rejected %d\n",
0dc47877 1802 task->tk_pid, __func__, n);
1da177e4
LT
1803 goto out_err;
1804 }
1805 if (!(p = rpcauth_checkverf(task, p))) {
8a702bbb 1806 dprintk("RPC: %5u %s: auth check failed\n",
0dc47877 1807 task->tk_pid, __func__);
abbcf28f 1808 goto out_garbage; /* bad verifier, retry */
1da177e4 1809 }
d8ed029d 1810 len = p - (__be32 *)iov->iov_base - 1;
1da177e4
LT
1811 if (len < 0)
1812 goto out_overflow;
1813 switch ((n = ntohl(*p++))) {
1814 case RPC_SUCCESS:
1815 return p;
1816 case RPC_PROG_UNAVAIL:
46121cf7 1817 dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
0dc47877 1818 task->tk_pid, __func__,
1da177e4
LT
1819 (unsigned int)task->tk_client->cl_prog,
1820 task->tk_client->cl_server);
cdf47706
AG
1821 error = -EPFNOSUPPORT;
1822 goto out_err;
1da177e4 1823 case RPC_PROG_MISMATCH:
46121cf7 1824 dprintk("RPC: %5u %s: program %u, version %u unsupported by "
0dc47877 1825 "server %s\n", task->tk_pid, __func__,
1da177e4
LT
1826 (unsigned int)task->tk_client->cl_prog,
1827 (unsigned int)task->tk_client->cl_vers,
1828 task->tk_client->cl_server);
cdf47706
AG
1829 error = -EPROTONOSUPPORT;
1830 goto out_err;
1da177e4 1831 case RPC_PROC_UNAVAIL:
3748f1e4 1832 dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
46121cf7 1833 "version %u on server %s\n",
0dc47877 1834 task->tk_pid, __func__,
3748f1e4 1835 rpc_proc_name(task),
1da177e4
LT
1836 task->tk_client->cl_prog,
1837 task->tk_client->cl_vers,
1838 task->tk_client->cl_server);
cdf47706
AG
1839 error = -EOPNOTSUPP;
1840 goto out_err;
1da177e4 1841 case RPC_GARBAGE_ARGS:
46121cf7 1842 dprintk("RPC: %5u %s: server saw garbage\n",
0dc47877 1843 task->tk_pid, __func__);
1da177e4
LT
1844 break; /* retry */
1845 default:
8a702bbb 1846 dprintk("RPC: %5u %s: server accept status: %x\n",
0dc47877 1847 task->tk_pid, __func__, n);
1da177e4
LT
1848 /* Also retry */
1849 }
1850
abbcf28f 1851out_garbage:
1da177e4
LT
1852 task->tk_client->cl_stats->rpcgarbage++;
1853 if (task->tk_garb_retry) {
1854 task->tk_garb_retry--;
46121cf7 1855 dprintk("RPC: %5u %s: retrying\n",
0dc47877 1856 task->tk_pid, __func__);
1da177e4 1857 task->tk_action = call_bind;
abbcf28f
TM
1858out_retry:
1859 return ERR_PTR(-EAGAIN);
1da177e4 1860 }
1da177e4
LT
1861out_eio:
1862 error = -EIO;
1863out_err:
1864 rpc_exit(task, error);
8a702bbb 1865 dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
0dc47877 1866 __func__, error);
abbcf28f 1867 return ERR_PTR(error);
1da177e4 1868out_overflow:
8a702bbb 1869 dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
0dc47877 1870 __func__);
abbcf28f 1871 goto out_garbage;
1da177e4 1872}
5ee0ed7d 1873
9f06c719 1874static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
5ee0ed7d 1875{
5ee0ed7d
TM
1876}
1877
bf269551 1878static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
5ee0ed7d
TM
1879{
1880 return 0;
1881}
1882
1883static struct rpc_procinfo rpcproc_null = {
1884 .p_encode = rpcproc_encode_null,
1885 .p_decode = rpcproc_decode_null,
1886};
1887
caabea8a 1888static int rpc_ping(struct rpc_clnt *clnt)
5ee0ed7d
TM
1889{
1890 struct rpc_message msg = {
1891 .rpc_proc = &rpcproc_null,
1892 };
1893 int err;
1894 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
caabea8a 1895 err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
5ee0ed7d
TM
1896 put_rpccred(msg.rpc_cred);
1897 return err;
1898}
188fef11 1899
5e1550d6
TM
1900struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
1901{
1902 struct rpc_message msg = {
1903 .rpc_proc = &rpcproc_null,
1904 .rpc_cred = cred,
1905 };
84115e1c
TM
1906 struct rpc_task_setup task_setup_data = {
1907 .rpc_client = clnt,
1908 .rpc_message = &msg,
1909 .callback_ops = &rpc_default_ops,
1910 .flags = flags,
1911 };
c970aa85 1912 return rpc_run_task(&task_setup_data);
5e1550d6 1913}
e8914c65 1914EXPORT_SYMBOL_GPL(rpc_call_null);
5e1550d6 1915
188fef11 1916#ifdef RPC_DEBUG
68a23ee9
CL
1917static void rpc_show_header(void)
1918{
cb3997b5
CL
1919 printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
1920 "-timeout ---ops--\n");
68a23ee9
CL
1921}
1922
38e886e0
CL
1923static void rpc_show_task(const struct rpc_clnt *clnt,
1924 const struct rpc_task *task)
1925{
1926 const char *rpc_waitq = "none";
38e886e0
CL
1927
1928 if (RPC_IS_QUEUED(task))
1929 rpc_waitq = rpc_qname(task->tk_waitqueue);
1930
b3bcedad 1931 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
cb3997b5
CL
1932 task->tk_pid, task->tk_flags, task->tk_status,
1933 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
1934 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
b3bcedad 1935 task->tk_action, rpc_waitq);
38e886e0
CL
1936}
1937
70abc49b 1938void rpc_show_tasks(struct net *net)
188fef11
TM
1939{
1940 struct rpc_clnt *clnt;
38e886e0 1941 struct rpc_task *task;
68a23ee9 1942 int header = 0;
70abc49b 1943 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
188fef11 1944
70abc49b
SK
1945 spin_lock(&sn->rpc_client_lock);
1946 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
188fef11 1947 spin_lock(&clnt->cl_lock);
38e886e0 1948 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
68a23ee9
CL
1949 if (!header) {
1950 rpc_show_header();
1951 header++;
1952 }
38e886e0 1953 rpc_show_task(clnt, task);
188fef11
TM
1954 }
1955 spin_unlock(&clnt->cl_lock);
1956 }
70abc49b 1957 spin_unlock(&sn->rpc_client_lock);
188fef11
TM
1958}
1959#endif