SUNRPC: Finish API to load RPC transport implementations dynamically
/*
 * linux/net/sunrpc/xprt.c
 *
 * This is a generic RPC call interface supporting congestion avoidance,
 * and asynchronous calls.
 *
 * The interface works like this:
 *
 * - When a process places a call, it allocates a request slot if
 *   one is available. Otherwise, it sleeps on the backlog queue
 *   (xprt_reserve).
 * - Next, the caller puts together the RPC message, stuffs it into
 *   the request struct, and calls xprt_transmit().
 * - xprt_transmit sends the message and installs the caller on the
 *   transport's wait list. At the same time, it installs a timer that
 *   is run after the packet's timeout has expired.
 * - When a packet arrives, the data_ready handler walks the list of
 *   pending requests for that transport. If a matching XID is found, the
 *   caller is woken up, and the timer removed.
 * - When no reply arrives within the timeout interval, the timer is
 *   fired by the kernel and runs xprt_timer(). It either adjusts the
 *   timeout values (minor timeout) or wakes up the caller with a status
 *   of -ETIMEDOUT.
 * - When the caller receives a notification from RPC that a reply arrived,
 *   it should release the RPC slot, and process the reply.
 *   If the call timed out, it may choose to retry the operation by
 *   adjusting the initial timeout value, and simply calling rpc_call
 *   again.
 *
 * Support for async RPC is done through a set of RPC-specific scheduling
 * primitives that `transparently' work for processes as well as async
 * tasks that rely on callbacks.
 *
 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
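
/*
 * Illustrative sketch of the per-request flow described above, seen
 * from the caller's side (hedged: the rpc_clnt glue that drives these
 * steps lives in clnt.c, not in this file; names below are the
 * functions defined here):
 *
 *	xprt_reserve(task);		   reserve a slot or sleep on ->backlog
 *	xprt_prepare_transmit(task);	   serialize access to the transport
 *	xprt_transmit(task);		   queue on ->recv, send, arm xprt_timer
 *	... a reply matched by XID wakes the task (xprt_complete_rqst),
 *	    or xprt_timer() fires and the task sees -ETIMEDOUT ...
 *	xprt_release(task);		   return the slot, wake ->backlog
 */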

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 * - a reply is received and
 * - a full number of requests are outstanding and
 * - the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t == transport)
			goto out;
	}

	result = -EINVAL;
	if (try_module_get(THIS_MODULE)) {
		list_add_tail(&transport->list, &xprt_list);
		printk(KERN_INFO "RPC: Registered %s transport module.\n",
			transport->name);
		result = 0;
	}

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			module_put(THIS_MODULE);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
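
/*
 * Example usage (a minimal sketch, not part of this file): a transport
 * implementation built as a module pairs these calls in its init and
 * exit routines. The "example" transport and its xs_setup_example()
 * setup routine below are hypothetical; .list, .name, .family,
 * .protocol and .setup are the xprt_class fields this file relies on.
 *
 *	static struct xprt_class example_transport = {
 *		.list		= LIST_HEAD_INIT(example_transport.list),
 *		.name		= "example",
 *		.family		= AF_INET,
 *		.protocol	= IPPROTO_UDP,
 *		.setup		= xs_setup_example,
 *	};
 *
 *	static int __init init_example_xprt(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit cleanup_example_xprt(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */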

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		if (task == NULL)
			return 0;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
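
/*
 * Worked numbers for the update above (illustrative only): with a
 * scaled cwnd of 512 (two requests), a successful reply grows the
 * window by (256 * 256 + 256) / 512 == 128, i.e. half a request, which
 * is the classic 1/cwnd additive increase. A timeout instead halves
 * the window to 256, never dropping below RPC_CWNDSCALE (one request).
 */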

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 *
 */
void xprt_wait_for_buffer_space(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_task(xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
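
/*
 * Illustrative numbers for the RTT-based timeout above: if
 * rpc_calc_rto() estimates a one-second RTO, one minor timeout has
 * been recorded for this procedure's timer class, and this request
 * has already been retried once, then tk_timeout = 1s << (1 + 1) = 4s,
 * clamped to the transport's to_maxval.
 */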

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	struct rpc_timeout *to = &req->rq_xprt->timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_timeout *to = &xprt->timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
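
/*
 * Worked example (illustrative settings): with to_initval = 5 * HZ,
 * to_retries = 3 and to_exponential set, xprt_reset_majortimeo() gives
 * rq_majortimeo a span of 5s << 3 == 40s. Minor timeouts inside that
 * window double rq_timeout (5s, 10s, 20s, capped at to_maxval); once
 * rq_majortimeo passes, rq_timeout resets to to_initval, the RTT
 * estimator restarts, and the caller sees -ETIMEDOUT.
 */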

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt_disconnect(xprt);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect);

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	if (xprt_connecting(xprt))
		xprt_release_write(xprt, NULL);
	else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EIO;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = xprt->connect_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
	return;
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (task->tk_status >= 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
		dprintk("RPC: %5u xprt_connect_status: server %s refused "
				"connection\n", task->tk_pid,
				task->tk_client->cl_server);
		break;
	case -ENOTCONN:
		dprintk("RPC: %5u xprt_connect_status: connection broken\n",
				task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct list_head *pos;

	list_for_each(pos, &xprt->recv) {
		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
		if (entry->rq_xid == xid)
			return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 *
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer,
					(long)jiffies - req->rq_xtime);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	task->tk_xprt->stat.recvs++;
	task->tk_rtt = (long)jiffies - req->rq_xtime;

	list_del_init(&req->rq_list);
	/* Ensure all writes are done before we update req->rq_received */
	smp_wmb();
	req->rq_received = req->rq_private_buf.len = copied;
	rpc_wake_up_task(task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock(&xprt->transport_lock);
	if (!req->rq_received) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
		task->tk_status = -ETIMEDOUT;
	}
	task->tk_timeout = 0;
	rpc_wake_up_task(task);
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(task)) {
		err = -EAGAIN;
		goto out_unlock;
	}

	if (!xprt_connected(xprt)) {
		err = -ENOTCONN;
		goto out_unlock;
	}
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	status = xprt->ops->send_request(task);
	if (status == 0) {
		dprintk("RPC: %5u xmit complete\n", task->tk_pid);
		spin_lock_bh(&xprt->transport_lock);

		xprt->ops->set_retrans_timeout(task);

		xprt->stat.sends++;
		xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
		xprt->stat.bklog_u += xprt->backlog.qlen;

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (!req->rq_received)
			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
		spin_unlock_bh(&xprt->transport_lock);
		return;
	}

	/* Note: at this point, task->tk_sleeping has not yet been set,
	 * hence there is no danger of the waking up task being put on
	 * schedq, and being picked up by a parallel run of rpciod().
	 */
	task->tk_status = status;
	if (status == -ECONNREFUSED)
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
}

static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC: waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	do_xprt_reserve(task);
	spin_unlock(&xprt->reserve_lock);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_timeout = xprt->timeout.to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req;

	if (!(req = task->tk_rqstp))
		return;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	xprt->ops->buf_free(req->rq_buffer);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);
	memset(req, 0, sizeof(*req));	/* mark unused */

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}

/**
 * xprt_set_timeout - set constant RPC timeout
 * @to: RPC timeout parameters to set up
 * @retr: number of retries
 * @incr: amount of increase after each retry
 *
 */
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
	to->to_initval =
	to->to_increment = incr;
	to->to_maxval = to->to_initval + (incr * retr);
	to->to_retries = retr;
	to->to_exponential = 0;
}
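
/*
 * Example (a sketch; the values are arbitrary):
 * xprt_set_timeout(to, 5, 10 * HZ) yields an initial 10-second timeout
 * that grows by 10 seconds per retry, up to
 * to_maxval = 10s + (10s * 5) == 60s over five retries, with
 * exponential backoff disabled.
 */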

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct rpc_xprtsock_create *args)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if ((t->family == args->dstaddr->sa_family) &&
		    (t->protocol == args->proto)) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%u/%d) not supported\n",
			args->dstaddr->sa_family, args->proto);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		return xprt;
	}

	kref_init(&xprt->kref);
	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	init_timer(&xprt->timer);
	xprt->timer.function = xprt_init_autodisconnect;
	xprt->timer.data = (unsigned long) xprt;
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 */
static void xprt_destroy(struct kref *kref)
{
	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

	dprintk("RPC: destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	kref_put(&xprt->kref, xprt_destroy);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	kref_get(&xprt->kref);
	return xprt;
}