svc: Add xpo_prep_reply_hdr
net/sunrpc/svcsock.c

/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
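
/*
 * Illustrative sketch (not compiled): the 4-byte record marker discussed
 * above, as svc_tcp_recvfrom() and svc_tcp_sendto() below interpret it.
 * The top bit flags the last fragment of a record; the low 31 bits carry
 * the fragment length.  The helper names here are hypothetical.
 */
#if 0
static inline u32 rpc_record_len(__be32 marker)
{
        return ntohl(marker) & 0x7fffffff;      /* low 31 bits: length */
}

static inline int rpc_record_is_last(__be32 marker)
{
        return (ntohl(marker) & 0x80000000) != 0; /* top bit: last fragment */
}
#endif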

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>

/* SMP locking strategy:
 *
 * svc_pool->sp_lock protects most of the fields of that pool.
 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 * when both need to be taken (rare), svc_serv->sv_lock is first.
 * BKL protects svc_serv->sv_nrthread.
 * svc_sock->sk_lock protects the svc_sock->sk_deferred list
 * and the ->sk_info_authunix cache.
 * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 * Some flags can be set to certain values at any time
 * providing that certain rules are followed:
 *
 * SK_CONN, SK_DATA, can be set or cleared at any time.
 *      after a set, svc_sock_enqueue must be called.
 *      after a clear, the socket must be read/accepted
 *       if this succeeds, it must be set again.
 * SK_CLOSE can be set at any time. It is never cleared.
 * sk_inuse contains a bias of '1' until SK_DEAD is set.
 *      so when sk_inuse hits zero, we know the socket is dead
 *      and no-one is using it.
 * SK_DEAD can only be set while SK_BUSY is held which ensures
 *      no other thread will be using the socket or will try to
 *      set SK_DEAD.
 *
 */
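
/*
 * Illustrative sketch (not compiled): the flag rule above in code form.
 * Whoever sets SK_CONN or SK_DATA must follow up with svc_sock_enqueue(),
 * exactly as the sk_data_ready callbacks below do.
 */
#if 0
        set_bit(SK_DATA, &svsk->sk_flags);      /* new data has arrived... */
        svc_sock_enqueue(svsk);                 /* ...so a thread is kicked */
#endif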

#define RPCDBG_FACILITY RPCDBG_SVCXPRT


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
                                         int *errp, int flags);
static void svc_delete_socket(struct svc_sock *svsk);
static void svc_udp_data_ready(struct sock *, int);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);
static void svc_close_socket(struct svc_sock *svsk);
static void svc_sock_detach(struct svc_xprt *);
static void svc_sock_free(struct svc_xprt *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
        struct sock *sk = sock->sk;
        BUG_ON(sock_owned_by_user(sk));
        switch (sk->sk_family) {
        case AF_INET:
                sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
                    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
                break;

        case AF_INET6:
                sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
                    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
                break;

        default:
                BUG();
        }
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif

static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
        switch (addr->sa_family) {
        case AF_INET:
                snprintf(buf, len, "%u.%u.%u.%u, port=%u",
                        NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
                        ntohs(((struct sockaddr_in *) addr)->sin_port));
                break;

        case AF_INET6:
                snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
                        NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
                        ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
                break;

        default:
                snprintf(buf, len, "unknown address type: %d", addr->sa_family);
                break;
        }
        return buf;
}

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
        return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
        list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
        list_del(&rqstp->rq_list);
}

/*
 * Release an skbuff after use
 */
static void svc_release_skb(struct svc_rqst *rqstp)
{
        struct sk_buff *skb = rqstp->rq_xprt_ctxt;
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        if (skb) {
                rqstp->rq_xprt_ctxt = NULL;

                dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
                skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
        }
        if (dr) {
                rqstp->rq_deferred = NULL;
                kfree(dr);
        }
}

/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
        int wspace;

        if (svsk->sk_sock->type == SOCK_STREAM)
                wspace = sk_stream_wspace(svsk->sk_sk);
        else
                wspace = sock_wspace(svsk->sk_sk);

        return wspace;
}

/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
        struct svc_serv *serv = svsk->sk_server;
        struct svc_pool *pool;
        struct svc_rqst *rqstp;
        int cpu;

        if (!(svsk->sk_flags &
              ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
                return;
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                return;

        cpu = get_cpu();
        pool = svc_pool_for_cpu(svsk->sk_server, cpu);
        put_cpu();

        spin_lock_bh(&pool->sp_lock);

        if (!list_empty(&pool->sp_threads) &&
            !list_empty(&pool->sp_sockets))
                printk(KERN_ERR
                        "svc_sock_enqueue: threads and sockets both waiting??\n");

        if (test_bit(SK_DEAD, &svsk->sk_flags)) {
                /* Don't enqueue dead sockets */
                dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        /* Mark socket as busy. It will remain in this state until the
         * server has processed all pending data and put the socket back
         * on the idle list.  We update SK_BUSY atomically because
         * it also guards against trying to enqueue the svc_sock twice.
         */
        if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
                /* Don't enqueue socket while already enqueued */
                dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }
        BUG_ON(svsk->sk_pool != NULL);
        svsk->sk_pool = pool;

        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
        if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
             > svc_sock_wspace(svsk))
            && !test_bit(SK_CLOSE, &svsk->sk_flags)
            && !test_bit(SK_CONN, &svsk->sk_flags)) {
                /* Don't enqueue while not enough space for reply */
                dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
                        svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
                        svc_sock_wspace(svsk));
                svsk->sk_pool = NULL;
                clear_bit(SK_BUSY, &svsk->sk_flags);
                goto out_unlock;
        }
        clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);


        if (!list_empty(&pool->sp_threads)) {
                rqstp = list_entry(pool->sp_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: socket %p served by daemon %p\n",
                        svsk->sk_sk, rqstp);
                svc_thread_dequeue(pool, rqstp);
                if (rqstp->rq_sock)
                        printk(KERN_ERR
                                "svc_sock_enqueue: server %p, rq_sock=%p!\n",
                                rqstp, rqstp->rq_sock);
                rqstp->rq_sock = svsk;
                atomic_inc(&svsk->sk_inuse);
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
                BUG_ON(svsk->sk_pool != pool);
                wake_up(&rqstp->rq_wait);
        } else {
                dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
                list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
                BUG_ON(svsk->sk_pool != pool);
        }

out_unlock:
        spin_unlock_bh(&pool->sp_lock);
}

/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
        struct svc_sock *svsk;

        if (list_empty(&pool->sp_sockets))
                return NULL;

        svsk = list_entry(pool->sp_sockets.next,
                          struct svc_sock, sk_ready);
        list_del_init(&svsk->sk_ready);

        dprintk("svc: socket %p dequeued, inuse=%d\n",
                svsk->sk_sk, atomic_read(&svsk->sk_inuse));

        return svsk;
}

/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
        svsk->sk_pool = NULL;
        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
}


/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
        space += rqstp->rq_res.head[0].iov_len;

        if (space < rqstp->rq_reserved) {
                struct svc_sock *svsk = rqstp->rq_sock;
                atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
                rqstp->rq_reserved = space;

                svc_sock_enqueue(svsk);
        }
}

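/*
 * Illustrative sketch (not compiled): a service that already knows its
 * reply will be small can shrink the reservation taken at enqueue time,
 * letting more requests through the write-space check in
 * svc_sock_enqueue().  The 512-byte figure is a made-up example.
 */
#if 0
        svc_reserve(rqstp, 512);        /* head already used, plus 512 bytes */
#endif
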
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
        if (atomic_dec_and_test(&svsk->sk_inuse)) {
                BUG_ON(!test_bit(SK_DEAD, &svsk->sk_flags));
                svsk->sk_xprt.xpt_ops->xpo_free(&svsk->sk_xprt);
        }
}

static void
svc_sock_release(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;

        rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

        svc_free_res_pages(rqstp);
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.page_base = 0;


        /* Reset response buffer and release
         * the reservation.
         * But first, check that enough space was reserved
         * for the reply, otherwise we have a bug!
         */
        if ((rqstp->rq_res.len) > rqstp->rq_reserved)
                printk(KERN_ERR "RPC request reserved %d but used %d\n",
                       rqstp->rq_reserved,
                       rqstp->rq_res.len);

        rqstp->rq_res.head[0].iov_len = 0;
        svc_reserve(rqstp, 0);
        rqstp->rq_sock = NULL;

        svc_sock_put(svsk);
}

/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
        struct svc_rqst *rqstp;
        unsigned int i;
        struct svc_pool *pool;

        for (i = 0; i < serv->sv_nrpools; i++) {
                pool = &serv->sv_pools[i];

                spin_lock_bh(&pool->sp_lock);
                if (!list_empty(&pool->sp_threads)) {
                        rqstp = list_entry(pool->sp_threads.next,
                                           struct svc_rqst,
                                           rq_list);
                        dprintk("svc: daemon %p woken up.\n", rqstp);
                        /*
                        svc_thread_dequeue(pool, rqstp);
                        rqstp->rq_sock = NULL;
                         */
                        wake_up(&rqstp->rq_wait);
                }
                spin_unlock_bh(&pool->sp_lock);
        }
}

union svc_pktinfo_u {
        struct in_pktinfo pkti;
        struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
        CMSG_SPACE(sizeof(union svc_pktinfo_u))
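
/*
 * Illustrative sketch (not compiled): how callers below use this macro.
 * Overlaying the cmsg buffer on an array of longs guarantees alignment
 * that a bare char[] would not, while the union sizes it for whichever
 * address family is in play:
 */
#if 0
        union {
                struct cmsghdr  hdr;
                long            all[SVC_PKTINFO_SPACE / sizeof(long)];
        } buffer;
        struct cmsghdr *cmh = &buffer.hdr;      /* pass as msg_control */
#endif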

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
        switch (rqstp->rq_sock->sk_sk->sk_family) {
        case AF_INET: {
                        struct in_pktinfo *pki = CMSG_DATA(cmh);

                        cmh->cmsg_level = SOL_IP;
                        cmh->cmsg_type = IP_PKTINFO;
                        pki->ipi_ifindex = 0;
                        pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
                        cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
                }
                break;

        case AF_INET6: {
                        struct in6_pktinfo *pki = CMSG_DATA(cmh);

                        cmh->cmsg_level = SOL_IPV6;
                        cmh->cmsg_type = IPV6_PKTINFO;
                        pki->ipi6_ifindex = 0;
                        ipv6_addr_copy(&pki->ipi6_addr,
                                        &rqstp->rq_daddr.addr6);
                        cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
                }
                break;
        }
        return;
}

/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct socket *sock = svsk->sk_sock;
        int slen;
        union {
                struct cmsghdr  hdr;
                long            all[SVC_PKTINFO_SPACE / sizeof(long)];
        } buffer;
        struct cmsghdr *cmh = &buffer.hdr;
        int len = 0;
        int result;
        int size;
        struct page **ppage = xdr->pages;
        size_t base = xdr->page_base;
        unsigned int pglen = xdr->page_len;
        unsigned int flags = MSG_MORE;
        char buf[RPC_MAX_ADDRBUFLEN];

        slen = xdr->len;

        if (rqstp->rq_prot == IPPROTO_UDP) {
                struct msghdr msg = {
                        .msg_name       = &rqstp->rq_addr,
                        .msg_namelen    = rqstp->rq_addrlen,
                        .msg_control    = cmh,
                        .msg_controllen = sizeof(buffer),
                        .msg_flags      = MSG_MORE,
                };

                svc_set_cmsg_data(rqstp, cmh);

                if (sock_sendmsg(sock, &msg, 0) < 0)
                        goto out;
        }

        /* send head */
        if (slen == xdr->head[0].iov_len)
                flags = 0;
        len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
                                  xdr->head[0].iov_len, flags);
        if (len != xdr->head[0].iov_len)
                goto out;
        slen -= xdr->head[0].iov_len;
        if (slen == 0)
                goto out;

        /* send page data */
        size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
        while (pglen > 0) {
                if (slen == size)
                        flags = 0;
                result = kernel_sendpage(sock, *ppage, base, size, flags);
                if (result > 0)
                        len += result;
                if (result != size)
                        goto out;
                slen -= size;
                pglen -= size;
                size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
                base = 0;
                ppage++;
        }
        /* send tail */
        if (xdr->tail[0].iov_len) {
                result = kernel_sendpage(sock, rqstp->rq_respages[0],
                                             ((unsigned long)xdr->tail[0].iov_base)
                                                & (PAGE_SIZE-1),
                                             xdr->tail[0].iov_len, 0);

                if (result > 0)
                        len += result;
        }
out:
        dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
                rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
                xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

        return len;
}

/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
        int len;

        switch(svsk->sk_sk->sk_family) {
        case AF_INET:
                len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
                              svsk->sk_sk->sk_protocol==IPPROTO_UDP?
                              "udp" : "tcp",
                              NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
                              inet_sk(svsk->sk_sk)->num);
                break;
        default:
                len = sprintf(buf, "*unknown-%d*\n",
                               svsk->sk_sk->sk_family);
        }
        return len;
}

int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
        struct svc_sock *svsk, *closesk = NULL;
        int len = 0;

        if (!serv)
                return 0;
        spin_lock_bh(&serv->sv_lock);
        list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
                int onelen = one_sock_name(buf+len, svsk);
                if (toclose && strcmp(toclose, buf+len) == 0)
                        closesk = svsk;
                else
                        len += onelen;
        }
        spin_unlock_bh(&serv->sv_lock);
        if (closesk)
                /* Should unregister with portmap, but you cannot
                 * unregister just one protocol...
                 */
                svc_close_socket(closesk);
        else if (toclose)
                return -ENOENT;
        return len;
}
EXPORT_SYMBOL(svc_sock_names);

/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
        struct socket *sock = svsk->sk_sock;
        int avail, err;

        err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

        return (err >= 0)? avail : err;
}

/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct msghdr msg = {
                .msg_flags      = MSG_DONTWAIT,
        };
        struct sockaddr *sin;
        int len;

        len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
                                msg.msg_flags);

        /* sock_recvmsg doesn't fill in the name/namelen, so we must..
         */
        memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
        rqstp->rq_addrlen = svsk->sk_remotelen;

        /* Destination address in request is needed for binding the
         * source address in RPC callbacks later.
         */
        sin = (struct sockaddr *)&svsk->sk_local;
        switch (sin->sa_family) {
        case AF_INET:
                rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
                break;
        case AF_INET6:
                rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
                break;
        }

        dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
                svsk, iov[0].iov_base, iov[0].iov_len, len);

        return len;
}

/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
        mm_segment_t oldfs;
        oldfs = get_fs(); set_fs(KERNEL_DS);
        sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                        (char*)&snd, sizeof(snd));
        sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
                        (char*)&rcv, sizeof(rcv));
#else
        /* sock_setsockopt limits use to sysctl_?mem_max,
         * which isn't acceptable.  Until that is made conditional
         * on not having CAP_SYS_RESOURCE or similar, we go direct...
         * DaveM said I could!
         */
        lock_sock(sock->sk);
        sock->sk->sk_sndbuf = snd * 2;
        sock->sk->sk_rcvbuf = rcv * 2;
        sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
        release_sock(sock->sk);
#endif
}

/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        if (svsk) {
                dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
                        svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
                set_bit(SK_DATA, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}

/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

        if (svsk) {
                dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
                        svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
                svc_sock_enqueue(svsk);
        }

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
                dprintk("RPC svc_write_space: someone sleeping on %p\n",
                       svsk);
                wake_up_interruptible(sk->sk_sleep);
        }
}

static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
                                            struct cmsghdr *cmh)
{
        switch (rqstp->rq_sock->sk_sk->sk_family) {
        case AF_INET: {
                struct in_pktinfo *pki = CMSG_DATA(cmh);
                rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
                break;
        }
        case AF_INET6: {
                struct in6_pktinfo *pki = CMSG_DATA(cmh);
                ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
                break;
        }
        }
}

/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        struct sk_buff *skb;
        union {
                struct cmsghdr  hdr;
                long            all[SVC_PKTINFO_SPACE / sizeof(long)];
        } buffer;
        struct cmsghdr *cmh = &buffer.hdr;
        int err, len;
        struct msghdr msg = {
                .msg_name = svc_addr(rqstp),
                .msg_control = cmh,
                .msg_controllen = sizeof(buffer),
                .msg_flags = MSG_DONTWAIT,
        };

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
            /* udp sockets need large rcvbuf as all pending
             * requests are still in that buffer.  sndbuf must
             * also be large enough that there is enough space
             * for one reply per thread.  We count all threads
             * rather than threads in a particular pool, which
             * provides an upper bound on the number of threads
             * which will access the socket.
             */
            svc_sock_setbufsize(svsk->sk_sock,
                                (serv->sv_nrthreads+3) * serv->sv_max_mesg,
                                (serv->sv_nrthreads+3) * serv->sv_max_mesg);

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
                svc_delete_socket(svsk);
                return 0;
        }

        clear_bit(SK_DATA, &svsk->sk_flags);
        skb = NULL;
        err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
                             0, 0, MSG_PEEK | MSG_DONTWAIT);
        if (err >= 0)
                skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

        if (skb == NULL) {
                if (err != -EAGAIN) {
                        /* possibly an icmp error */
                        dprintk("svc: recvfrom returned error %d\n", -err);
                        set_bit(SK_DATA, &svsk->sk_flags);
                }
                svc_sock_received(svsk);
                return -EAGAIN;
        }
        rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
        if (skb->tstamp.tv64 == 0) {
                skb->tstamp = ktime_get_real();
                /* Don't enable netstamp, sunrpc doesn't
                   need that much accuracy */
        }
        svsk->sk_sk->sk_stamp = skb->tstamp;
        set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

        /*
         * Maybe more packets - kick another thread ASAP.
         */
        svc_sock_received(svsk);

        len = skb->len - sizeof(struct udphdr);
        rqstp->rq_arg.len = len;

        rqstp->rq_prot = IPPROTO_UDP;

        if (cmh->cmsg_level != IPPROTO_IP ||
            cmh->cmsg_type != IP_PKTINFO) {
                if (net_ratelimit())
                        printk("rpcsvc: received unknown control message:"
                               "%d/%d\n",
                               cmh->cmsg_level, cmh->cmsg_type);
                skb_free_datagram(svsk->sk_sk, skb);
                return 0;
        }
        svc_udp_get_dest_address(rqstp, cmh);

        if (skb_is_nonlinear(skb)) {
                /* we have to copy */
                local_bh_disable();
                if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
                        local_bh_enable();
                        /* checksum error */
                        skb_free_datagram(svsk->sk_sk, skb);
                        return 0;
                }
                local_bh_enable();
                skb_free_datagram(svsk->sk_sk, skb);
        } else {
                /* we can use it in-place */
                rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
                rqstp->rq_arg.head[0].iov_len = len;
                if (skb_checksum_complete(skb)) {
                        skb_free_datagram(svsk->sk_sk, skb);
                        return 0;
                }
                rqstp->rq_xprt_ctxt = skb;
        }

        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
                rqstp->rq_respages = rqstp->rq_pages+1;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
                rqstp->rq_respages = rqstp->rq_pages + 1 +
                        DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
        }

        if (serv->sv_stats)
                serv->sv_stats->netudpcnt++;

        return len;
}

static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
        int error;

        error = svc_sendto(rqstp, &rqstp->rq_res);
        if (error == -ECONNREFUSED)
                /* ICMP error on earlier request. */
                error = svc_sendto(rqstp, &rqstp->rq_res);

        return error;
}

/* UDP carries no record marker, so there is no reply header to set up */
static void svc_udp_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

static struct svc_xprt_ops svc_udp_ops = {
        .xpo_recvfrom = svc_udp_recvfrom,
        .xpo_sendto = svc_udp_sendto,
        .xpo_release_rqst = svc_release_skb,
        .xpo_detach = svc_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_prep_reply_hdr = svc_udp_prep_reply_hdr,
};

static struct svc_xprt_class svc_udp_class = {
        .xcl_name = "udp",
        .xcl_ops = &svc_udp_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
};

static void
svc_udp_init(struct svc_sock *svsk)
{
        int one = 1;
        mm_segment_t oldfs;

        svc_xprt_init(&svc_udp_class, &svsk->sk_xprt);
        svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
        svsk->sk_sk->sk_write_space = svc_write_space;

        /* initialise setting must have enough space to
         * receive and respond to one request.
         * svc_udp_recvfrom will re-adjust if necessary
         */
        svc_sock_setbufsize(svsk->sk_sock,
                            3 * svsk->sk_server->sv_max_mesg,
                            3 * svsk->sk_server->sv_max_mesg);

        set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
        set_bit(SK_CHNGBUF, &svsk->sk_flags);

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        /* make sure we get destination address info */
        svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
                                       (char __user *)&one, sizeof(one));
        set_fs(oldfs);
}
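
/*
 * Aside (not compiled): the set_fs(KERNEL_DS) dance above is only needed
 * because ->setsockopt() expects a user-space pointer.  Assuming the
 * kernel_setsockopt() helper is available in this tree, an equivalent
 * would be:
 */
#if 0
        kernel_setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
                          (char *)&one, sizeof(one));
#endif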

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP (listen) state change %d\n",
                sk, sk->sk_state);

        /*
         * This callback may be called twice when a new connection
         * is established as a child socket inherits everything
         * from a parent LISTEN socket.
         * 1) data_ready method of the parent socket will be called
         *    when one of child sockets become ESTABLISHED.
         * 2) data_ready method of the child socket may be called
         *    when it receives data before the socket is accepted.
         * In case of 2, we should ignore it silently.
         */
        if (sk->sk_state == TCP_LISTEN) {
                if (svsk) {
                        set_bit(SK_CONN, &svsk->sk_flags);
                        svc_sock_enqueue(svsk);
                } else
                        printk("svc: socket %p: no user data\n", sk);
        }

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}

/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
                sk, sk->sk_state, sk->sk_user_data);

        if (!svsk)
                printk("svc: socket %p: no user data\n", sk);
        else {
                set_bit(SK_CLOSE, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}

static void
svc_tcp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP data ready (svsk %p)\n",
                sk, sk->sk_user_data);
        if (svsk) {
                set_bit(SK_DATA, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}

static inline int svc_port_is_privileged(struct sockaddr *sin)
{
        switch (sin->sa_family) {
        case AF_INET:
                return ntohs(((struct sockaddr_in *)sin)->sin_port)
                        < PROT_SOCK;
        case AF_INET6:
                return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
                        < PROT_SOCK;
        default:
                return 0;
        }
}

/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
        struct sockaddr_storage addr;
        struct sockaddr *sin = (struct sockaddr *) &addr;
        struct svc_serv *serv = svsk->sk_server;
        struct socket *sock = svsk->sk_sock;
        struct socket *newsock;
        struct svc_sock *newsvsk;
        int err, slen;
        char buf[RPC_MAX_ADDRBUFLEN];

        dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
        if (!sock)
                return;

        clear_bit(SK_CONN, &svsk->sk_flags);
        err = kernel_accept(sock, &newsock, O_NONBLOCK);
        if (err < 0) {
                if (err == -ENOMEM)
                        printk(KERN_WARNING "%s: no more sockets!\n",
                               serv->sv_name);
                else if (err != -EAGAIN && net_ratelimit())
                        printk(KERN_WARNING "%s: accept failed (err %d)!\n",
                               serv->sv_name, -err);
                return;
        }

        set_bit(SK_CONN, &svsk->sk_flags);
        svc_sock_enqueue(svsk);

        err = kernel_getpeername(newsock, sin, &slen);
        if (err < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING "%s: peername failed (err %d)!\n",
                               serv->sv_name, -err);
                goto failed;            /* aborted connection or whatever */
        }

        /* Ideally, we would want to reject connections from unauthorized
         * hosts here, but when we get encryption, the IP of the host won't
         * tell us anything.  For now just warn about unpriv connections.
         */
        if (!svc_port_is_privileged(sin)) {
                dprintk(KERN_WARNING
                        "%s: connect from unprivileged port: %s\n",
                        serv->sv_name,
                        __svc_print_addr(sin, buf, sizeof(buf)));
        }
        dprintk("%s: connect from %s\n", serv->sv_name,
                __svc_print_addr(sin, buf, sizeof(buf)));

        /* make sure that a write doesn't block forever when
         * low on memory
         */
        newsock->sk->sk_sndtimeo = HZ*30;

        if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
                                 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
                goto failed;
        memcpy(&newsvsk->sk_remote, sin, slen);
        newsvsk->sk_remotelen = slen;
        err = kernel_getsockname(newsock, sin, &slen);
        if (unlikely(err < 0)) {
                dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
                slen = offsetof(struct sockaddr, sa_data);
        }
        memcpy(&newsvsk->sk_local, sin, slen);

        svc_sock_received(newsvsk);

        /* make sure that we don't have too many active connections.
         * If we have, something must be dropped.
         *
         * There's no point in trying to do random drop here for
         * DoS prevention. The NFS client does 1 reconnect in 15
         * seconds. An attacker can easily beat that.
         *
         * The only somewhat efficient mechanism would be to drop
         * old connections from the same IP first. But right now
         * we don't even record the client IP in svc_sock.
         */
        if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
                struct svc_sock *svsk = NULL;
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&serv->sv_tempsocks)) {
                        if (net_ratelimit()) {
                                /* Try to help the admin */
                                printk(KERN_NOTICE "%s: too many open TCP "
                                        "sockets, consider increasing the "
                                        "number of nfsd threads\n",
                                        serv->sv_name);
                                printk(KERN_NOTICE
                                       "%s: last TCP connect from %s\n",
                                       serv->sv_name, __svc_print_addr(sin,
                                                        buf, sizeof(buf)));
                        }
                        /*
                         * Always select the oldest socket. It's not fair,
                         * but so is life
                         */
                        svsk = list_entry(serv->sv_tempsocks.prev,
                                          struct svc_sock,
                                          sk_list);
                        set_bit(SK_CLOSE, &svsk->sk_flags);
                        atomic_inc(&svsk->sk_inuse);
                }
                spin_unlock_bh(&serv->sv_lock);

                if (svsk) {
                        svc_sock_enqueue(svsk);
                        svc_sock_put(svsk);
                }

        }

        if (serv->sv_stats)
                serv->sv_stats->nettcpconn++;

        return;

failed:
        sock_release(newsock);
        return;
}

/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        int len;
        struct kvec *vec;
        int pnum, vlen;

        dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
                svsk, test_bit(SK_DATA, &svsk->sk_flags),
                test_bit(SK_CONN, &svsk->sk_flags),
                test_bit(SK_CLOSE, &svsk->sk_flags));

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
                svc_delete_socket(svsk);
                return 0;
        }

        if (svsk->sk_sk->sk_state == TCP_LISTEN) {
                svc_tcp_accept(svsk);
                svc_sock_received(svsk);
                return 0;
        }

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* sndbuf needs to have room for one request
                 * per thread, otherwise we can stall even when the
                 * network isn't a bottleneck.
                 *
                 * We count all threads rather than threads in a
                 * particular pool, which provides an upper bound
                 * on the number of threads which will access the socket.
                 *
                 * rcvbuf just needs to be able to hold a few requests.
                 * Normally they will be removed from the queue
                 * as soon as a complete request arrives.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
                                    3 * serv->sv_max_mesg);

        clear_bit(SK_DATA, &svsk->sk_flags);

        /* Receive data. If we haven't got the record length yet, get
         * the next four bytes. Otherwise try to gobble up as much as
         * possible up to the complete record length.
         */
        if (svsk->sk_tcplen < 4) {
                unsigned long want = 4 - svsk->sk_tcplen;
                struct kvec iov;

                iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
                iov.iov_len = want;
                if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
                        goto error;
                svsk->sk_tcplen += len;

                if (len < want) {
                        dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
                                len, want);
                        svc_sock_received(svsk);
                        return -EAGAIN; /* record header not complete */
                }

                svsk->sk_reclen = ntohl(svsk->sk_reclen);
                if (!(svsk->sk_reclen & 0x80000000)) {
                        /* FIXME: technically, a record can be fragmented,
                         *  and non-terminal fragments will not have the top
                         *  bit set in the fragment length header.
                         *  But apparently no known nfs clients send fragmented
                         *  records. */
                        if (net_ratelimit())
                                printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
                                       " (non-terminal)\n",
                                       (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
                svsk->sk_reclen &= 0x7fffffff;
                dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
                if (svsk->sk_reclen > serv->sv_max_mesg) {
                        if (net_ratelimit())
                                printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
                                       " (large)\n",
                                       (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
        }

        /* Check whether enough data is available */
        len = svc_recv_available(svsk);
        if (len < 0)
                goto error;

        if (len < svsk->sk_reclen) {
                dprintk("svc: incomplete TCP record (%d of %d)\n",
                        len, svsk->sk_reclen);
                svc_sock_received(svsk);
                return -EAGAIN; /* record not complete */
        }
        len = svsk->sk_reclen;
        set_bit(SK_DATA, &svsk->sk_flags);

        vec = rqstp->rq_vec;
        vec[0] = rqstp->rq_arg.head[0];
        vlen = PAGE_SIZE;
        pnum = 1;
        while (vlen < len) {
                vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
                vec[pnum].iov_len = PAGE_SIZE;
                pnum++;
                vlen += PAGE_SIZE;
        }
        rqstp->rq_respages = &rqstp->rq_pages[pnum];

        /* Now receive data */
        len = svc_recvfrom(rqstp, vec, pnum, len);
        if (len < 0)
                goto error;

        dprintk("svc: TCP complete record (%d bytes)\n", len);
        rqstp->rq_arg.len = len;
        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
        }

        rqstp->rq_xprt_ctxt = NULL;
        rqstp->rq_prot = IPPROTO_TCP;

        /* Reset TCP read info */
        svsk->sk_reclen = 0;
        svsk->sk_tcplen = 0;

        svc_sock_received(svsk);
        if (serv->sv_stats)
                serv->sv_stats->nettcpcnt++;

        return len;

 err_delete:
        svc_delete_socket(svsk);
        return -EAGAIN;

 error:
        if (len == -EAGAIN) {
                dprintk("RPC: TCP recvfrom got EAGAIN\n");
                svc_sock_received(svsk);
        } else {
                printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
                       svsk->sk_server->sv_name, -len);
                goto err_delete;
        }

        return len;
}

/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
        struct xdr_buf *xbufp = &rqstp->rq_res;
        int sent;
        __be32 reclen;

        /* Set up the first element of the reply kvec.
         * Any other kvecs that may be in use have been taken
         * care of by the server implementation itself.
         */
        reclen = htonl(0x80000000|((xbufp->len ) - 4));
        memcpy(xbufp->head[0].iov_base, &reclen, 4);

        if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
                return -ENOTCONN;

        sent = svc_sendto(rqstp, &rqstp->rq_res);
        if (sent != xbufp->len) {
                printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
                       rqstp->rq_sock->sk_server->sv_name,
                       (sent<0)?"got error":"sent only",
                       sent, xbufp->len);
                set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
                svc_sock_enqueue(rqstp->rq_sock);
                sent = -EAGAIN;
        }
        return sent;
}

/*
 * Setup response header. TCP has a 4B record length field.
 */
static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
        struct kvec *resv = &rqstp->rq_res.head[0];

        /* tcp needs a space for the record length... */
        svc_putnl(resv, 0);
}
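
/*
 * Aside (not compiled): a sketch of how the generic dispatch code is
 * expected to consume this hook, rather than testing rq_prot for
 * IPPROTO_TCP itself (the real caller lives in net/sunrpc/svc.c):
 */
#if 0
        rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
#endif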

static struct svc_xprt_ops svc_tcp_ops = {
        .xpo_recvfrom = svc_tcp_recvfrom,
        .xpo_sendto = svc_tcp_sendto,
        .xpo_release_rqst = svc_release_skb,
        .xpo_detach = svc_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
};

static struct svc_xprt_class svc_tcp_class = {
        .xcl_name = "tcp",
        .xcl_ops = &svc_tcp_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

void svc_init_xprt_sock(void)
{
        svc_reg_xprt_class(&svc_tcp_class);
        svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
        svc_unreg_xprt_class(&svc_tcp_class);
        svc_unreg_xprt_class(&svc_udp_class);
}

static void
svc_tcp_init(struct svc_sock *svsk)
{
        struct sock *sk = svsk->sk_sk;
        struct tcp_sock *tp = tcp_sk(sk);

        svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt);

        if (sk->sk_state == TCP_LISTEN) {
                dprintk("setting up TCP socket for listening\n");
                sk->sk_data_ready = svc_tcp_listen_data_ready;
                set_bit(SK_CONN, &svsk->sk_flags);
        } else {
                dprintk("setting up TCP socket for reading\n");
                sk->sk_state_change = svc_tcp_state_change;
                sk->sk_data_ready = svc_tcp_data_ready;
                sk->sk_write_space = svc_write_space;

                svsk->sk_reclen = 0;
                svsk->sk_tcplen = 0;

                tp->nonagle = 1;        /* disable Nagle's algorithm */

                /* initialise setting must have enough space to
                 * receive and respond to one request.
                 * svc_tcp_recvfrom will re-adjust if necessary
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    3 * svsk->sk_server->sv_max_mesg,
                                    3 * svsk->sk_server->sv_max_mesg);

                set_bit(SK_CHNGBUF, &svsk->sk_flags);
                set_bit(SK_DATA, &svsk->sk_flags);
                if (sk->sk_state != TCP_ESTABLISHED)
                        set_bit(SK_CLOSE, &svsk->sk_flags);
        }
}

void
svc_sock_update_bufs(struct svc_serv *serv)
{
        /*
         * The number of server threads has changed. Update
         * rcvbuf and sndbuf accordingly on all sockets
         */
        struct list_head *le;

        spin_lock_bh(&serv->sv_lock);
        list_for_each(le, &serv->sv_permsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        list_for_each(le, &serv->sv_tempsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
}

/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
        struct svc_sock *svsk = NULL;
        struct svc_serv *serv = rqstp->rq_server;
        struct svc_pool *pool = rqstp->rq_pool;
        int len, i;
        int pages;
        struct xdr_buf *arg;
        DECLARE_WAITQUEUE(wait, current);

        dprintk("svc: server %p waiting for data (to = %ld)\n",
                rqstp, timeout);

        if (rqstp->rq_sock)
                printk(KERN_ERR
                        "svc_recv: service %p, socket not NULL!\n",
                        rqstp);
        if (waitqueue_active(&rqstp->rq_wait))
                printk(KERN_ERR
                        "svc_recv: service %p, wait queue active!\n",
                        rqstp);


        /* now allocate needed pages.  If we get a failure, sleep briefly */
        pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
        for (i=0; i < pages ; i++)
                while (rqstp->rq_pages[i] == NULL) {
                        struct page *p = alloc_page(GFP_KERNEL);
                        if (!p)
                                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        rqstp->rq_pages[i] = p;
                }
        rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
        BUG_ON(pages >= RPCSVC_MAXPAGES);

        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
        arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
        arg->head[0].iov_len = PAGE_SIZE;
        arg->pages = rqstp->rq_pages + 1;
        arg->page_base = 0;
        /* save at least one page for response */
        arg->page_len = (pages-2)*PAGE_SIZE;
        arg->len = (pages-1)*PAGE_SIZE;
        arg->tail[0].iov_len = 0;

        try_to_freeze();
        cond_resched();
        if (signalled())
                return -EINTR;

        spin_lock_bh(&pool->sp_lock);
        if ((svsk = svc_sock_dequeue(pool)) != NULL) {
                rqstp->rq_sock = svsk;
                atomic_inc(&svsk->sk_inuse);
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
        } else {
                /* No data pending. Go to sleep */
                svc_thread_enqueue(pool, rqstp);

                /*
                 * We have to be able to interrupt this wait
                 * to bring down the daemons ...
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&rqstp->rq_wait, &wait);
                spin_unlock_bh(&pool->sp_lock);

                schedule_timeout(timeout);

                try_to_freeze();

                spin_lock_bh(&pool->sp_lock);
                remove_wait_queue(&rqstp->rq_wait, &wait);

                if (!(svsk = rqstp->rq_sock)) {
                        svc_thread_dequeue(pool, rqstp);
                        spin_unlock_bh(&pool->sp_lock);
                        dprintk("svc: server %p, no data yet\n", rqstp);
                        return signalled()? -EINTR : -EAGAIN;
                }
        }
        spin_unlock_bh(&pool->sp_lock);

        dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
                rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
        len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
        dprintk("svc: got len=%d\n", len);

        /* No data, incomplete (TCP) read, or accept() */
        if (len == 0 || len == -EAGAIN) {
                rqstp->rq_res.len = 0;
                svc_sock_release(rqstp);
                return -EAGAIN;
        }
        svsk->sk_lastrecv = get_seconds();
        clear_bit(SK_OLD, &svsk->sk_flags);

        rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
        rqstp->rq_chandle.defer = svc_defer;

        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
        return len;
}

/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
        dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
        svc_sock_release(rqstp);
}

/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk;
        int len;
        struct xdr_buf *xb;

        if ((svsk = rqstp->rq_sock) == NULL) {
                printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
                                __FILE__, __LINE__);
                return -EFAULT;
        }

        /* release the receive skb before sending the reply */
        rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

        /* calculate over-all length */
        xb = & rqstp->rq_res;
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;

        /* Grab svsk->sk_mutex to serialize outgoing data. */
        mutex_lock(&svsk->sk_mutex);
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                len = -ENOTCONN;
        else
                len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
        mutex_unlock(&svsk->sk_mutex);
        svc_sock_release(rqstp);

        if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
                return 0;
        return len;
}

/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
        struct svc_serv *serv = (struct svc_serv *)closure;
        struct svc_sock *svsk;
        struct list_head *le, *next;
        LIST_HEAD(to_be_aged);

        dprintk("svc_age_temp_sockets\n");

        if (!spin_trylock_bh(&serv->sv_lock)) {
                /* busy, try again 1 sec later */
                dprintk("svc_age_temp_sockets: busy\n");
                mod_timer(&serv->sv_temptimer, jiffies + HZ);
                return;
        }

        list_for_each_safe(le, next, &serv->sv_tempsocks) {
                svsk = list_entry(le, struct svc_sock, sk_list);

                if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
                        continue;
                if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
                        continue;
                atomic_inc(&svsk->sk_inuse);
                list_move(le, &to_be_aged);
                set_bit(SK_CLOSE, &svsk->sk_flags);
                set_bit(SK_DETACHED, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);

        while (!list_empty(&to_be_aged)) {
                le = to_be_aged.next;
                /* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
                list_del_init(le);
                svsk = list_entry(le, struct svc_sock, sk_list);

                dprintk("queuing svsk %p for closing, %lu seconds old\n",
                        svsk, get_seconds() - svsk->sk_lastrecv);

                /* a thread will dequeue and close it soon */
                svc_sock_enqueue(svsk);
                svc_sock_put(svsk);
        }

        mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
                                                struct socket *sock,
                                                int *errp, int flags)
{
        struct svc_sock *svsk;
        struct sock *inet;
        int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
        int is_temporary = flags & SVC_SOCK_TEMPORARY;

        dprintk("svc: svc_setup_socket %p\n", sock);
        if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
                *errp = -ENOMEM;
                return NULL;
        }

        inet = sock->sk;

        /* Register socket with portmapper */
        if (*errp >= 0 && pmap_register)
                *errp = svc_register(serv, inet->sk_protocol,
                                     ntohs(inet_sk(inet)->sport));

        if (*errp < 0) {
                kfree(svsk);
                return NULL;
        }

        set_bit(SK_BUSY, &svsk->sk_flags);
        inet->sk_user_data = svsk;
        svsk->sk_sock = sock;
        svsk->sk_sk = inet;
        svsk->sk_ostate = inet->sk_state_change;
        svsk->sk_odata = inet->sk_data_ready;
        svsk->sk_owspace = inet->sk_write_space;
        svsk->sk_server = serv;
        atomic_set(&svsk->sk_inuse, 1);
        svsk->sk_lastrecv = get_seconds();
        spin_lock_init(&svsk->sk_lock);
        INIT_LIST_HEAD(&svsk->sk_deferred);
        INIT_LIST_HEAD(&svsk->sk_ready);
        mutex_init(&svsk->sk_mutex);

        /* Initialize the socket */
        if (sock->type == SOCK_DGRAM)
                svc_udp_init(svsk);
        else
                svc_tcp_init(svsk);

        spin_lock_bh(&serv->sv_lock);
        if (is_temporary) {
                set_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_tempsocks);
                serv->sv_tmpcnt++;
                if (serv->sv_temptimer.function == NULL) {
                        /* setup timer to age temp sockets */
                        setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
                                        (unsigned long)serv);
                        mod_timer(&serv->sv_temptimer,
                                        jiffies + svc_conn_age_period * HZ);
                }
        } else {
                clear_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_permsocks);
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: svc_setup_socket created %p (inet %p)\n",
                                svsk, svsk->sk_sk);

        return svsk;
}

int svc_addsock(struct svc_serv *serv,
                int fd,
                char *name_return,
                int *proto)
{
        int err = 0;
        struct socket *so = sockfd_lookup(fd, &err);
        struct svc_sock *svsk = NULL;

        if (!so)
                return err;
        if (so->sk->sk_family != AF_INET)
                err = -EAFNOSUPPORT;
        else if (so->sk->sk_protocol != IPPROTO_TCP &&
            so->sk->sk_protocol != IPPROTO_UDP)
                err = -EPROTONOSUPPORT;
        else if (so->state > SS_UNCONNECTED)
                err = -EISCONN;
        else {
                svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
                if (svsk) {
                        svc_sock_received(svsk);
                        err = 0;
                }
        }
        if (err) {
                sockfd_put(so);
                return err;
        }
        if (proto) *proto = so->sk->sk_protocol;
        return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);

1da177e4
LT
1781/*
1782 * Create socket for RPC service.
1783 */
6b174337 1784static int svc_create_socket(struct svc_serv *serv, int protocol,
77f1f67a 1785 struct sockaddr *sin, int len, int flags)
1da177e4
LT
1786{
1787 struct svc_sock *svsk;
1788 struct socket *sock;
1789 int error;
1790 int type;
ad06e4bd 1791 char buf[RPC_MAX_ADDRBUFLEN];
1da177e4 1792
ad06e4bd
CL
1793 dprintk("svc: svc_create_socket(%s, %d, %s)\n",
1794 serv->sv_program->pg_name, protocol,
77f1f67a 1795 __svc_print_addr(sin, buf, sizeof(buf)));
1da177e4
LT
1796
1797 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
1798 printk(KERN_WARNING "svc: only UDP and TCP "
1799 "sockets supported\n");
1800 return -EINVAL;
1801 }
1802 	type = (protocol == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM;
1803
77f1f67a
CL
1804 error = sock_create_kern(sin->sa_family, type, protocol, &sock);
1805 if (error < 0)
1da177e4
LT
1806 return error;
1807
ed07536e
PZ
1808 svc_reclassify_socket(sock);
1809
18114746 1810 if (type == SOCK_STREAM)
77f1f67a
CL
1811 sock->sk->sk_reuse = 1; /* allow address reuse */
1812 error = kernel_bind(sock, sin, len);
18114746
ES
1813 if (error < 0)
1814 goto bummer;
1da177e4
LT
1815
1816 if (protocol == IPPROTO_TCP) {
e6242e92 1817 if ((error = kernel_listen(sock, 64)) < 0)
1da177e4
LT
1818 goto bummer;
1819 }
1820
e79eff1f
N
1821 if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
1822 svc_sock_received(svsk);
6b174337 1823 return ntohs(inet_sk(svsk->sk_sk)->sport);
e79eff1f 1824 }
1da177e4
LT
1825
1826bummer:
1827 dprintk("svc: svc_create_socket error = %d\n", -error);
1828 sock_release(sock);
1829 return error;
1830}
1831
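/*
 * Illustrative sketch, not part of the original file: since
 * svc_create_socket takes a generic struct sockaddr and length, an
 * AF_INET6 listener could be requested the same way svc_makesock
 * (below) builds its AF_INET address.  Whether the surrounding server
 * code copes with IPv6 peers is a separate question; this only
 * exercises the signature.
 */
static int example_create_v6(struct svc_serv *serv, unsigned short port)
{
	struct sockaddr_in6 sin6 = {
		.sin6_family	= AF_INET6,
		.sin6_addr	= IN6ADDR_ANY_INIT,
		.sin6_port	= htons(port),
	};

	return svc_create_socket(serv, IPPROTO_TCP,
				 (struct sockaddr *)&sin6, sizeof(sin6),
				 SVC_SOCK_DEFAULTS);
}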
755cceab
TT
1832/*
1833 * Detach the svc_sock from the socket so that no
1834 * more callbacks occur.
1835 */
1836static void svc_sock_detach(struct svc_xprt *xprt)
1837{
1838 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1839 struct sock *sk = svsk->sk_sk;
1840
1841 dprintk("svc: svc_sock_detach(%p)\n", svsk);
1842
1843 /* put back the old socket callbacks */
1844 sk->sk_state_change = svsk->sk_ostate;
1845 sk->sk_data_ready = svsk->sk_odata;
1846 sk->sk_write_space = svsk->sk_owspace;
1847}
1848
1849/*
1850 * Free the svc_sock's socket resources and the svc_sock itself.
1851 */
1852static void svc_sock_free(struct svc_xprt *xprt)
1853{
1854 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1855 dprintk("svc: svc_sock_free(%p)\n", svsk);
1856
1857 if (svsk->sk_info_authunix != NULL)
1858 svcauth_unix_info_release(svsk->sk_info_authunix);
1859 if (svsk->sk_sock->file)
1860 sockfd_put(svsk->sk_sock);
1861 else
1862 sock_release(svsk->sk_sock);
1863 kfree(svsk);
1864}
1865
1da177e4
LT
1866/*
1867 * Remove a dead socket
1868 */
aaf68cfb 1869static void
1da177e4
LT
1870svc_delete_socket(struct svc_sock *svsk)
1871{
1872 struct svc_serv *serv;
1873 struct sock *sk;
1874
1875 dprintk("svc: svc_delete_socket(%p)\n", svsk);
1876
1877 serv = svsk->sk_server;
1878 sk = svsk->sk_sk;
1879
755cceab 1880 svsk->sk_xprt.xpt_ops->xpo_detach(&svsk->sk_xprt);
1da177e4
LT
1881
1882 spin_lock_bh(&serv->sv_lock);
1883
36bdfc8b
GB
1884 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
1885 list_del_init(&svsk->sk_list);
cca5172a 1886 /*
3262c816
GB
1887 * We used to delete the svc_sock from whichever list
1888 * its sk_ready node was on, but we don't actually
1889 * need to. This is because the only time we're called
1890 * while still attached to a queue, the queue itself
1891 * is about to be destroyed (in svc_destroy).
1892 */
aaf68cfb
N
1893 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
1894 BUG_ON(atomic_read(&svsk->sk_inuse)<2);
1895 atomic_dec(&svsk->sk_inuse);
1da177e4
LT
1896 if (test_bit(SK_TEMP, &svsk->sk_flags))
1897 serv->sv_tmpcnt--;
aaf68cfb 1898 }
1da177e4 1899
d6740df9 1900 spin_unlock_bh(&serv->sv_lock);
aaf68cfb
N
1901}
1902
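/*
 * Illustrative note, not part of the original file: sk_inuse carries a
 * bias of 1 from svc_setup_socket, and svc_delete_socket drops that
 * bias exactly once (the SK_DEAD test-and-set guards against a second
 * drop).  The socket is finally freed by whichever svc_sock_put()
 * takes the count to zero; that helper, defined earlier in this file,
 * amounts to roughly:
 */
static inline void example_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse))
		svsk->sk_xprt.xpt_ops->xpo_free(&svsk->sk_xprt);
}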
cda1fd4a 1903static void svc_close_socket(struct svc_sock *svsk)
aaf68cfb
N
1904{
1905 set_bit(SK_CLOSE, &svsk->sk_flags);
1906 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
1907 /* someone else will have to effect the close */
1908 return;
1909
1910 atomic_inc(&svsk->sk_inuse);
1911 svc_delete_socket(svsk);
1912 clear_bit(SK_BUSY, &svsk->sk_flags);
d6740df9 1913 svc_sock_put(svsk);
1da177e4
LT
1914}
1915
cda1fd4a
N
1916void svc_force_close_socket(struct svc_sock *svsk)
1917{
1918 set_bit(SK_CLOSE, &svsk->sk_flags);
1919 if (test_bit(SK_BUSY, &svsk->sk_flags)) {
1920 /* Waiting to be processed, but no threads left,
1921 * so just remove it from the waiting list
1922 */
1923 list_del_init(&svsk->sk_ready);
1924 clear_bit(SK_BUSY, &svsk->sk_flags);
1925 }
1926 svc_close_socket(svsk);
1927}
1928
6b174337
CL
1929/**
1930 * svc_makesock - Make a socket for nfsd and lockd
1931 * @serv: RPC server structure
1932 * @protocol: transport protocol to use
1933 * @port: port to use
482fb94e 1934 * @flags: requested socket characteristics
6b174337 1935 *
1da177e4 1936 */
482fb94e
CL
1937int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
1938 int flags)
1da177e4 1939{
6b174337
CL
1940 struct sockaddr_in sin = {
1941 .sin_family = AF_INET,
1942 .sin_addr.s_addr = INADDR_ANY,
1943 .sin_port = htons(port),
1944 };
1da177e4
LT
1945
1946 dprintk("svc: creating socket proto = %d\n", protocol);
77f1f67a
CL
1947 return svc_create_socket(serv, protocol, (struct sockaddr *) &sin,
1948 sizeof(sin), flags);
1da177e4
LT
1949}
1950
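/*
 * Illustrative sketch, not part of the original file: nfsd- and
 * lockd-style callers create their default listeners with
 * svc_makesock, which returns the bound port on success and a
 * negative errno on failure.  The port number here is an assumption.
 */
static int example_make_listeners(struct svc_serv *serv)
{
	int err;

	err = svc_makesock(serv, IPPROTO_UDP, 2049, SVC_SOCK_DEFAULTS);
	if (err < 0)
		return err;
	err = svc_makesock(serv, IPPROTO_TCP, 2049, SVC_SOCK_DEFAULTS);
	return err < 0 ? err : 0;
}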
1951/*
cca5172a 1952 * Handle deferral and revisiting of requests
1da177e4
LT
1953 */
1954
1955static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
1956{
1957 struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
1da177e4
LT
1958 struct svc_sock *svsk;
1959
1960 if (too_many) {
1961 svc_sock_put(dr->svsk);
1962 kfree(dr);
1963 return;
1964 }
1965 dprintk("revisit queued\n");
1966 svsk = dr->svsk;
1967 dr->svsk = NULL;
7ac1bea5 1968 spin_lock(&svsk->sk_lock);
1da177e4 1969 list_add(&dr->handle.recent, &svsk->sk_deferred);
7ac1bea5 1970 spin_unlock(&svsk->sk_lock);
1da177e4
LT
1971 set_bit(SK_DEFERRED, &svsk->sk_flags);
1972 svc_sock_enqueue(svsk);
1973 svc_sock_put(svsk);
1974}
1975
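/*
 * Illustrative sketch, not part of the original file: svc_revisit is
 * only ever reached through the handle stored by svc_defer below.
 * When the cache upcall that blocked a request completes (or the
 * deferral list overflows), the cache code does roughly:
 */
static void example_cache_done(struct cache_deferred_req *dreq, int too_many)
{
	dreq->revisit(dreq, too_many);	/* -> svc_revisit */
}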
1976static struct cache_deferred_req *
1977svc_defer(struct cache_req *req)
1978{
1979 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
1980 int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
1981 struct svc_deferred_req *dr;
1982
1983 if (rqstp->rq_arg.page_len)
1984 return NULL; /* FIXME: cannot defer requests larger than one page */
1985 if (rqstp->rq_deferred) {
1986 dr = rqstp->rq_deferred;
1987 rqstp->rq_deferred = NULL;
1988 } else {
1989 int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
1990 /* FIXME: maybe discard the request if size is too large */
1991 dr = kmalloc(size, GFP_KERNEL);
1992 if (dr == NULL)
1993 return NULL;
1994
1995 dr->handle.owner = rqstp->rq_server;
1996 dr->prot = rqstp->rq_prot;
24422222
CL
1997 memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
1998 dr->addrlen = rqstp->rq_addrlen;
1918e341 1999 dr->daddr = rqstp->rq_daddr;
1da177e4
LT
2000 dr->argslen = rqstp->rq_arg.len >> 2;
2001 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
2002 }
c45c357d 2003 atomic_inc(&rqstp->rq_sock->sk_inuse);
1da177e4 2004 dr->svsk = rqstp->rq_sock;
1da177e4
LT
2005
2006 dr->handle.revisit = svc_revisit;
2007 return &dr->handle;
2008}
2009
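/*
 * Illustrative sketch, not part of the original file: svc_defer is
 * plugged into rqstp->rq_chandle.defer (set up in svc_recv earlier in
 * this file), so a cache lookup that would block defers the request
 * roughly like this.  The helper name and return-code policy are
 * assumptions.
 */
static int example_blocking_lookup(struct svc_rqst *rqstp)
{
	struct cache_req *req = &rqstp->rq_chandle;
	struct cache_deferred_req *dreq;

	dreq = req->defer(req);		/* -> svc_defer */
	if (dreq == NULL)
		return -ETIMEDOUT;	/* could not defer; drop request */
	return -EAGAIN;			/* deferred; svc_revisit requeues */
}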
2010/*
2011 * recv data from a deferred request into an active one
2012 */
2013static int svc_deferred_recv(struct svc_rqst *rqstp)
2014{
2015 struct svc_deferred_req *dr = rqstp->rq_deferred;
2016
2017 rqstp->rq_arg.head[0].iov_base = dr->args;
2018 rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
2019 rqstp->rq_arg.page_len = 0;
2020 rqstp->rq_arg.len = dr->argslen<<2;
2021 rqstp->rq_prot = dr->prot;
24422222
CL
2022 memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
2023 rqstp->rq_addrlen = dr->addrlen;
1918e341 2024 rqstp->rq_daddr = dr->daddr;
44524359 2025 rqstp->rq_respages = rqstp->rq_pages;
1da177e4
LT
2026 return dr->argslen<<2;
2027}
2028
2029
2030static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
2031{
2032 struct svc_deferred_req *dr = NULL;
cca5172a 2033
1da177e4
LT
2034 if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
2035 return NULL;
7ac1bea5 2036 spin_lock(&svsk->sk_lock);
1da177e4
LT
2037 clear_bit(SK_DEFERRED, &svsk->sk_flags);
2038 if (!list_empty(&svsk->sk_deferred)) {
2039 dr = list_entry(svsk->sk_deferred.next,
2040 struct svc_deferred_req,
2041 handle.recent);
2042 list_del_init(&dr->handle.recent);
2043 set_bit(SK_DEFERRED, &svsk->sk_flags);
2044 }
7ac1bea5 2045 spin_unlock(&svsk->sk_lock);
1da177e4
LT
2046 return dr;
2047}
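/*
 * Illustrative sketch, not part of the original file: both
 * svc_udp_recvfrom and svc_tcp_recvfrom (earlier in this file) start
 * by draining the deferred queue before reading fresh data from the
 * socket, along these lines:
 */
static int example_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock *svsk = rqstp->rq_sock;

	rqstp->rq_deferred = svc_deferred_dequeue(svsk);
	if (rqstp->rq_deferred)
		return svc_deferred_recv(rqstp);

	/* ...otherwise fall through to the real socket read... */
	return 0;
}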