svc: Move sk_sendto and sk_recvfrom to svc_xprt_class
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / net/sunrpc/svcsock.c
1/*
2 * linux/net/sunrpc/svcsock.c
3 *
4 * These are the RPC server socket internals.
5 *
6 * The server scheduling algorithm does not always distribute the load
7 * evenly when servicing a single client. May need to modify the
8 * svc_sock_enqueue procedure...
9 *
10 * TCP support is largely untested and may be a little slow. The problem
11 * is that we currently do two separate recvfrom's, one for the 4-byte
12 * record length, and the second for the actual record. This could possibly
13 * be improved by always reading a minimum size of around 100 bytes and
14 * tucking any superfluous bytes away in a temporary store. Still, that
15 * leaves write requests out in the rain. An alternative may be to peek at
16 * the first skb in the queue, and if it matches the next TCP sequence
17 * number, to extract the record marker. Yuck.
18 *
19 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
20 */
21
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/errno.h>
25 #include <linux/fcntl.h>
26 #include <linux/net.h>
27 #include <linux/in.h>
28 #include <linux/inet.h>
29 #include <linux/udp.h>
30 #include <linux/tcp.h>
31 #include <linux/unistd.h>
32 #include <linux/slab.h>
33 #include <linux/netdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/file.h>
36 #include <linux/freezer.h>
37 #include <net/sock.h>
38 #include <net/checksum.h>
39 #include <net/ip.h>
40 #include <net/ipv6.h>
41 #include <net/tcp_states.h>
42 #include <asm/uaccess.h>
43 #include <asm/ioctls.h>
44
45 #include <linux/sunrpc/types.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/sunrpc/xdr.h>
48 #include <linux/sunrpc/svcsock.h>
49 #include <linux/sunrpc/stats.h>
50
51 /* SMP locking strategy:
52  *
53  *    svc_pool->sp_lock protects most of the fields of that pool.
54  *    svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
55  *    when both need to be taken (rare), svc_serv->sv_lock is first.
56  *    BKL protects svc_serv->sv_nrthread.
57  *    svc_sock->sk_lock protects the svc_sock->sk_deferred list
58  *    and the ->sk_info_authunix cache.
59  *    svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
60  *
61  *    Some flags can be set to certain values at any time
62  *    providing that certain rules are followed:
63  *
64  *    SK_CONN, SK_DATA, can be set or cleared at any time.
65  *        after a set, svc_sock_enqueue must be called.
66  *        after a clear, the socket must be read/accepted;
67  *        if this succeeds, it must be set again.
68  *    SK_CLOSE can be set at any time.  It is never cleared.
69  *    sk_inuse contains a bias of '1' until SK_DEAD is set,
70  *        so when sk_inuse hits zero, we know the socket is dead
71  *        and no-one is using it.
72  *    SK_DEAD can only be set while SK_BUSY is held, which ensures
73  *        no other thread will be using the socket or will try to
74  *        set SK_DEAD.
75  *
76  */
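/*
 * Illustrative sketch, not part of the original file: the "after a set,
 * svc_sock_enqueue must be called" rule above is exactly what the
 * sk_data_ready callbacks later in this file follow.  The function name
 * below is hypothetical; compare with svc_udp_data_ready().
 */
static void example_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        if (svsk) {
                set_bit(SK_DATA, &svsk->sk_flags);      /* 1: set the flag... */
                svc_sock_enqueue(svsk);                 /* 2: ...then enqueue */
        }
}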
77
78 #define RPCDBG_FACILITY    RPCDBG_SVCXPRT
79
80
81 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
82                                          int *errp, int flags);
83 static void svc_delete_socket(struct svc_sock *svsk);
84 static void svc_udp_data_ready(struct sock *, int);
85 static int svc_udp_recvfrom(struct svc_rqst *);
86 static int svc_udp_sendto(struct svc_rqst *);
87 static void svc_close_socket(struct svc_sock *svsk);
88
89 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
90 static int svc_deferred_recv(struct svc_rqst *rqstp);
91 static struct cache_deferred_req *svc_defer(struct cache_req *req);
92
93 /* apparently the "standard" is that clients close
94  * idle connections after 5 minutes, servers after
95  * 6 minutes
96  *   http://www.connectathon.org/talks96/nfstcp.pdf
97  */
98 static int svc_conn_age_period = 6*60;
99
100#ifdef CONFIG_DEBUG_LOCK_ALLOC
101static struct lock_class_key svc_key[2];
102static struct lock_class_key svc_slock_key[2];
103
104static inline void svc_reclassify_socket(struct socket *sock)
105{
106         struct sock *sk = sock->sk;
107         BUG_ON(sock_owned_by_user(sk));
108         switch (sk->sk_family) {
109 case AF_INET:
110 sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
111 &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
112 break;
113
114 case AF_INET6:
115 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
116 &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
117 break;
118
119 default:
120 BUG();
121 }
122}
123#else
124static inline void svc_reclassify_socket(struct socket *sock)
125{
126}
127#endif
128
129 static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
130 {
131         switch (addr->sa_family) {
132         case AF_INET:
133                 snprintf(buf, len, "%u.%u.%u.%u, port=%u",
134                         NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
135                         ntohs(((struct sockaddr_in *) addr)->sin_port));
136                 break;
137
138         case AF_INET6:
139                 snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
140                         NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
141                         ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
142                 break;
143
144         default:
145                 snprintf(buf, len, "unknown address type: %d", addr->sa_family);
146                 break;
147         }
148         return buf;
149 }
150
151 /**
152  * svc_print_addr - Format rq_addr field for printing
153  * @rqstp: svc_rqst struct containing address to print
154  * @buf: target buffer for formatted address
155  * @len: length of target buffer
156  *
157  */
158 char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
159 {
160         return __svc_print_addr(svc_addr(rqstp), buf, len);
161 }
162 EXPORT_SYMBOL_GPL(svc_print_addr);
163
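/*
 * Illustrative use, not part of the original file: callers hand in a stack
 * buffer of RPC_MAX_ADDRBUFLEN bytes and log the result; for an AF_INET
 * peer the string looks like "192.168.1.10, port=2049".  The function name
 * below is hypothetical; compare with the dprintk calls in svc_tcp_accept().
 */
static void example_log_peer(struct svc_rqst *rqstp)
{
        char buf[RPC_MAX_ADDRBUFLEN];

        dprintk("svc: request from %s\n", svc_print_addr(rqstp, buf, sizeof(buf)));
}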
164 /*
165  * Queue up an idle server thread.  Must have pool->sp_lock held.
166  * Note: this is really a stack rather than a queue, so that we only
167  * use as many different threads as we need, and the rest don't pollute
168  * the cache.
169  */
170 static inline void
171 svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
172 {
173         list_add(&rqstp->rq_list, &pool->sp_threads);
174 }
175
176 /*
177  * Dequeue an nfsd thread.  Must have pool->sp_lock held.
178  */
179 static inline void
180 svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
181 {
182         list_del(&rqstp->rq_list);
183 }
184
185/*
186 * Release an skbuff after use
187 */
188static inline void
189svc_release_skb(struct svc_rqst *rqstp)
190{
191 struct sk_buff *skb = rqstp->rq_skbuff;
192 struct svc_deferred_req *dr = rqstp->rq_deferred;
193
194 if (skb) {
195 rqstp->rq_skbuff = NULL;
196
197 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
198 skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
199 }
200 if (dr) {
201 rqstp->rq_deferred = NULL;
202 kfree(dr);
203 }
204}
205
206/*
207 * Any space to write?
208 */
209static inline unsigned long
210svc_sock_wspace(struct svc_sock *svsk)
211{
212 int wspace;
213
214 if (svsk->sk_sock->type == SOCK_STREAM)
215 wspace = sk_stream_wspace(svsk->sk_sk);
216 else
217 wspace = sock_wspace(svsk->sk_sk);
218
219 return wspace;
220}
221
222/*
223 * Queue up a socket with data pending. If there are idle nfsd
224 * processes, wake 'em up.
225 *
226 */
227static void
228svc_sock_enqueue(struct svc_sock *svsk)
229{
230 struct svc_serv *serv = svsk->sk_server;
bfd24160 231 struct svc_pool *pool;
1da177e4 232 struct svc_rqst *rqstp;
bfd24160 233 int cpu;
1da177e4
LT
234
235 if (!(svsk->sk_flags &
236 ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
237 return;
238 if (test_bit(SK_DEAD, &svsk->sk_flags))
239 return;
240
bfd24160
GB
241 cpu = get_cpu();
242 pool = svc_pool_for_cpu(svsk->sk_server, cpu);
243 put_cpu();
244
3262c816 245 spin_lock_bh(&pool->sp_lock);
1da177e4 246
3262c816
GB
247 if (!list_empty(&pool->sp_threads) &&
248 !list_empty(&pool->sp_sockets))
1da177e4
LT
249 printk(KERN_ERR
250 "svc_sock_enqueue: threads and sockets both waiting??\n");
251
252 if (test_bit(SK_DEAD, &svsk->sk_flags)) {
253 /* Don't enqueue dead sockets */
254 dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
255 goto out_unlock;
256 }
257
c081a0c7
GB
258 /* Mark socket as busy. It will remain in this state until the
259 * server has processed all pending data and put the socket back
260 * on the idle list. We update SK_BUSY atomically because
261 * it also guards against trying to enqueue the svc_sock twice.
262 */
263 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
264 /* Don't enqueue socket while already enqueued */
1da177e4
LT
265 dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
266 goto out_unlock;
267 }
3262c816
GB
268 BUG_ON(svsk->sk_pool != NULL);
269 svsk->sk_pool = pool;
1da177e4
LT
270
271 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
c6b0a9f8 272 if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
1da177e4
LT
273 > svc_sock_wspace(svsk))
274 && !test_bit(SK_CLOSE, &svsk->sk_flags)
275 && !test_bit(SK_CONN, &svsk->sk_flags)) {
276 /* Don't enqueue while not enough space for reply */
277 dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
c6b0a9f8 278 svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
1da177e4 279 svc_sock_wspace(svsk));
3262c816 280 svsk->sk_pool = NULL;
c081a0c7 281 clear_bit(SK_BUSY, &svsk->sk_flags);
1da177e4
LT
282 goto out_unlock;
283 }
284 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
285
1da177e4 286
3262c816
GB
287 if (!list_empty(&pool->sp_threads)) {
288 rqstp = list_entry(pool->sp_threads.next,
1da177e4
LT
289 struct svc_rqst,
290 rq_list);
291 dprintk("svc: socket %p served by daemon %p\n",
292 svsk->sk_sk, rqstp);
3262c816 293 svc_thread_dequeue(pool, rqstp);
1da177e4 294 if (rqstp->rq_sock)
cca5172a 295 printk(KERN_ERR
1da177e4
LT
296 "svc_sock_enqueue: server %p, rq_sock=%p!\n",
297 rqstp, rqstp->rq_sock);
298 rqstp->rq_sock = svsk;
c45c357d 299 atomic_inc(&svsk->sk_inuse);
c6b0a9f8 300 rqstp->rq_reserved = serv->sv_max_mesg;
5685f0fa 301 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
3262c816 302 BUG_ON(svsk->sk_pool != pool);
1da177e4
LT
303 wake_up(&rqstp->rq_wait);
304 } else {
305 dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
3262c816
GB
306 list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
307 BUG_ON(svsk->sk_pool != pool);
1da177e4
LT
308 }
309
310out_unlock:
3262c816 311 spin_unlock_bh(&pool->sp_lock);
1da177e4
LT
312}
313
314/*
3262c816 315 * Dequeue the first socket. Must be called with the pool->sp_lock held.
1da177e4
LT
316 */
317static inline struct svc_sock *
3262c816 318svc_sock_dequeue(struct svc_pool *pool)
1da177e4
LT
319{
320 struct svc_sock *svsk;
321
3262c816 322 if (list_empty(&pool->sp_sockets))
1da177e4
LT
323 return NULL;
324
3262c816 325 svsk = list_entry(pool->sp_sockets.next,
1da177e4
LT
326 struct svc_sock, sk_ready);
327 list_del_init(&svsk->sk_ready);
328
329 dprintk("svc: socket %p dequeued, inuse=%d\n",
c45c357d 330 svsk->sk_sk, atomic_read(&svsk->sk_inuse));
1da177e4
LT
331
332 return svsk;
333}
334
335/*
336 * Having read something from a socket, check whether it
337 * needs to be re-enqueued.
338 * Note: SK_DATA only gets cleared when a read-attempt finds
339 * no (or insufficient) data.
340 */
341static inline void
342svc_sock_received(struct svc_sock *svsk)
343{
3262c816 344 svsk->sk_pool = NULL;
1da177e4
LT
345 clear_bit(SK_BUSY, &svsk->sk_flags);
346 svc_sock_enqueue(svsk);
347}
348
349
350/**
351 * svc_reserve - change the space reserved for the reply to a request.
352 * @rqstp: The request in question
353 * @space: new max space to reserve
354 *
355 * Each request reserves some space on the output queue of the socket
356 * to make sure the reply fits. This function reduces that reserved
357 * space to be the amount of space used already, plus @space.
358 *
359 */
360 void svc_reserve(struct svc_rqst *rqstp, int space)
361 {
362         space += rqstp->rq_res.head[0].iov_len;
363
364         if (space < rqstp->rq_reserved) {
365                 struct svc_sock *svsk = rqstp->rq_sock;
366                 atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
367                 rqstp->rq_reserved = space;
368
369                 svc_sock_enqueue(svsk);
370         }
371 }
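/*
 * Illustrative sketch, not part of the original file: a service that knows
 * its reply will be small can return most of its reservation early, so the
 * wspace test in svc_sock_enqueue() stops holding back other requests on
 * the same socket.  The function name and the 256-byte figure below are
 * hypothetical.
 */
static void example_trim_reservation(struct svc_rqst *rqstp)
{
        /* reply adds at most 256 bytes beyond the head iovec */
        svc_reserve(rqstp, 256);
}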
372
373/*
374 * Release a socket after use.
375 */
376static inline void
377svc_sock_put(struct svc_sock *svsk)
378{
aaf68cfb
N
379 if (atomic_dec_and_test(&svsk->sk_inuse)) {
380 BUG_ON(! test_bit(SK_DEAD, &svsk->sk_flags));
381
202dd450 382 dprintk("svc: releasing dead socket\n");
d6740df9
NB
383 if (svsk->sk_sock->file)
384 sockfd_put(svsk->sk_sock);
385 else
386 sock_release(svsk->sk_sock);
387 if (svsk->sk_info_authunix != NULL)
388 svcauth_unix_info_release(svsk->sk_info_authunix);
1da177e4
LT
389 kfree(svsk);
390 }
1da177e4
LT
391}
392
393static void
394svc_sock_release(struct svc_rqst *rqstp)
395{
396 struct svc_sock *svsk = rqstp->rq_sock;
397
398 svc_release_skb(rqstp);
399
44524359 400 svc_free_res_pages(rqstp);
1da177e4
LT
401 rqstp->rq_res.page_len = 0;
402 rqstp->rq_res.page_base = 0;
403
404
405 /* Reset response buffer and release
406 * the reservation.
407 * But first, check that enough space was reserved
408 * for the reply, otherwise we have a bug!
409 */
410 if ((rqstp->rq_res.len) > rqstp->rq_reserved)
411 printk(KERN_ERR "RPC request reserved %d but used %d\n",
412 rqstp->rq_reserved,
413 rqstp->rq_res.len);
414
415 rqstp->rq_res.head[0].iov_len = 0;
416 svc_reserve(rqstp, 0);
417 rqstp->rq_sock = NULL;
418
419 svc_sock_put(svsk);
420}
421
422/*
423 * External function to wake up a server waiting for data
3262c816
GB
424 * This really only makes sense for services like lockd
425 * which have exactly one thread anyway.
1da177e4
LT
426 */
427void
428svc_wake_up(struct svc_serv *serv)
429{
430 struct svc_rqst *rqstp;
3262c816
GB
431 unsigned int i;
432 struct svc_pool *pool;
433
434 for (i = 0; i < serv->sv_nrpools; i++) {
435 pool = &serv->sv_pools[i];
436
437 spin_lock_bh(&pool->sp_lock);
438 if (!list_empty(&pool->sp_threads)) {
439 rqstp = list_entry(pool->sp_threads.next,
440 struct svc_rqst,
441 rq_list);
442 dprintk("svc: daemon %p woken up.\n", rqstp);
443 /*
444 svc_thread_dequeue(pool, rqstp);
445 rqstp->rq_sock = NULL;
446 */
447 wake_up(&rqstp->rq_wait);
448 }
449 spin_unlock_bh(&pool->sp_lock);
1da177e4 450 }
1da177e4
LT
451}
452
b92503b2
CL
453union svc_pktinfo_u {
454 struct in_pktinfo pkti;
b92503b2 455 struct in6_pktinfo pkti6;
b92503b2 456};
bc375ea7
DM
457#define SVC_PKTINFO_SPACE \
458 CMSG_SPACE(sizeof(union svc_pktinfo_u))
b92503b2
CL
459
460static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
461{
462 switch (rqstp->rq_sock->sk_sk->sk_family) {
463 case AF_INET: {
464 struct in_pktinfo *pki = CMSG_DATA(cmh);
465
466 cmh->cmsg_level = SOL_IP;
467 cmh->cmsg_type = IP_PKTINFO;
468 pki->ipi_ifindex = 0;
469 pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
470 cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
471 }
472 break;
5a05ed73 473
b92503b2
CL
474 case AF_INET6: {
475 struct in6_pktinfo *pki = CMSG_DATA(cmh);
476
477 cmh->cmsg_level = SOL_IPV6;
478 cmh->cmsg_type = IPV6_PKTINFO;
479 pki->ipi6_ifindex = 0;
480 ipv6_addr_copy(&pki->ipi6_addr,
481 &rqstp->rq_daddr.addr6);
482 cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
483 }
484 break;
b92503b2
CL
485 }
486 return;
487}
488
1da177e4
LT
489/*
490 * Generic sendto routine
491 */
492static int
493svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
494{
495 struct svc_sock *svsk = rqstp->rq_sock;
496 struct socket *sock = svsk->sk_sock;
497 int slen;
bc375ea7
DM
498 union {
499 struct cmsghdr hdr;
500 long all[SVC_PKTINFO_SPACE / sizeof(long)];
501 } buffer;
502 struct cmsghdr *cmh = &buffer.hdr;
1da177e4
LT
503 int len = 0;
504 int result;
505 int size;
506 struct page **ppage = xdr->pages;
507 size_t base = xdr->page_base;
508 unsigned int pglen = xdr->page_len;
509 unsigned int flags = MSG_MORE;
ad06e4bd 510 char buf[RPC_MAX_ADDRBUFLEN];
1da177e4
LT
511
512 slen = xdr->len;
513
514 if (rqstp->rq_prot == IPPROTO_UDP) {
b92503b2
CL
515 struct msghdr msg = {
516 .msg_name = &rqstp->rq_addr,
517 .msg_namelen = rqstp->rq_addrlen,
518 .msg_control = cmh,
519 .msg_controllen = sizeof(buffer),
520 .msg_flags = MSG_MORE,
521 };
522
523 svc_set_cmsg_data(rqstp, cmh);
1da177e4
LT
524
525 if (sock_sendmsg(sock, &msg, 0) < 0)
526 goto out;
527 }
528
529 /* send head */
530 if (slen == xdr->head[0].iov_len)
531 flags = 0;
44524359
N
532 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
533 xdr->head[0].iov_len, flags);
1da177e4
LT
534 if (len != xdr->head[0].iov_len)
535 goto out;
536 slen -= xdr->head[0].iov_len;
537 if (slen == 0)
538 goto out;
539
540 /* send page data */
541 size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
542 while (pglen > 0) {
543 if (slen == size)
544 flags = 0;
e6242e92 545 result = kernel_sendpage(sock, *ppage, base, size, flags);
1da177e4
LT
546 if (result > 0)
547 len += result;
548 if (result != size)
549 goto out;
550 slen -= size;
551 pglen -= size;
552 size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
553 base = 0;
554 ppage++;
555 }
556 /* send tail */
557 if (xdr->tail[0].iov_len) {
44524359
N
558 result = kernel_sendpage(sock, rqstp->rq_respages[0],
559 ((unsigned long)xdr->tail[0].iov_base)
cca5172a 560 & (PAGE_SIZE-1),
1da177e4
LT
561 xdr->tail[0].iov_len, 0);
562
563 if (result > 0)
564 len += result;
565 }
566out:
ad06e4bd
CL
567 dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
568 rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
569 xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));
1da177e4
LT
570
571 return len;
572}
573
80212d59
N
574/*
575 * Report socket names for nfsdfs
576 */
577static int one_sock_name(char *buf, struct svc_sock *svsk)
578{
579 int len;
580
581 switch(svsk->sk_sk->sk_family) {
582 case AF_INET:
583 len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
584 svsk->sk_sk->sk_protocol==IPPROTO_UDP?
585 "udp" : "tcp",
586 NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
587 inet_sk(svsk->sk_sk)->num);
588 break;
589 default:
590 len = sprintf(buf, "*unknown-%d*\n",
591 svsk->sk_sk->sk_family);
592 }
593 return len;
594}
595
596int
b41b66d6 597svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
80212d59 598{
b41b66d6 599 struct svc_sock *svsk, *closesk = NULL;
80212d59
N
600 int len = 0;
601
602 if (!serv)
603 return 0;
aaf68cfb 604 spin_lock_bh(&serv->sv_lock);
80212d59
N
605 list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
606 int onelen = one_sock_name(buf+len, svsk);
b41b66d6
N
607 if (toclose && strcmp(toclose, buf+len) == 0)
608 closesk = svsk;
609 else
610 len += onelen;
80212d59 611 }
aaf68cfb 612 spin_unlock_bh(&serv->sv_lock);
b41b66d6 613 if (closesk)
5680c446
N
614 /* Should unregister with portmap, but you cannot
615 * unregister just one protocol...
616 */
aaf68cfb 617 svc_close_socket(closesk);
37a03472
N
618 else if (toclose)
619 return -ENOENT;
80212d59
N
620 return len;
621}
622EXPORT_SYMBOL(svc_sock_names);
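/*
 * Worked example, not part of the original file: for a UDP socket bound to
 * the wildcard address on port 2049, one_sock_name() above produces the line
 *
 *      ipv4 udp 0.0.0.0 2049
 *
 * and svc_sock_names() concatenates one such line per permanent socket,
 * which is the text user space reads back (the example values are
 * hypothetical).
 */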
623
1da177e4
LT
624/*
625 * Check input queue length
626 */
627static int
628svc_recv_available(struct svc_sock *svsk)
629{
1da177e4
LT
630 struct socket *sock = svsk->sk_sock;
631 int avail, err;
632
e6242e92 633 err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);
1da177e4
LT
634
635 return (err >= 0)? avail : err;
636}
637
638/*
639 * Generic recvfrom routine.
640 */
641static int
642svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
643{
067d7817 644 struct svc_sock *svsk = rqstp->rq_sock;
1ba95105
CL
645 struct msghdr msg = {
646 .msg_flags = MSG_DONTWAIT,
647 };
a9747692 648 struct sockaddr *sin;
1ba95105 649 int len;
1da177e4 650
1ba95105
CL
651 len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
652 msg.msg_flags);
1da177e4
LT
653
654 /* sock_recvmsg doesn't fill in the name/namelen, so we must..
1da177e4 655 */
067d7817
CL
656 memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
657 rqstp->rq_addrlen = svsk->sk_remotelen;
1da177e4 658
a9747692
FM
659 /* Destination address in request is needed for binding the
660 * source address in RPC callbacks later.
661 */
662 sin = (struct sockaddr *)&svsk->sk_local;
663 switch (sin->sa_family) {
664 case AF_INET:
665 rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
666 break;
667 case AF_INET6:
668 rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
669 break;
670 }
671
1da177e4 672 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
1ba95105 673 svsk, iov[0].iov_base, iov[0].iov_len, len);
1da177e4
LT
674
675 return len;
676}
677
678/*
679 * Set socket snd and rcv buffer lengths
680 */
681static inline void
682svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
683{
684#if 0
685 mm_segment_t oldfs;
686 oldfs = get_fs(); set_fs(KERNEL_DS);
687 sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
688 (char*)&snd, sizeof(snd));
689 sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
690 (char*)&rcv, sizeof(rcv));
691#else
692 /* sock_setsockopt limits use to sysctl_?mem_max,
693 * which isn't acceptable. Until that is made conditional
694 * on not having CAP_SYS_RESOURCE or similar, we go direct...
695 * DaveM said I could!
696 */
697 lock_sock(sock->sk);
698 sock->sk->sk_sndbuf = snd * 2;
699 sock->sk->sk_rcvbuf = rcv * 2;
700 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
701 release_sock(sock->sk);
702#endif
703}
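/*
 * Worked example with hypothetical numbers, not part of the original file:
 * with sv_max_mesg = 32768 bytes and 8 server threads, svc_udp_recvfrom()
 * below requests
 *
 *      snd = rcv = (8 + 3) * 32768 = 360448 bytes
 *
 * and this helper then sets sk_sndbuf and sk_rcvbuf to twice that,
 * deliberately bypassing the sysctl_wmem_max/sysctl_rmem_max clamp that
 * sock_setsockopt() would apply.
 */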
704/*
705 * INET callback when data has been received on the socket.
706 */
707static void
708svc_udp_data_ready(struct sock *sk, int count)
709{
939bb7ef 710 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
1da177e4 711
939bb7ef
NB
712 if (svsk) {
713 dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
714 svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
715 set_bit(SK_DATA, &svsk->sk_flags);
716 svc_sock_enqueue(svsk);
717 }
1da177e4
LT
718 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
719 wake_up_interruptible(sk->sk_sleep);
720}
721
722/*
723 * INET callback when space is newly available on the socket.
724 */
725static void
726svc_write_space(struct sock *sk)
727{
728 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
729
730 if (svsk) {
731 dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
732 svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
733 svc_sock_enqueue(svsk);
734 }
735
736 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
939bb7ef 737 dprintk("RPC svc_write_space: someone sleeping on %p\n",
1da177e4
LT
738 svsk);
739 wake_up_interruptible(sk->sk_sleep);
740 }
741}
742
7a37f578
N
743static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
744 struct cmsghdr *cmh)
95756482
CL
745{
746 switch (rqstp->rq_sock->sk_sk->sk_family) {
747 case AF_INET: {
7a37f578
N
748 struct in_pktinfo *pki = CMSG_DATA(cmh);
749 rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
95756482 750 break;
7a37f578 751 }
95756482 752 case AF_INET6: {
7a37f578
N
753 struct in6_pktinfo *pki = CMSG_DATA(cmh);
754 ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
95756482 755 break;
7a37f578 756 }
95756482 757 }
95756482
CL
758}
759
1da177e4
LT
760/*
761 * Receive a datagram from a UDP socket.
762 */
1da177e4
LT
763static int
764svc_udp_recvfrom(struct svc_rqst *rqstp)
765{
766 struct svc_sock *svsk = rqstp->rq_sock;
767 struct svc_serv *serv = svsk->sk_server;
768 struct sk_buff *skb;
bc375ea7
DM
769 union {
770 struct cmsghdr hdr;
771 long all[SVC_PKTINFO_SPACE / sizeof(long)];
772 } buffer;
773 struct cmsghdr *cmh = &buffer.hdr;
1da177e4 774 int err, len;
7a37f578
N
775 struct msghdr msg = {
776 .msg_name = svc_addr(rqstp),
777 .msg_control = cmh,
778 .msg_controllen = sizeof(buffer),
779 .msg_flags = MSG_DONTWAIT,
780 };
1da177e4
LT
781
782 if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
783 /* udp sockets need large rcvbuf as all pending
784 * requests are still in that buffer. sndbuf must
785 * also be large enough that there is enough space
3262c816
GB
786 * for one reply per thread. We count all threads
787 * rather than threads in a particular pool, which
788 * provides an upper bound on the number of threads
789 * which will access the socket.
1da177e4
LT
790 */
791 svc_sock_setbufsize(svsk->sk_sock,
c6b0a9f8
N
792 (serv->sv_nrthreads+3) * serv->sv_max_mesg,
793 (serv->sv_nrthreads+3) * serv->sv_max_mesg);
1da177e4
LT
794
795 if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
796 svc_sock_received(svsk);
797 return svc_deferred_recv(rqstp);
798 }
799
aaf68cfb
N
800 if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
801 svc_delete_socket(svsk);
802 return 0;
803 }
804
1da177e4 805 clear_bit(SK_DATA, &svsk->sk_flags);
05ed690e
N
806 skb = NULL;
807 err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
808 0, 0, MSG_PEEK | MSG_DONTWAIT);
809 if (err >= 0)
810 skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);
811
812 if (skb == NULL) {
813 if (err != -EAGAIN) {
814 /* possibly an icmp error */
815 dprintk("svc: recvfrom returned error %d\n", -err);
816 set_bit(SK_DATA, &svsk->sk_flags);
1da177e4 817 }
05ed690e
N
818 svc_sock_received(svsk);
819 return -EAGAIN;
1da177e4 820 }
7a37f578 821 rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
b7aa0bf7
ED
822 if (skb->tstamp.tv64 == 0) {
823 skb->tstamp = ktime_get_real();
cca5172a 824 /* Don't enable netstamp, sunrpc doesn't
1da177e4
LT
825 need that much accuracy */
826 }
b7aa0bf7 827 svsk->sk_sk->sk_stamp = skb->tstamp;
1da177e4
LT
828 set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */
829
830 /*
831 * Maybe more packets - kick another thread ASAP.
832 */
833 svc_sock_received(svsk);
834
835 len = skb->len - sizeof(struct udphdr);
836 rqstp->rq_arg.len = len;
837
95756482 838 rqstp->rq_prot = IPPROTO_UDP;
27459f09 839
7a37f578
N
840 if (cmh->cmsg_level != IPPROTO_IP ||
841 cmh->cmsg_type != IP_PKTINFO) {
842 if (net_ratelimit())
843 printk("rpcsvc: received unknown control message:"
844 "%d/%d\n",
845 cmh->cmsg_level, cmh->cmsg_type);
846 skb_free_datagram(svsk->sk_sk, skb);
847 return 0;
848 }
849 svc_udp_get_dest_address(rqstp, cmh);
1da177e4
LT
850
851 if (skb_is_nonlinear(skb)) {
852 /* we have to copy */
853 local_bh_disable();
854 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
855 local_bh_enable();
856 /* checksum error */
857 skb_free_datagram(svsk->sk_sk, skb);
858 return 0;
859 }
860 local_bh_enable();
cca5172a 861 skb_free_datagram(svsk->sk_sk, skb);
1da177e4
LT
862 } else {
863 /* we can use it in-place */
864 rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
865 rqstp->rq_arg.head[0].iov_len = len;
fb286bb2
HX
866 if (skb_checksum_complete(skb)) {
867 skb_free_datagram(svsk->sk_sk, skb);
868 return 0;
1da177e4
LT
869 }
870 rqstp->rq_skbuff = skb;
871 }
872
873 rqstp->rq_arg.page_base = 0;
874 if (len <= rqstp->rq_arg.head[0].iov_len) {
875 rqstp->rq_arg.head[0].iov_len = len;
876 rqstp->rq_arg.page_len = 0;
44524359 877 rqstp->rq_respages = rqstp->rq_pages+1;
1da177e4
LT
878 } else {
879 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
44524359 880 rqstp->rq_respages = rqstp->rq_pages + 1 +
172589cc 881 DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
1da177e4
LT
882 }
883
884 if (serv->sv_stats)
885 serv->sv_stats->netudpcnt++;
886
887 return len;
888}
889
890static int
891svc_udp_sendto(struct svc_rqst *rqstp)
892{
893 int error;
894
895 error = svc_sendto(rqstp, &rqstp->rq_res);
896 if (error == -ECONNREFUSED)
897 /* ICMP error on earlier request. */
898 error = svc_sendto(rqstp, &rqstp->rq_res);
899
900 return error;
901}
902
903 static struct svc_xprt_ops svc_udp_ops = {
904         .xpo_recvfrom = svc_udp_recvfrom,
905         .xpo_sendto = svc_udp_sendto,
906 };
907
908 static struct svc_xprt_class svc_udp_class = {
909         .xcl_name = "udp",
910         .xcl_ops = &svc_udp_ops,
911         .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
912 };
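/*
 * Note on this commit: the per-transport send and receive routines are now
 * reached through the transport class instead of sk_sendto/sk_recvfrom
 * fields in svc_sock.  Callers go through the ops table, e.g.
 *
 *      len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
 *      ...
 *      len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
 *
 * as svc_recv() and svc_send() do later in this file.
 */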
913
1da177e4
LT
914static void
915svc_udp_init(struct svc_sock *svsk)
916{
7a37f578
N
917 int one = 1;
918 mm_segment_t oldfs;
919
360d8738 920 svc_xprt_init(&svc_udp_class, &svsk->sk_xprt);
1da177e4
LT
921 svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
922 svsk->sk_sk->sk_write_space = svc_write_space;
1da177e4
LT
923
924     /* initial setting must have enough space to
925      * receive and respond to one request.
1da177e4
LT
926 * svc_udp_recvfrom will re-adjust if necessary
927 */
928 svc_sock_setbufsize(svsk->sk_sock,
c6b0a9f8
N
929 3 * svsk->sk_server->sv_max_mesg,
930 3 * svsk->sk_server->sv_max_mesg);
1da177e4
LT
931
932 set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
933 set_bit(SK_CHNGBUF, &svsk->sk_flags);
7a37f578
N
934
935 oldfs = get_fs();
936 set_fs(KERNEL_DS);
937 /* make sure we get destination address info */
938 svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
939 (char __user *)&one, sizeof(one));
940 set_fs(oldfs);
1da177e4
LT
941}
942
943/*
944 * A data_ready event on a listening socket means there's a connection
945 * pending. Do not use state_change as a substitute for it.
946 */
947static void
948svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
949{
939bb7ef 950 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
1da177e4
LT
951
952 dprintk("svc: socket %p TCP (listen) state change %d\n",
939bb7ef 953 sk, sk->sk_state);
1da177e4 954
939bb7ef
NB
955 /*
956  * This callback may be called twice when a new connection
957 * is established as a child socket inherits everything
958 * from a parent LISTEN socket.
959 * 1) data_ready method of the parent socket will be called
960 * when one of child sockets become ESTABLISHED.
961 * 2) data_ready method of the child socket may be called
962 * when it receives data before the socket is accepted.
963 * In case of 2, we should ignore it silently.
964 */
965 if (sk->sk_state == TCP_LISTEN) {
966 if (svsk) {
967 set_bit(SK_CONN, &svsk->sk_flags);
968 svc_sock_enqueue(svsk);
969 } else
970 printk("svc: socket %p: no user data\n", sk);
1da177e4 971 }
939bb7ef 972
1da177e4
LT
973 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
974 wake_up_interruptible_all(sk->sk_sleep);
975}
976
977/*
978 * A state change on a connected socket means it's dying or dead.
979 */
980static void
981svc_tcp_state_change(struct sock *sk)
982{
939bb7ef 983 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
1da177e4
LT
984
985 dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
939bb7ef 986 sk, sk->sk_state, sk->sk_user_data);
1da177e4 987
939bb7ef 988 if (!svsk)
1da177e4 989 printk("svc: socket %p: no user data\n", sk);
939bb7ef
NB
990 else {
991 set_bit(SK_CLOSE, &svsk->sk_flags);
992 svc_sock_enqueue(svsk);
1da177e4 993 }
1da177e4
LT
994 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
995 wake_up_interruptible_all(sk->sk_sleep);
996}
997
998static void
999svc_tcp_data_ready(struct sock *sk, int count)
1000{
939bb7ef 1001 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
1da177e4
LT
1002
1003 dprintk("svc: socket %p TCP data ready (svsk %p)\n",
939bb7ef
NB
1004 sk, sk->sk_user_data);
1005 if (svsk) {
1006 set_bit(SK_DATA, &svsk->sk_flags);
1007 svc_sock_enqueue(svsk);
1008 }
1da177e4
LT
1009 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1010 wake_up_interruptible(sk->sk_sleep);
1011}
1012
bcdb81ae
CL
1013static inline int svc_port_is_privileged(struct sockaddr *sin)
1014{
1015 switch (sin->sa_family) {
1016 case AF_INET:
1017 return ntohs(((struct sockaddr_in *)sin)->sin_port)
1018 < PROT_SOCK;
bcdb81ae
CL
1019 case AF_INET6:
1020 return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
1021 < PROT_SOCK;
bcdb81ae
CL
1022 default:
1023 return 0;
1024 }
1025}
1026
1da177e4
LT
1027/*
1028 * Accept a TCP connection
1029 */
1030static void
1031svc_tcp_accept(struct svc_sock *svsk)
1032{
cdd88b9f
AM
1033 struct sockaddr_storage addr;
1034 struct sockaddr *sin = (struct sockaddr *) &addr;
1da177e4
LT
1035 struct svc_serv *serv = svsk->sk_server;
1036 struct socket *sock = svsk->sk_sock;
1037 struct socket *newsock;
1da177e4
LT
1038 struct svc_sock *newsvsk;
1039 int err, slen;
ad06e4bd 1040 char buf[RPC_MAX_ADDRBUFLEN];
1da177e4
LT
1041
1042 dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
1043 if (!sock)
1044 return;
1045
e6242e92
SS
1046 clear_bit(SK_CONN, &svsk->sk_flags);
1047 err = kernel_accept(sock, &newsock, O_NONBLOCK);
1048 if (err < 0) {
1da177e4
LT
1049 if (err == -ENOMEM)
1050 printk(KERN_WARNING "%s: no more sockets!\n",
1051 serv->sv_name);
e6242e92 1052 else if (err != -EAGAIN && net_ratelimit())
1da177e4
LT
1053 printk(KERN_WARNING "%s: accept failed (err %d)!\n",
1054 serv->sv_name, -err);
e6242e92 1055 return;
1da177e4 1056 }
e6242e92 1057
1da177e4
LT
1058 set_bit(SK_CONN, &svsk->sk_flags);
1059 svc_sock_enqueue(svsk);
1060
cdd88b9f 1061 err = kernel_getpeername(newsock, sin, &slen);
1da177e4
LT
1062 if (err < 0) {
1063 if (net_ratelimit())
1064 printk(KERN_WARNING "%s: peername failed (err %d)!\n",
1065 serv->sv_name, -err);
1066 goto failed; /* aborted connection or whatever */
1067 }
1068
1069 /* Ideally, we would want to reject connections from unauthorized
ad06e4bd
CL
1070 * hosts here, but when we get encryption, the IP of the host won't
1071 * tell us anything. For now just warn about unpriv connections.
1da177e4 1072 */
cdd88b9f 1073 if (!svc_port_is_privileged(sin)) {
1da177e4 1074 dprintk(KERN_WARNING
ad06e4bd 1075 "%s: connect from unprivileged port: %s\n",
cca5172a 1076 serv->sv_name,
cdd88b9f 1077 __svc_print_addr(sin, buf, sizeof(buf)));
1da177e4 1078 }
ad06e4bd 1079 dprintk("%s: connect from %s\n", serv->sv_name,
cdd88b9f 1080 __svc_print_addr(sin, buf, sizeof(buf)));
1da177e4
LT
1081
1082 /* make sure that a write doesn't block forever when
1083 * low on memory
1084 */
1085 newsock->sk->sk_sndtimeo = HZ*30;
1086
6b174337
CL
1087 if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
1088 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
1da177e4 1089 goto failed;
cdd88b9f 1090 memcpy(&newsvsk->sk_remote, sin, slen);
067d7817 1091 newsvsk->sk_remotelen = slen;
a9747692
FM
1092 err = kernel_getsockname(newsock, sin, &slen);
1093 if (unlikely(err < 0)) {
1094 dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
1095 slen = offsetof(struct sockaddr, sa_data);
1096 }
1097 memcpy(&newsvsk->sk_local, sin, slen);
067d7817 1098
e79eff1f 1099 svc_sock_received(newsvsk);
1da177e4
LT
1100
1101 /* make sure that we don't have too many active connections.
1102 * If we have, something must be dropped.
1103 *
1104 * There's no point in trying to do random drop here for
1105  * DoS prevention. The NFS client does 1 reconnect in 15
1106 * seconds. An attacker can easily beat that.
1107 *
1108 * The only somewhat efficient mechanism would be if drop
1109 * old connections from the same IP first. But right now
1110 * we don't even record the client IP in svc_sock.
1111 */
1112 if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
1113 struct svc_sock *svsk = NULL;
1114 spin_lock_bh(&serv->sv_lock);
1115 if (!list_empty(&serv->sv_tempsocks)) {
1116 if (net_ratelimit()) {
1117 /* Try to help the admin */
1118 printk(KERN_NOTICE "%s: too many open TCP "
1119 "sockets, consider increasing the "
1120 "number of nfsd threads\n",
1121 serv->sv_name);
ad06e4bd
CL
1122 printk(KERN_NOTICE
1123 "%s: last TCP connect from %s\n",
9db619e6
WW
1124 serv->sv_name, __svc_print_addr(sin,
1125 buf, sizeof(buf)));
1da177e4
LT
1126 }
1127 /*
1128 * Always select the oldest socket. It's not fair,
1129 * but so is life
1130 */
1131 svsk = list_entry(serv->sv_tempsocks.prev,
1132 struct svc_sock,
1133 sk_list);
1134 set_bit(SK_CLOSE, &svsk->sk_flags);
c45c357d 1135 atomic_inc(&svsk->sk_inuse);
1da177e4
LT
1136 }
1137 spin_unlock_bh(&serv->sv_lock);
1138
1139 if (svsk) {
1140 svc_sock_enqueue(svsk);
1141 svc_sock_put(svsk);
1142 }
1143
1144 }
1145
1146 if (serv->sv_stats)
1147 serv->sv_stats->nettcpconn++;
1148
1149 return;
1150
1151failed:
1152 sock_release(newsock);
1153 return;
1154}
1155
1156/*
1157 * Receive data from a TCP socket.
1158 */
1159static int
1160svc_tcp_recvfrom(struct svc_rqst *rqstp)
1161{
1162 struct svc_sock *svsk = rqstp->rq_sock;
1163 struct svc_serv *serv = svsk->sk_server;
1164 int len;
3cc03b16 1165 struct kvec *vec;
1da177e4
LT
1166 int pnum, vlen;
1167
1168 dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
1169 svsk, test_bit(SK_DATA, &svsk->sk_flags),
1170 test_bit(SK_CONN, &svsk->sk_flags),
1171 test_bit(SK_CLOSE, &svsk->sk_flags));
1172
1173 if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
1174 svc_sock_received(svsk);
1175 return svc_deferred_recv(rqstp);
1176 }
1177
1178 if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
1179 svc_delete_socket(svsk);
1180 return 0;
1181 }
1182
1a047060 1183 if (svsk->sk_sk->sk_state == TCP_LISTEN) {
1da177e4
LT
1184 svc_tcp_accept(svsk);
1185 svc_sock_received(svsk);
1186 return 0;
1187 }
1188
1189 if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
1190 /* sndbuf needs to have room for one request
1191 * per thread, otherwise we can stall even when the
1192 * network isn't a bottleneck.
3262c816
GB
1193 *
1194 * We count all threads rather than threads in a
1195 * particular pool, which provides an upper bound
1196 * on the number of threads which will access the socket.
1197 *
1da177e4 1198 * rcvbuf just needs to be able to hold a few requests.
1199  * Normally they will be removed from the queue
1200  * as soon as a complete request arrives.
1201 */
1202 svc_sock_setbufsize(svsk->sk_sock,
c6b0a9f8
N
1203 (serv->sv_nrthreads+3) * serv->sv_max_mesg,
1204 3 * serv->sv_max_mesg);
1da177e4
LT
1205
1206 clear_bit(SK_DATA, &svsk->sk_flags);
1207
1208 /* Receive data. If we haven't got the record length yet, get
1209 * the next four bytes. Otherwise try to gobble up as much as
1210 * possible up to the complete record length.
1211 */
1212 if (svsk->sk_tcplen < 4) {
1213 unsigned long want = 4 - svsk->sk_tcplen;
1214 struct kvec iov;
1215
1216 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
1217 iov.iov_len = want;
1218 if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
1219 goto error;
1220 svsk->sk_tcplen += len;
1221
1222 if (len < want) {
1223 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
cca5172a 1224 len, want);
1da177e4
LT
1225 svc_sock_received(svsk);
1226 return -EAGAIN; /* record header not complete */
1227 }
1228
1229 svsk->sk_reclen = ntohl(svsk->sk_reclen);
1230 if (!(svsk->sk_reclen & 0x80000000)) {
1231 /* FIXME: technically, a record can be fragmented,
1232 * and non-terminal fragments will not have the top
1233 * bit set in the fragment length header.
1234 * But apparently no known nfs clients send fragmented
1235 * records. */
34e9a63b
N
1236 if (net_ratelimit())
1237 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
1238 " (non-terminal)\n",
1239 (unsigned long) svsk->sk_reclen);
1da177e4
LT
1240 goto err_delete;
1241 }
1242 svsk->sk_reclen &= 0x7fffffff;
1243 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
c6b0a9f8 1244 if (svsk->sk_reclen > serv->sv_max_mesg) {
34e9a63b
N
1245 if (net_ratelimit())
1246 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
1247 " (large)\n",
1248 (unsigned long) svsk->sk_reclen);
1da177e4
LT
1249 goto err_delete;
1250 }
1251 }
1252
1253 /* Check whether enough data is available */
1254 len = svc_recv_available(svsk);
1255 if (len < 0)
1256 goto error;
1257
1258 if (len < svsk->sk_reclen) {
1259 dprintk("svc: incomplete TCP record (%d of %d)\n",
1260 len, svsk->sk_reclen);
1261 svc_sock_received(svsk);
1262 return -EAGAIN; /* record not complete */
1263 }
1264 len = svsk->sk_reclen;
1265 set_bit(SK_DATA, &svsk->sk_flags);
1266
3cc03b16 1267 vec = rqstp->rq_vec;
1da177e4
LT
1268 vec[0] = rqstp->rq_arg.head[0];
1269 vlen = PAGE_SIZE;
1270 pnum = 1;
1271 while (vlen < len) {
44524359 1272 vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
1da177e4
LT
1273 vec[pnum].iov_len = PAGE_SIZE;
1274 pnum++;
1275 vlen += PAGE_SIZE;
1276 }
44524359 1277 rqstp->rq_respages = &rqstp->rq_pages[pnum];
1da177e4
LT
1278
1279 /* Now receive data */
1280 len = svc_recvfrom(rqstp, vec, pnum, len);
1281 if (len < 0)
1282 goto error;
1283
1284 dprintk("svc: TCP complete record (%d bytes)\n", len);
1285 rqstp->rq_arg.len = len;
1286 rqstp->rq_arg.page_base = 0;
1287 if (len <= rqstp->rq_arg.head[0].iov_len) {
1288 rqstp->rq_arg.head[0].iov_len = len;
1289 rqstp->rq_arg.page_len = 0;
1290 } else {
1291 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
1292 }
1293
1294 rqstp->rq_skbuff = NULL;
1295 rqstp->rq_prot = IPPROTO_TCP;
1296
1297 /* Reset TCP read info */
1298 svsk->sk_reclen = 0;
1299 svsk->sk_tcplen = 0;
1300
1301 svc_sock_received(svsk);
1302 if (serv->sv_stats)
1303 serv->sv_stats->nettcpcnt++;
1304
1305 return len;
1306
1307 err_delete:
1308 svc_delete_socket(svsk);
1309 return -EAGAIN;
1310
1311 error:
1312 if (len == -EAGAIN) {
1313 dprintk("RPC: TCP recvfrom got EAGAIN\n");
1314 svc_sock_received(svsk);
1315 } else {
1316 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
1317 svsk->sk_server->sv_name, -len);
93fbf1a5 1318 goto err_delete;
1da177e4
LT
1319 }
1320
1321 return len;
1322}
1323
1324/*
1325 * Send out data on TCP socket.
1326 */
1327static int
1328svc_tcp_sendto(struct svc_rqst *rqstp)
1329{
1330 struct xdr_buf *xbufp = &rqstp->rq_res;
1331 int sent;
d8ed029d 1332 __be32 reclen;
1da177e4
LT
1333
1334 /* Set up the first element of the reply kvec.
1335 * Any other kvecs that may be in use have been taken
1336 * care of by the server implementation itself.
1337 */
1338 reclen = htonl(0x80000000|((xbufp->len ) - 4));
1339 memcpy(xbufp->head[0].iov_base, &reclen, 4);
1340
1341 if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
1342 return -ENOTCONN;
1343
1344 sent = svc_sendto(rqstp, &rqstp->rq_res);
1345 if (sent != xbufp->len) {
1346 printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
1347 rqstp->rq_sock->sk_server->sv_name,
1348 (sent<0)?"got error":"sent only",
1349 sent, xbufp->len);
aaf68cfb
N
1350 set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
1351 svc_sock_enqueue(rqstp->rq_sock);
1da177e4
LT
1352 sent = -EAGAIN;
1353 }
1354 return sent;
1355}
1356
1357 static struct svc_xprt_ops svc_tcp_ops = {
1358         .xpo_recvfrom = svc_tcp_recvfrom,
1359         .xpo_sendto = svc_tcp_sendto,
1360 };
1361
1362 static struct svc_xprt_class svc_tcp_class = {
1363         .xcl_name = "tcp",
1364         .xcl_ops = &svc_tcp_ops,
1365         .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
1366 };
1367
1368 void svc_init_xprt_sock(void)
1369 {
1370         svc_reg_xprt_class(&svc_tcp_class);
1371         svc_reg_xprt_class(&svc_udp_class);
1372 }
1373
1374 void svc_cleanup_xprt_sock(void)
1375 {
1376         svc_unreg_xprt_class(&svc_tcp_class);
1377         svc_unreg_xprt_class(&svc_udp_class);
1378 }
1379
1380static void
1381svc_tcp_init(struct svc_sock *svsk)
1382{
1383 struct sock *sk = svsk->sk_sk;
1384 struct tcp_sock *tp = tcp_sk(sk);
1385
360d8738 1386 svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt);
1da177e4
LT
1387
1388 if (sk->sk_state == TCP_LISTEN) {
1389 dprintk("setting up TCP socket for listening\n");
1390 sk->sk_data_ready = svc_tcp_listen_data_ready;
1391 set_bit(SK_CONN, &svsk->sk_flags);
1392 } else {
1393 dprintk("setting up TCP socket for reading\n");
1394 sk->sk_state_change = svc_tcp_state_change;
1395 sk->sk_data_ready = svc_tcp_data_ready;
1396 sk->sk_write_space = svc_write_space;
1397
1398 svsk->sk_reclen = 0;
1399 svsk->sk_tcplen = 0;
1400
1401 tp->nonagle = 1; /* disable Nagle's algorithm */
1402
1403    /* initial setting must have enough space to
1404     * receive and respond to one request.
1da177e4
LT
1405 * svc_tcp_recvfrom will re-adjust if necessary
1406 */
1407 svc_sock_setbufsize(svsk->sk_sock,
c6b0a9f8
N
1408 3 * svsk->sk_server->sv_max_mesg,
1409 3 * svsk->sk_server->sv_max_mesg);
1da177e4
LT
1410
1411 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1412 set_bit(SK_DATA, &svsk->sk_flags);
cca5172a 1413 if (sk->sk_state != TCP_ESTABLISHED)
1da177e4
LT
1414 set_bit(SK_CLOSE, &svsk->sk_flags);
1415 }
1416}
1417
1418void
1419svc_sock_update_bufs(struct svc_serv *serv)
1420{
1421 /*
1422 * The number of server threads has changed. Update
1423 * rcvbuf and sndbuf accordingly on all sockets
1424 */
1425 struct list_head *le;
1426
1427 spin_lock_bh(&serv->sv_lock);
1428 list_for_each(le, &serv->sv_permsocks) {
cca5172a 1429 struct svc_sock *svsk =
1da177e4
LT
1430 list_entry(le, struct svc_sock, sk_list);
1431 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1432 }
1433 list_for_each(le, &serv->sv_tempsocks) {
1434 struct svc_sock *svsk =
1435 list_entry(le, struct svc_sock, sk_list);
1436 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1437 }
1438 spin_unlock_bh(&serv->sv_lock);
1439}
1440
1441/*
3262c816
GB
1442 * Receive the next request on any socket. This code is carefully
1443 * organised not to touch any cachelines in the shared svc_serv
1444 * structure, only cachelines in the local svc_pool.
1da177e4
LT
1445 */
1446int
6fb2b47f 1447svc_recv(struct svc_rqst *rqstp, long timeout)
1da177e4 1448{
27459f09 1449 struct svc_sock *svsk = NULL;
6fb2b47f 1450 struct svc_serv *serv = rqstp->rq_server;
3262c816 1451 struct svc_pool *pool = rqstp->rq_pool;
44524359 1452 int len, i;
1da177e4
LT
1453 int pages;
1454 struct xdr_buf *arg;
1455 DECLARE_WAITQUEUE(wait, current);
1456
1457 dprintk("svc: server %p waiting for data (to = %ld)\n",
1458 rqstp, timeout);
1459
1460 if (rqstp->rq_sock)
cca5172a 1461 printk(KERN_ERR
1da177e4
LT
1462 "svc_recv: service %p, socket not NULL!\n",
1463 rqstp);
1464 if (waitqueue_active(&rqstp->rq_wait))
cca5172a 1465 printk(KERN_ERR
1da177e4
LT
1466 "svc_recv: service %p, wait queue active!\n",
1467 rqstp);
1468
1da177e4
LT
1469
1470 /* now allocate needed pages. If we get a failure, sleep briefly */
c6b0a9f8 1471 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
44524359
N
1472 for (i=0; i < pages ; i++)
1473 while (rqstp->rq_pages[i] == NULL) {
1474 struct page *p = alloc_page(GFP_KERNEL);
1475 if (!p)
1476 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1477 rqstp->rq_pages[i] = p;
1da177e4 1478 }
250f3915
N
1479 rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
1480 BUG_ON(pages >= RPCSVC_MAXPAGES);
1da177e4
LT
1481
1482 /* Make arg->head point to first page and arg->pages point to rest */
1483 arg = &rqstp->rq_arg;
44524359 1484 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
1da177e4 1485 arg->head[0].iov_len = PAGE_SIZE;
44524359 1486 arg->pages = rqstp->rq_pages + 1;
1da177e4
LT
1487 arg->page_base = 0;
1488 /* save at least one page for response */
1489 arg->page_len = (pages-2)*PAGE_SIZE;
1490 arg->len = (pages-1)*PAGE_SIZE;
1491 arg->tail[0].iov_len = 0;
3e1d1d28
CL
1492
1493 try_to_freeze();
1887b935 1494 cond_resched();
1da177e4
LT
1495 if (signalled())
1496 return -EINTR;
1497
3262c816
GB
1498 spin_lock_bh(&pool->sp_lock);
1499 if ((svsk = svc_sock_dequeue(pool)) != NULL) {
1da177e4 1500 rqstp->rq_sock = svsk;
c45c357d 1501 atomic_inc(&svsk->sk_inuse);
c6b0a9f8 1502 rqstp->rq_reserved = serv->sv_max_mesg;
5685f0fa 1503 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
1da177e4
LT
1504 } else {
1505 /* No data pending. Go to sleep */
3262c816 1506 svc_thread_enqueue(pool, rqstp);
1da177e4
LT
1507
1508 /*
1509 * We have to be able to interrupt this wait
1510 * to bring down the daemons ...
1511 */
1512 set_current_state(TASK_INTERRUPTIBLE);
1513 add_wait_queue(&rqstp->rq_wait, &wait);
3262c816 1514 spin_unlock_bh(&pool->sp_lock);
1da177e4
LT
1515
1516 schedule_timeout(timeout);
1517
3e1d1d28 1518 try_to_freeze();
1da177e4 1519
3262c816 1520 spin_lock_bh(&pool->sp_lock);
1da177e4
LT
1521 remove_wait_queue(&rqstp->rq_wait, &wait);
1522
1523 if (!(svsk = rqstp->rq_sock)) {
3262c816
GB
1524 svc_thread_dequeue(pool, rqstp);
1525 spin_unlock_bh(&pool->sp_lock);
1da177e4
LT
1526 dprintk("svc: server %p, no data yet\n", rqstp);
1527 return signalled()? -EINTR : -EAGAIN;
1528 }
1529 }
3262c816 1530 spin_unlock_bh(&pool->sp_lock);
1da177e4 1531
3262c816
GB
1532 dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
1533 rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
5d137990 1534 len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
1da177e4
LT
1535 dprintk("svc: got len=%d\n", len);
1536
1537 /* No data, incomplete (TCP) read, or accept() */
1538 if (len == 0 || len == -EAGAIN) {
1539 rqstp->rq_res.len = 0;
1540 svc_sock_release(rqstp);
1541 return -EAGAIN;
1542 }
1543 svsk->sk_lastrecv = get_seconds();
36bdfc8b 1544 clear_bit(SK_OLD, &svsk->sk_flags);
1da177e4 1545
bcdb81ae 1546 rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
1da177e4
LT
1547 rqstp->rq_chandle.defer = svc_defer;
1548
1549 if (serv->sv_stats)
1550 serv->sv_stats->netcnt++;
1551 return len;
1552}
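/*
 * Illustrative sketch, not part of the original file: service threads such
 * as nfsd and lockd drive svc_recv() from a loop roughly like the one
 * below; svc_process() (in svc.c) dispatches the decoded request and sends
 * the reply.  The function name, timeout and error handling are simplified
 * and hypothetical.
 */
static int example_service_thread(struct svc_rqst *rqstp)
{
        int err;

        for (;;) {
                err = svc_recv(rqstp, 30 * HZ);
                if (err == -EAGAIN)
                        continue;       /* nothing ready yet */
                if (err < 0)
                        break;          /* e.g. -EINTR: thread is being shut down */
                svc_process(rqstp);
        }
        return err;
}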
1553
cca5172a 1554/*
1da177e4
LT
1555 * Drop request
1556 */
1557void
1558svc_drop(struct svc_rqst *rqstp)
1559{
1560 dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
1561 svc_sock_release(rqstp);
1562}
1563
1564/*
1565 * Return reply to client.
1566 */
1567int
1568svc_send(struct svc_rqst *rqstp)
1569{
1570 struct svc_sock *svsk;
1571 int len;
1572 struct xdr_buf *xb;
1573
1574 if ((svsk = rqstp->rq_sock) == NULL) {
1575 printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
1576 __FILE__, __LINE__);
1577 return -EFAULT;
1578 }
1579
1580 /* release the receive skb before sending the reply */
1581 svc_release_skb(rqstp);
1582
1583 /* calculate over-all length */
1584 xb = & rqstp->rq_res;
1585 xb->len = xb->head[0].iov_len +
1586 xb->page_len +
1587 xb->tail[0].iov_len;
1588
57b47a53
IM
1589 /* Grab svsk->sk_mutex to serialize outgoing data. */
1590 mutex_lock(&svsk->sk_mutex);
1da177e4
LT
1591 if (test_bit(SK_DEAD, &svsk->sk_flags))
1592 len = -ENOTCONN;
1593 else
5d137990 1594 len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
57b47a53 1595 mutex_unlock(&svsk->sk_mutex);
1da177e4
LT
1596 svc_sock_release(rqstp);
1597
1598 if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
1599 return 0;
1600 return len;
1601}
1602
36bdfc8b
GB
1603/*
1604 * Timer function to close old temporary sockets, using
1605 * a mark-and-sweep algorithm.
1606 */
1607static void
1608svc_age_temp_sockets(unsigned long closure)
1609{
1610 struct svc_serv *serv = (struct svc_serv *)closure;
1611 struct svc_sock *svsk;
1612 struct list_head *le, *next;
1613 LIST_HEAD(to_be_aged);
1614
1615 dprintk("svc_age_temp_sockets\n");
1616
1617 if (!spin_trylock_bh(&serv->sv_lock)) {
1618 /* busy, try again 1 sec later */
1619 dprintk("svc_age_temp_sockets: busy\n");
1620 mod_timer(&serv->sv_temptimer, jiffies + HZ);
1621 return;
1622 }
1623
1624 list_for_each_safe(le, next, &serv->sv_tempsocks) {
1625 svsk = list_entry(le, struct svc_sock, sk_list);
1626
1627 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
1628 continue;
7a1fa065 1629 if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
36bdfc8b 1630 continue;
c45c357d 1631 atomic_inc(&svsk->sk_inuse);
36bdfc8b
GB
1632 list_move(le, &to_be_aged);
1633 set_bit(SK_CLOSE, &svsk->sk_flags);
1634 set_bit(SK_DETACHED, &svsk->sk_flags);
1635 }
1636 spin_unlock_bh(&serv->sv_lock);
1637
1638 while (!list_empty(&to_be_aged)) {
1639 le = to_be_aged.next;
1640 /* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
1641 list_del_init(le);
1642 svsk = list_entry(le, struct svc_sock, sk_list);
1643
1644 dprintk("queuing svsk %p for closing, %lu seconds old\n",
1645 svsk, get_seconds() - svsk->sk_lastrecv);
1646
1647 /* a thread will dequeue and close it soon */
1648 svc_sock_enqueue(svsk);
1649 svc_sock_put(svsk);
1650 }
1651
1652 mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
1653}
1654
1da177e4
LT
1655/*
1656 * Initialize socket for RPC use and create svc_sock struct
1657 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
1658 */
6b174337
CL
1659static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1660 struct socket *sock,
1661 int *errp, int flags)
1da177e4
LT
1662{
1663 struct svc_sock *svsk;
1664 struct sock *inet;
6b174337
CL
1665 int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
1666 int is_temporary = flags & SVC_SOCK_TEMPORARY;
1da177e4
LT
1667
1668 dprintk("svc: svc_setup_socket %p\n", sock);
0da974f4 1669 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
1da177e4
LT
1670 *errp = -ENOMEM;
1671 return NULL;
1672 }
1da177e4
LT
1673
1674 inet = sock->sk;
1675
1676 /* Register socket with portmapper */
1677 if (*errp >= 0 && pmap_register)
1678 *errp = svc_register(serv, inet->sk_protocol,
1679 ntohs(inet_sk(inet)->sport));
1680
1681 if (*errp < 0) {
1682 kfree(svsk);
1683 return NULL;
1684 }
1685
1686 set_bit(SK_BUSY, &svsk->sk_flags);
1687 inet->sk_user_data = svsk;
1688 svsk->sk_sock = sock;
1689 svsk->sk_sk = inet;
1690 svsk->sk_ostate = inet->sk_state_change;
1691 svsk->sk_odata = inet->sk_data_ready;
1692 svsk->sk_owspace = inet->sk_write_space;
1693 svsk->sk_server = serv;
aaf68cfb 1694 atomic_set(&svsk->sk_inuse, 1);
1da177e4 1695 svsk->sk_lastrecv = get_seconds();
7ac1bea5 1696 spin_lock_init(&svsk->sk_lock);
1da177e4
LT
1697 INIT_LIST_HEAD(&svsk->sk_deferred);
1698 INIT_LIST_HEAD(&svsk->sk_ready);
57b47a53 1699 mutex_init(&svsk->sk_mutex);
1da177e4
LT
1700
1701 /* Initialize the socket */
1702 if (sock->type == SOCK_DGRAM)
1703 svc_udp_init(svsk);
1704 else
1705 svc_tcp_init(svsk);
1706
1707 spin_lock_bh(&serv->sv_lock);
6b174337 1708 if (is_temporary) {
1da177e4
LT
1709 set_bit(SK_TEMP, &svsk->sk_flags);
1710 list_add(&svsk->sk_list, &serv->sv_tempsocks);
1711 serv->sv_tmpcnt++;
36bdfc8b
GB
1712 if (serv->sv_temptimer.function == NULL) {
1713 /* setup timer to age temp sockets */
1714 setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
1715 (unsigned long)serv);
1716 mod_timer(&serv->sv_temptimer,
1717 jiffies + svc_conn_age_period * HZ);
1718 }
1da177e4
LT
1719 } else {
1720 clear_bit(SK_TEMP, &svsk->sk_flags);
1721 list_add(&svsk->sk_list, &serv->sv_permsocks);
1722 }
1723 spin_unlock_bh(&serv->sv_lock);
1724
1725 dprintk("svc: svc_setup_socket created %p (inet %p)\n",
1726 svsk, svsk->sk_sk);
1727
1da177e4
LT
1728 return svsk;
1729}
1730
b41b66d6
N
1731int svc_addsock(struct svc_serv *serv,
1732 int fd,
1733 char *name_return,
1734 int *proto)
1735{
1736 int err = 0;
1737 struct socket *so = sockfd_lookup(fd, &err);
1738 struct svc_sock *svsk = NULL;
1739
1740 if (!so)
1741 return err;
1742 if (so->sk->sk_family != AF_INET)
1743 err = -EAFNOSUPPORT;
1744 else if (so->sk->sk_protocol != IPPROTO_TCP &&
1745 so->sk->sk_protocol != IPPROTO_UDP)
1746 err = -EPROTONOSUPPORT;
1747 else if (so->state > SS_UNCONNECTED)
1748 err = -EISCONN;
1749 else {
6b174337 1750 svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
e79eff1f
N
1751 if (svsk) {
1752 svc_sock_received(svsk);
b41b66d6 1753 err = 0;
e79eff1f 1754 }
b41b66d6
N
1755 }
1756 if (err) {
1757 sockfd_put(so);
1758 return err;
1759 }
1760 if (proto) *proto = so->sk->sk_protocol;
1761 return one_sock_name(name_return, svsk);
1762}
1763EXPORT_SYMBOL_GPL(svc_addsock);
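/*
 * Illustrative sketch, not part of the original file: a caller that has
 * already created and bound a socket in user space can hand the file
 * descriptor over instead of asking the server to create one, e.g.
 *
 *      char name[RPC_MAX_ADDRBUFLEN];      buffer size is hypothetical
 *      int proto = 0;
 *      err = svc_addsock(serv, fd, name, &proto);
 *
 * On success svc_addsock() returns the length of the one_sock_name()
 * string written to "name" and fills in "proto".
 */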
1764
1da177e4
LT
1765/*
1766 * Create socket for RPC service.
1767 */
6b174337 1768static int svc_create_socket(struct svc_serv *serv, int protocol,
77f1f67a 1769 struct sockaddr *sin, int len, int flags)
1da177e4
LT
1770{
1771 struct svc_sock *svsk;
1772 struct socket *sock;
1773 int error;
1774 int type;
ad06e4bd 1775 char buf[RPC_MAX_ADDRBUFLEN];
1da177e4 1776
ad06e4bd
CL
1777 dprintk("svc: svc_create_socket(%s, %d, %s)\n",
1778 serv->sv_program->pg_name, protocol,
77f1f67a 1779 __svc_print_addr(sin, buf, sizeof(buf)));
1da177e4
LT
1780
1781 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
1782 printk(KERN_WARNING "svc: only UDP and TCP "
1783 "sockets supported\n");
1784 return -EINVAL;
1785 }
1786 type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
1787
77f1f67a
CL
1788 error = sock_create_kern(sin->sa_family, type, protocol, &sock);
1789 if (error < 0)
1da177e4
LT
1790 return error;
1791
ed07536e
PZ
1792 svc_reclassify_socket(sock);
1793
18114746 1794 if (type == SOCK_STREAM)
77f1f67a
CL
1795 sock->sk->sk_reuse = 1; /* allow address reuse */
1796 error = kernel_bind(sock, sin, len);
18114746
ES
1797 if (error < 0)
1798 goto bummer;
1da177e4
LT
1799
1800 if (protocol == IPPROTO_TCP) {
e6242e92 1801 if ((error = kernel_listen(sock, 64)) < 0)
1da177e4
LT
1802 goto bummer;
1803 }
1804
e79eff1f
N
1805 if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
1806 svc_sock_received(svsk);
6b174337 1807 return ntohs(inet_sk(svsk->sk_sk)->sport);
e79eff1f 1808 }
1da177e4
LT
1809
1810bummer:
1811 dprintk("svc: svc_create_socket error = %d\n", -error);
1812 sock_release(sock);
1813 return error;
1814}
1815
1816/*
1817 * Remove a dead socket
1818 */
aaf68cfb 1819static void
1da177e4
LT
1820svc_delete_socket(struct svc_sock *svsk)
1821{
1822 struct svc_serv *serv;
1823 struct sock *sk;
1824
1825 dprintk("svc: svc_delete_socket(%p)\n", svsk);
1826
1827 serv = svsk->sk_server;
1828 sk = svsk->sk_sk;
1829
1830 sk->sk_state_change = svsk->sk_ostate;
1831 sk->sk_data_ready = svsk->sk_odata;
1832 sk->sk_write_space = svsk->sk_owspace;
1833
1834 spin_lock_bh(&serv->sv_lock);
1835
36bdfc8b
GB
1836 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
1837 list_del_init(&svsk->sk_list);
cca5172a 1838 /*
3262c816
GB
1839 * We used to delete the svc_sock from whichever list
1840  * its sk_ready node was on, but we don't actually
1841 * need to. This is because the only time we're called
1842 * while still attached to a queue, the queue itself
1843 * is about to be destroyed (in svc_destroy).
1844 */
aaf68cfb
N
1845 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
1846 BUG_ON(atomic_read(&svsk->sk_inuse)<2);
1847 atomic_dec(&svsk->sk_inuse);
1da177e4
LT
1848 if (test_bit(SK_TEMP, &svsk->sk_flags))
1849 serv->sv_tmpcnt--;
aaf68cfb 1850 }
1da177e4 1851
d6740df9 1852 spin_unlock_bh(&serv->sv_lock);
aaf68cfb
N
1853}
1854
cda1fd4a 1855static void svc_close_socket(struct svc_sock *svsk)
aaf68cfb
N
1856{
1857 set_bit(SK_CLOSE, &svsk->sk_flags);
1858 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
1859 /* someone else will have to effect the close */
1860 return;
1861
1862 atomic_inc(&svsk->sk_inuse);
1863 svc_delete_socket(svsk);
1864 clear_bit(SK_BUSY, &svsk->sk_flags);
d6740df9 1865 svc_sock_put(svsk);
1da177e4
LT
1866}
1867
cda1fd4a
N
1868void svc_force_close_socket(struct svc_sock *svsk)
1869{
1870 set_bit(SK_CLOSE, &svsk->sk_flags);
1871 if (test_bit(SK_BUSY, &svsk->sk_flags)) {
1872 /* Waiting to be processed, but no threads left,
1873 * So just remove it from the waiting list
1874 */
1875 list_del_init(&svsk->sk_ready);
1876 clear_bit(SK_BUSY, &svsk->sk_flags);
1877 }
1878 svc_close_socket(svsk);
1879}
1880
6b174337
CL
1881/**
1882 * svc_makesock - Make a socket for nfsd and lockd
1883 * @serv: RPC server structure
1884 * @protocol: transport protocol to use
1885 * @port: port to use
482fb94e 1886 * @flags: requested socket characteristics
6b174337 1887 *
1da177e4 1888 */
482fb94e
CL
1889int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
1890 int flags)
1da177e4 1891{
6b174337
CL
1892 struct sockaddr_in sin = {
1893 .sin_family = AF_INET,
1894 .sin_addr.s_addr = INADDR_ANY,
1895 .sin_port = htons(port),
1896 };
1da177e4
LT
1897
1898 dprintk("svc: creating socket proto = %d\n", protocol);
77f1f67a
CL
1899 return svc_create_socket(serv, protocol, (struct sockaddr *) &sin,
1900 sizeof(sin), flags);
1da177e4
LT
1901}
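/*
 * Illustrative use, not part of the original file: lockd and nfsd create
 * their IPv4 listeners with calls along the lines of
 *
 *      svc_makesock(serv, IPPROTO_UDP, port, SVC_SOCK_DEFAULTS);
 *      svc_makesock(serv, IPPROTO_TCP, port, SVC_SOCK_DEFAULTS);
 *
 * where "port" is the service's well-known port (for NFS, 2049).
 */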
1902
1903/*
cca5172a 1904 * Handle defer and revisit of requests
1da177e4
LT
1905 */
1906
1907static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
1908{
1909 struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
1da177e4
LT
1910 struct svc_sock *svsk;
1911
1912 if (too_many) {
1913 svc_sock_put(dr->svsk);
1914 kfree(dr);
1915 return;
1916 }
1917 dprintk("revisit queued\n");
1918 svsk = dr->svsk;
1919 dr->svsk = NULL;
7ac1bea5 1920 spin_lock(&svsk->sk_lock);
1da177e4 1921 list_add(&dr->handle.recent, &svsk->sk_deferred);
7ac1bea5 1922 spin_unlock(&svsk->sk_lock);
1da177e4
LT
1923 set_bit(SK_DEFERRED, &svsk->sk_flags);
1924 svc_sock_enqueue(svsk);
1925 svc_sock_put(svsk);
1926}
1927
1928static struct cache_deferred_req *
1929svc_defer(struct cache_req *req)
1930{
1931 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
1932 int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
1933 struct svc_deferred_req *dr;
1934
1935 if (rqstp->rq_arg.page_len)
1936 return NULL; /* if more than a page, give up FIXME */
1937 if (rqstp->rq_deferred) {
1938 dr = rqstp->rq_deferred;
1939 rqstp->rq_deferred = NULL;
1940 } else {
1941 int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
1942 /* FIXME maybe discard if size too large */
1943 dr = kmalloc(size, GFP_KERNEL);
1944 if (dr == NULL)
1945 return NULL;
1946
1947 dr->handle.owner = rqstp->rq_server;
1948 dr->prot = rqstp->rq_prot;
24422222
CL
1949 memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
1950 dr->addrlen = rqstp->rq_addrlen;
1918e341 1951 dr->daddr = rqstp->rq_daddr;
1da177e4
LT
1952 dr->argslen = rqstp->rq_arg.len >> 2;
1953 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
1954 }
c45c357d 1955 atomic_inc(&rqstp->rq_sock->sk_inuse);
1da177e4 1956 dr->svsk = rqstp->rq_sock;
1da177e4
LT
1957
1958 dr->handle.revisit = svc_revisit;
1959 return &dr->handle;
1960}
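/*
 * Illustrative sketch, not part of the original file: svc_defer() is
 * installed as rqstp->rq_chandle.defer in svc_recv(), so a cache lookup
 * that cannot complete immediately can park the request and replay it once
 * the cache item is filled in, roughly:
 *
 *      dreq = req->defer(req);             calls svc_defer()
 *      ...
 *      dreq->revisit(dreq, 0);             calls svc_revisit()
 *
 * The replayed request then re-enters the server through
 * svc_deferred_recv() below.
 */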
1961
1962/*
1963 * recv data from a deferred request into an active one
1964 */
1965static int svc_deferred_recv(struct svc_rqst *rqstp)
1966{
1967 struct svc_deferred_req *dr = rqstp->rq_deferred;
1968
1969 rqstp->rq_arg.head[0].iov_base = dr->args;
1970 rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
1971 rqstp->rq_arg.page_len = 0;
1972 rqstp->rq_arg.len = dr->argslen<<2;
1973 rqstp->rq_prot = dr->prot;
24422222
CL
1974 memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
1975 rqstp->rq_addrlen = dr->addrlen;
1918e341 1976 rqstp->rq_daddr = dr->daddr;
44524359 1977 rqstp->rq_respages = rqstp->rq_pages;
1da177e4
LT
1978 return dr->argslen<<2;
1979}
1980
1981
1982static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1983{
1984 struct svc_deferred_req *dr = NULL;
cca5172a 1985
1da177e4
LT
1986 if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
1987 return NULL;
7ac1bea5 1988 spin_lock(&svsk->sk_lock);
1da177e4
LT
1989 clear_bit(SK_DEFERRED, &svsk->sk_flags);
1990 if (!list_empty(&svsk->sk_deferred)) {
1991 dr = list_entry(svsk->sk_deferred.next,
1992 struct svc_deferred_req,
1993 handle.recent);
1994 list_del_init(&dr->handle.recent);
1995 set_bit(SK_DEFERRED, &svsk->sk_flags);
1996 }
7ac1bea5 1997 spin_unlock(&svsk->sk_lock);
1da177e4
LT
1998 return dr;
1999}