[GitHub/mt8127/android_kernel_alcatel_ttab.git] net/ipv4/tcp.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
21 *
22 * Fixes:
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
27 * (tcp_err()).
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed were wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
38 * unknown sockets.
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
41 * syn rule wrong]
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
47 * escape still
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
51 * facilities
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
56 * bit to skb ops.
57 * Alan Cox : Tidied tcp_data to avoid a potential
58 * nasty.
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
70 * sockets.
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
74 * state ack error.
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
79 * fixes
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
85 * completely
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
93 * (not yet usable)
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
106 * all cases.
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
111 * works now.
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
113 * BSD api.
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
121 * fixed ports.
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
127 * socket close.
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
132 * accept.
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
143 * close.
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
149 * comments.
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
157 * resemble the RFC.
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
162 * generates them.
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
175 * but it's a start!
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
196 * improvement.
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
209 *
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
214 *
215 * Description of States:
216 *
217 * TCP_SYN_SENT sent a connection request, waiting for ack
218 *
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
221 *
222 * TCP_ESTABLISHED connection established
223 *
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
226 *
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
228 * to shutdown
229 *
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
232 *
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
238 *
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
242 *
243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
246 *
247 * TCP_CLOSE socket is finished
248 */
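/*
 * Illustrative userspace sketch (not part of this file): how an ordinary
 * client walks through the states described above.  connect() leaves the
 * socket in SYN_SENT until the handshake completes (ESTABLISHED),
 * shutdown(SHUT_WR) sends our FIN (FIN_WAIT1/FIN_WAIT2), and close()
 * finishes the teardown.  Names such as example_client() are illustrative
 * only; error handling is abbreviated.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int example_client(const char *addr, unsigned short port)
{
	struct sockaddr_in sin;
	char buf[4096];
	ssize_t n;
	int fd = socket(AF_INET, SOCK_STREAM, 0);	/* TCP_CLOSE */

	if (fd < 0)
		return -1;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	inet_pton(AF_INET, addr, &sin.sin_addr);

	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		close(fd);				/* SYN_SENT -> CLOSE on failure */
		return -1;
	}
	/* TCP_ESTABLISHED: full-duplex data transfer is possible here. */

	shutdown(fd, SHUT_WR);				/* our FIN: FIN_WAIT1 -> FIN_WAIT2 */
	while ((n = read(fd, buf, sizeof(buf))) > 0)	/* drain until the peer's FIN */
		fwrite(buf, 1, n, stdout);

	close(fd);					/* TIME_WAIT is handled by the kernel */
	return 0;
}
#endif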
249
250 #include <linux/config.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/smp_lock.h>
257 #include <linux/fs.h>
258 #include <linux/random.h>
259 #include <linux/bootmem.h>
260
261 #include <net/icmp.h>
262 #include <net/tcp.h>
263 #include <net/xfrm.h>
264 #include <net/ip.h>
265
266
267 #include <asm/uaccess.h>
268 #include <asm/ioctls.h>
269
270 int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
271
272 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
273
274 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
275
276 int sysctl_tcp_mem[3];
277 int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
278 int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
279
280 EXPORT_SYMBOL(sysctl_tcp_mem);
281 EXPORT_SYMBOL(sysctl_tcp_rmem);
282 EXPORT_SYMBOL(sysctl_tcp_wmem);
283
284 atomic_t tcp_memory_allocated; /* Current allocated memory. */
285 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
286
287 EXPORT_SYMBOL(tcp_memory_allocated);
288 EXPORT_SYMBOL(tcp_sockets_allocated);
289
290 /*
291 * Pressure flag: try to collapse.
292 * Technical note: it is used by multiple contexts non atomically.
293 * All the sk_stream_mem_schedule() is of this nature: accounting
294 * is strict, actions are advisory and have some latency.
295 */
296 int tcp_memory_pressure;
297
298 EXPORT_SYMBOL(tcp_memory_pressure);
299
300 void tcp_enter_memory_pressure(void)
301 {
302 if (!tcp_memory_pressure) {
303 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
304 tcp_memory_pressure = 1;
305 }
306 }
307
308 EXPORT_SYMBOL(tcp_enter_memory_pressure);
309
310 /*
311 * LISTEN is a special case for poll..
312 */
313 static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
314 poll_table *wait)
315 {
316 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? (POLLIN | POLLRDNORM) : 0;
317 }
318
319 /*
320 * Wait for a TCP event.
321 *
322 * Note that we don't need to lock the socket, as the upper poll layers
323 * take care of normal races (between the test and the event) and we don't
324 * go look at any of the socket buffers directly.
325 */
326 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
327 {
328 unsigned int mask;
329 struct sock *sk = sock->sk;
330 struct tcp_sock *tp = tcp_sk(sk);
331
332 poll_wait(file, sk->sk_sleep, wait);
333 if (sk->sk_state == TCP_LISTEN)
334 return tcp_listen_poll(sk, wait);
335
336 /* Socket is not locked. We are protected from async events
337 by poll logic and correct handling of state changes
338 made by other threads is impossible in any case.
339 */
340
341 mask = 0;
342 if (sk->sk_err)
343 mask = POLLERR;
344
345 /*
346 * POLLHUP is certainly not done right. But poll() doesn't
347 * have a notion of HUP in just one direction, and for a
348 * socket the read side is more interesting.
349 *
350 * Some poll() documentation says that POLLHUP is incompatible
351 * with the POLLOUT/POLLWR flags, so somebody should check this
352 * all. But careful, it tends to be safer to return too many
353 * bits than too few, and you can easily break real applications
354 * if you don't tell them that something has hung up!
355 *
356 * Check-me.
357 *
358 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
359 * our fs/select.c). It means that after we received EOF,
360 * poll always returns immediately, making impossible poll() on write()
361 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
362 * if and only if shutdown has been made in both directions.
363 * Actually, it is interesting to look at how Solaris and DUX
364 * solve this dilemma. I would prefer, if POLLHUP were maskable,
365 * then we could set it on SND_SHUTDOWN. BTW examples given
366 * in Stevens' books assume exactly this behaviour, it explains
367 * why POLLHUP is incompatible with POLLOUT. --ANK
368 *
369 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
370 * blocking on fresh not-connected or disconnected socket. --ANK
371 */
372 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
373 mask |= POLLHUP;
374 if (sk->sk_shutdown & RCV_SHUTDOWN)
375 mask |= POLLIN | POLLRDNORM;
376
377 /* Connected? */
378 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
379 /* Potential race condition. If read of tp below will
380 * escape above sk->sk_state, we can be illegally awakened
381 * in SYN_* states. */
382 if ((tp->rcv_nxt != tp->copied_seq) &&
383 (tp->urg_seq != tp->copied_seq ||
384 tp->rcv_nxt != tp->copied_seq + 1 ||
385 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
386 mask |= POLLIN | POLLRDNORM;
387
388 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
389 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
390 mask |= POLLOUT | POLLWRNORM;
391 } else { /* send SIGIO later */
392 set_bit(SOCK_ASYNC_NOSPACE,
393 &sk->sk_socket->flags);
394 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
395
396 /* Race breaker. If space is freed after
397 * wspace test but before the flags are set,
398 * IO signal will be lost.
399 */
400 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
401 mask |= POLLOUT | POLLWRNORM;
402 }
403 }
404
405 if (tp->urg_data & TCP_URG_VALID)
406 mask |= POLLPRI;
407 }
408 return mask;
409 }
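/*
 * Illustrative userspace sketch (not part of this file): what the mask
 * computed above looks like from the application side.  POLLIN fires for
 * readable data or a received FIN, POLLOUT when enough write space is
 * available, POLLPRI for urgent data, and POLLHUP only once both
 * directions are shut down.  example_wait_readable() is a made-up name.
 */
#if 0	/* example only */
#include <poll.h>

static int example_wait_readable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
	int rc = poll(&pfd, 1, timeout_ms);

	if (rc <= 0)
		return rc;			/* 0: timeout, <0: error */
	if (pfd.revents & POLLPRI)
		return 2;			/* urgent data pending (see SIOCATMARK below) */
	if (pfd.revents & (POLLERR | POLLHUP))
		return -1;			/* error or full hangup */
	return 1;				/* ordinary data or EOF; read() will tell */
}
#endif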
410
411 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
412 {
413 struct tcp_sock *tp = tcp_sk(sk);
414 int answ;
415
416 switch (cmd) {
417 case SIOCINQ:
418 if (sk->sk_state == TCP_LISTEN)
419 return -EINVAL;
420
421 lock_sock(sk);
422 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
423 answ = 0;
424 else if (sock_flag(sk, SOCK_URGINLINE) ||
425 !tp->urg_data ||
426 before(tp->urg_seq, tp->copied_seq) ||
427 !before(tp->urg_seq, tp->rcv_nxt)) {
428 answ = tp->rcv_nxt - tp->copied_seq;
429
430 /* Subtract 1, if FIN is in queue. */
431 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
432 answ -=
433 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
434 } else
435 answ = tp->urg_seq - tp->copied_seq;
436 release_sock(sk);
437 break;
438 case SIOCATMARK:
439 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
440 break;
441 case SIOCOUTQ:
442 if (sk->sk_state == TCP_LISTEN)
443 return -EINVAL;
444
445 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
446 answ = 0;
447 else
448 answ = tp->write_seq - tp->snd_una;
449 break;
450 default:
451 return -ENOIOCTLCMD;
452 };
453
454 return put_user(answ, (int __user *)arg);
455 }
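/*
 * Illustrative userspace sketch (not part of this file): the ioctls
 * handled above.  SIOCINQ (a.k.a. FIONREAD) reports bytes queued for
 * reading, SIOCOUTQ bytes not yet acknowledged by the peer, and
 * SIOCATMARK whether the next byte is at the urgent mark.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void example_queue_sizes(int fd)
{
	int inq = 0, outq = 0, atmark = 0;

	ioctl(fd, SIOCINQ, &inq);
	ioctl(fd, SIOCOUTQ, &outq);
	ioctl(fd, SIOCATMARK, &atmark);
	printf("inq=%d outq=%d atmark=%d\n", inq, outq, atmark);
}
#endif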
456
457
458 int tcp_listen_start(struct sock *sk)
459 {
460 struct inet_sock *inet = inet_sk(sk);
461 struct inet_connection_sock *icsk = inet_csk(sk);
462 int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE);
463
464 if (rc != 0)
465 return rc;
466
467 sk->sk_max_ack_backlog = 0;
468 sk->sk_ack_backlog = 0;
469 inet_csk_delack_init(sk);
470
471 /* There is race window here: we announce ourselves listening,
472 * but this transition is still not validated by get_port().
473 * It is OK, because this socket enters to hash table only
474 * after validation is complete.
475 */
476 sk->sk_state = TCP_LISTEN;
477 if (!sk->sk_prot->get_port(sk, inet->num)) {
478 inet->sport = htons(inet->num);
479
480 sk_dst_reset(sk);
481 sk->sk_prot->hash(sk);
482
483 return 0;
484 }
485
486 sk->sk_state = TCP_CLOSE;
487 __reqsk_queue_destroy(&icsk->icsk_accept_queue);
488 return -EADDRINUSE;
489 }
490
491 /*
492 * This routine closes sockets which have been at least partially
493 * opened, but not yet accepted.
494 */
495
496 static void tcp_listen_stop (struct sock *sk)
497 {
498 struct inet_connection_sock *icsk = inet_csk(sk);
499 struct request_sock *acc_req;
500 struct request_sock *req;
501
502 inet_csk_delete_keepalive_timer(sk);
503
504 /* make all the listen_opt local to us */
505 acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
506
507 /* Following specs, it would be better either to send FIN
508 * (and enter FIN-WAIT-1, it is normal close)
509 * or to send active reset (abort).
510 * Certainly, it is pretty dangerous while synflood, but it is
511 * bad justification for our negligence 8)
512 * To be honest, we are not able to make either
513 * of the variants now. --ANK
514 */
515 reqsk_queue_destroy(&icsk->icsk_accept_queue);
516
517 while ((req = acc_req) != NULL) {
518 struct sock *child = req->sk;
519
520 acc_req = req->dl_next;
521
522 local_bh_disable();
523 bh_lock_sock(child);
524 BUG_TRAP(!sock_owned_by_user(child));
525 sock_hold(child);
526
527 tcp_disconnect(child, O_NONBLOCK);
528
529 sock_orphan(child);
530
531 atomic_inc(&tcp_orphan_count);
532
533 tcp_destroy_sock(child);
534
535 bh_unlock_sock(child);
536 local_bh_enable();
537 sock_put(child);
538
539 sk_acceptq_removed(sk);
540 __reqsk_free(req);
541 }
542 BUG_TRAP(!sk->sk_ack_backlog);
543 }
544
545 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
546 {
547 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
548 tp->pushed_seq = tp->write_seq;
549 }
550
551 static inline int forced_push(struct tcp_sock *tp)
552 {
553 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
554 }
555
556 static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
557 struct sk_buff *skb)
558 {
559 skb->csum = 0;
560 TCP_SKB_CB(skb)->seq = tp->write_seq;
561 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
562 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
563 TCP_SKB_CB(skb)->sacked = 0;
564 skb_header_release(skb);
565 __skb_queue_tail(&sk->sk_write_queue, skb);
566 sk_charge_skb(sk, skb);
567 if (!sk->sk_send_head)
568 sk->sk_send_head = skb;
569 if (tp->nonagle & TCP_NAGLE_PUSH)
570 tp->nonagle &= ~TCP_NAGLE_PUSH;
571 }
572
573 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
574 struct sk_buff *skb)
575 {
576 if (flags & MSG_OOB) {
577 tp->urg_mode = 1;
578 tp->snd_up = tp->write_seq;
579 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
580 }
581 }
582
583 static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
584 int mss_now, int nonagle)
585 {
586 if (sk->sk_send_head) {
587 struct sk_buff *skb = sk->sk_write_queue.prev;
588 if (!(flags & MSG_MORE) || forced_push(tp))
589 tcp_mark_push(tp, skb);
590 tcp_mark_urg(tp, flags, skb);
591 __tcp_push_pending_frames(sk, tp, mss_now,
592 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
593 }
594 }
595
596 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
597 size_t psize, int flags)
598 {
599 struct tcp_sock *tp = tcp_sk(sk);
600 int mss_now, size_goal;
601 int err;
602 ssize_t copied;
603 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
604
605 /* Wait for a connection to finish. */
606 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
607 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
608 goto out_err;
609
610 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
611
612 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
613 size_goal = tp->xmit_size_goal;
614 copied = 0;
615
616 err = -EPIPE;
617 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
618 goto do_error;
619
620 while (psize > 0) {
621 struct sk_buff *skb = sk->sk_write_queue.prev;
622 struct page *page = pages[poffset / PAGE_SIZE];
623 int copy, i, can_coalesce;
624 int offset = poffset % PAGE_SIZE;
625 int size = min_t(size_t, psize, PAGE_SIZE - offset);
626
627 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
628 new_segment:
629 if (!sk_stream_memory_free(sk))
630 goto wait_for_sndbuf;
631
632 skb = sk_stream_alloc_pskb(sk, 0, 0,
633 sk->sk_allocation);
634 if (!skb)
635 goto wait_for_memory;
636
637 skb_entail(sk, tp, skb);
638 copy = size_goal;
639 }
640
641 if (copy > size)
642 copy = size;
643
644 i = skb_shinfo(skb)->nr_frags;
645 can_coalesce = skb_can_coalesce(skb, i, page, offset);
646 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
647 tcp_mark_push(tp, skb);
648 goto new_segment;
649 }
650 if (sk->sk_forward_alloc < copy &&
651 !sk_stream_mem_schedule(sk, copy, 0))
652 goto wait_for_memory;
653
654 if (can_coalesce) {
655 skb_shinfo(skb)->frags[i - 1].size += copy;
656 } else {
657 get_page(page);
658 skb_fill_page_desc(skb, i, page, offset, copy);
659 }
660
661 skb->len += copy;
662 skb->data_len += copy;
663 skb->truesize += copy;
664 sk->sk_wmem_queued += copy;
665 sk->sk_forward_alloc -= copy;
666 skb->ip_summed = CHECKSUM_HW;
667 tp->write_seq += copy;
668 TCP_SKB_CB(skb)->end_seq += copy;
669 skb_shinfo(skb)->tso_segs = 0;
670
671 if (!copied)
672 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
673
674 copied += copy;
675 poffset += copy;
676 if (!(psize -= copy))
677 goto out;
678
679 if (skb->len < mss_now || (flags & MSG_OOB))
680 continue;
681
682 if (forced_push(tp)) {
683 tcp_mark_push(tp, skb);
684 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
685 } else if (skb == sk->sk_send_head)
686 tcp_push_one(sk, mss_now);
687 continue;
688
689 wait_for_sndbuf:
690 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
691 wait_for_memory:
692 if (copied)
693 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
694
695 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
696 goto do_error;
697
698 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
699 size_goal = tp->xmit_size_goal;
700 }
701
702 out:
703 if (copied)
704 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
705 return copied;
706
707 do_error:
708 if (copied)
709 goto out;
710 out_err:
711 return sk_stream_error(sk, flags, err);
712 }
713
714 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
715 size_t size, int flags)
716 {
717 ssize_t res;
718 struct sock *sk = sock->sk;
719
720 #define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
721
722 if (!(sk->sk_route_caps & NETIF_F_SG) ||
723 !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
724 return sock_no_sendpage(sock, page, offset, size, flags);
725
726 #undef TCP_ZC_CSUM_FLAGS
727
728 lock_sock(sk);
729 TCP_CHECK_TIMER(sk);
730 res = do_tcp_sendpages(sk, &page, offset, size, flags);
731 TCP_CHECK_TIMER(sk);
732 release_sock(sk);
733 return res;
734 }
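/*
 * Illustrative userspace sketch (not part of this file): sendfile() on a
 * TCP socket is what ultimately funnels pages into do_tcp_sendpages()
 * above (falling back to sock_no_sendpage() when the route lacks
 * SG/checksum offload).  example_send_file() is a made-up helper.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sendfile.h>

static int example_send_file(int sockfd, int filefd)
{
	struct stat st;
	off_t off = 0;

	if (fstat(filefd, &st) < 0)
		return -1;
	while (off < st.st_size) {
		ssize_t n = sendfile(sockfd, filefd, &off, st.st_size - off);

		if (n <= 0)
			return -1;	/* real code would retry on EINTR/EAGAIN */
	}
	return 0;
}
#endif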
735
736 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
737 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
738
739 static inline int select_size(struct sock *sk, struct tcp_sock *tp)
740 {
741 int tmp = tp->mss_cache;
742
743 if (sk->sk_route_caps & NETIF_F_SG) {
744 if (sk->sk_route_caps & NETIF_F_TSO)
745 tmp = 0;
746 else {
747 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
748
749 if (tmp >= pgbreak &&
750 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
751 tmp = pgbreak;
752 }
753 }
754
755 return tmp;
756 }
757
758 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
759 size_t size)
760 {
761 struct iovec *iov;
762 struct tcp_sock *tp = tcp_sk(sk);
763 struct sk_buff *skb;
764 int iovlen, flags;
765 int mss_now, size_goal;
766 int err, copied;
767 long timeo;
768
769 lock_sock(sk);
770 TCP_CHECK_TIMER(sk);
771
772 flags = msg->msg_flags;
773 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
774
775 /* Wait for a connection to finish. */
776 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
777 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
778 goto out_err;
779
780 /* This should be in poll */
781 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
782
783 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
784 size_goal = tp->xmit_size_goal;
785
786 /* Ok commence sending. */
787 iovlen = msg->msg_iovlen;
788 iov = msg->msg_iov;
789 copied = 0;
790
791 err = -EPIPE;
792 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
793 goto do_error;
794
795 while (--iovlen >= 0) {
796 int seglen = iov->iov_len;
797 unsigned char __user *from = iov->iov_base;
798
799 iov++;
800
801 while (seglen > 0) {
802 int copy;
803
804 skb = sk->sk_write_queue.prev;
805
806 if (!sk->sk_send_head ||
807 (copy = size_goal - skb->len) <= 0) {
808
809 new_segment:
810 /* Allocate new segment. If the interface is SG,
811 * allocate skb fitting to single page.
812 */
813 if (!sk_stream_memory_free(sk))
814 goto wait_for_sndbuf;
815
816 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
817 0, sk->sk_allocation);
818 if (!skb)
819 goto wait_for_memory;
820
821 /*
822 * Check whether we can use HW checksum.
823 */
824 if (sk->sk_route_caps &
825 (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
826 NETIF_F_HW_CSUM))
827 skb->ip_summed = CHECKSUM_HW;
828
829 skb_entail(sk, tp, skb);
830 copy = size_goal;
831 }
832
833 /* Try to append data to the end of skb. */
834 if (copy > seglen)
835 copy = seglen;
836
837 /* Where to copy to? */
838 if (skb_tailroom(skb) > 0) {
839 /* We have some space in skb head. Superb! */
840 if (copy > skb_tailroom(skb))
841 copy = skb_tailroom(skb);
842 if ((err = skb_add_data(skb, from, copy)) != 0)
843 goto do_fault;
844 } else {
845 int merge = 0;
846 int i = skb_shinfo(skb)->nr_frags;
847 struct page *page = TCP_PAGE(sk);
848 int off = TCP_OFF(sk);
849
850 if (skb_can_coalesce(skb, i, page, off) &&
851 off != PAGE_SIZE) {
852 /* We can extend the last page
853 * fragment. */
854 merge = 1;
855 } else if (i == MAX_SKB_FRAGS ||
856 (!i &&
857 !(sk->sk_route_caps & NETIF_F_SG))) {
858 /* Need to add new fragment and cannot
859 * do this because interface is non-SG,
860 * or because all the page slots are
861 * busy. */
862 tcp_mark_push(tp, skb);
863 goto new_segment;
864 } else if (page) {
865 if (off == PAGE_SIZE) {
866 put_page(page);
867 TCP_PAGE(sk) = page = NULL;
868 }
869 }
870
871 if (!page) {
872 /* Allocate new cache page. */
873 if (!(page = sk_stream_alloc_page(sk)))
874 goto wait_for_memory;
875 off = 0;
876 }
877
878 if (copy > PAGE_SIZE - off)
879 copy = PAGE_SIZE - off;
880
881 /* Time to copy data. We are close to
882 * the end! */
883 err = skb_copy_to_page(sk, from, skb, page,
884 off, copy);
885 if (err) {
886 /* If this page was new, give it to the
887 * socket so it does not get leaked.
888 */
889 if (!TCP_PAGE(sk)) {
890 TCP_PAGE(sk) = page;
891 TCP_OFF(sk) = 0;
892 }
893 goto do_error;
894 }
895
896 /* Update the skb. */
897 if (merge) {
898 skb_shinfo(skb)->frags[i - 1].size +=
899 copy;
900 } else {
901 skb_fill_page_desc(skb, i, page, off, copy);
902 if (TCP_PAGE(sk)) {
903 get_page(page);
904 } else if (off + copy < PAGE_SIZE) {
905 get_page(page);
906 TCP_PAGE(sk) = page;
907 }
908 }
909
910 TCP_OFF(sk) = off + copy;
911 }
912
913 if (!copied)
914 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
915
916 tp->write_seq += copy;
917 TCP_SKB_CB(skb)->end_seq += copy;
918 skb_shinfo(skb)->tso_segs = 0;
919
920 from += copy;
921 copied += copy;
922 if ((seglen -= copy) == 0 && iovlen == 0)
923 goto out;
924
925 if (skb->len < mss_now || (flags & MSG_OOB))
926 continue;
927
928 if (forced_push(tp)) {
929 tcp_mark_push(tp, skb);
930 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
931 } else if (skb == sk->sk_send_head)
932 tcp_push_one(sk, mss_now);
933 continue;
934
935 wait_for_sndbuf:
936 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
937 wait_for_memory:
938 if (copied)
939 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
940
941 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
942 goto do_error;
943
944 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
945 size_goal = tp->xmit_size_goal;
946 }
947 }
948
949 out:
950 if (copied)
951 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
952 TCP_CHECK_TIMER(sk);
953 release_sock(sk);
954 return copied;
955
956 do_fault:
957 if (!skb->len) {
958 if (sk->sk_send_head == skb)
959 sk->sk_send_head = NULL;
960 __skb_unlink(skb, &sk->sk_write_queue);
961 sk_stream_free_skb(sk, skb);
962 }
963
964 do_error:
965 if (copied)
966 goto out;
967 out_err:
968 err = sk_stream_error(sk, flags, err);
969 TCP_CHECK_TIMER(sk);
970 release_sock(sk);
971 return err;
972 }
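/*
 * Illustrative userspace sketch (not part of this file): tcp_sendmsg()
 * may return after queueing only part of the request (non-blocking
 * sockets, send timeouts, signals), so careful callers loop until
 * everything is queued.  example_send_all() is a made-up helper.
 */
#if 0	/* example only */
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

static int example_send_all(int fd, const char *buf, size_t len)
{
	while (len > 0) {
		ssize_t n = send(fd, buf, len, MSG_NOSIGNAL);

		if (n < 0) {
			if (errno == EINTR)
				continue;
			return -1;	/* EPIPE, EAGAIN, ... */
		}
		buf += n;
		len -= n;
	}
	return 0;
}
#endif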
973
974 /*
975 * Handle reading urgent data. BSD has very simple semantics for
976 * this, no blocking and very strange errors 8)
977 */
978
979 static int tcp_recv_urg(struct sock *sk, long timeo,
980 struct msghdr *msg, int len, int flags,
981 int *addr_len)
982 {
983 struct tcp_sock *tp = tcp_sk(sk);
984
985 /* No URG data to read. */
986 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
987 tp->urg_data == TCP_URG_READ)
988 return -EINVAL; /* Yes this is right ! */
989
990 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
991 return -ENOTCONN;
992
993 if (tp->urg_data & TCP_URG_VALID) {
994 int err = 0;
995 char c = tp->urg_data;
996
997 if (!(flags & MSG_PEEK))
998 tp->urg_data = TCP_URG_READ;
999
1000 /* Read urgent data. */
1001 msg->msg_flags |= MSG_OOB;
1002
1003 if (len > 0) {
1004 if (!(flags & MSG_TRUNC))
1005 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1006 len = 1;
1007 } else
1008 msg->msg_flags |= MSG_TRUNC;
1009
1010 return err ? -EFAULT : len;
1011 }
1012
1013 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1014 return 0;
1015
1016 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1017 * the available implementations agree in this case:
1018 * this call should never block, independent of the
1019 * blocking state of the socket.
1020 * Mike <pall@rz.uni-karlsruhe.de>
1021 */
1022 return -EAGAIN;
1023 }
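/*
 * Illustrative userspace sketch (not part of this file): reading the one
 * byte of TCP urgent data handled above.  With SO_OOBINLINE unset,
 * recv(..., MSG_OOB) returns that byte out of band; it never blocks and
 * fails with EINVAL once the byte has already been consumed.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/socket.h>

static int example_read_oob(int fd)
{
	char c;
	ssize_t n = recv(fd, &c, 1, MSG_OOB);

	return n == 1 ? (unsigned char)c : -1;
}
#endif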
1024
1025 /* Clean up the receive buffer for full frames taken by the user,
1026 * then send an ACK if necessary. COPIED is the number of bytes
1027 * tcp_recvmsg has given to the user so far, it speeds up the
1028 * calculation of whether or not we must ACK for the sake of
1029 * a window update.
1030 */
1031 static void cleanup_rbuf(struct sock *sk, int copied)
1032 {
1033 struct tcp_sock *tp = tcp_sk(sk);
1034 int time_to_ack = 0;
1035
1036 #if TCP_DEBUG
1037 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1038
1039 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1040 #endif
1041
1042 if (inet_csk_ack_scheduled(sk)) {
1043 const struct inet_connection_sock *icsk = inet_csk(sk);
1044 /* Delayed ACKs frequently hit locked sockets during bulk
1045 * receive. */
1046 if (icsk->icsk_ack.blocked ||
1047 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1048 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1049 /*
1050 * If this read emptied read buffer, we send ACK, if
1051 * connection is not bidirectional, user drained
1052 * receive buffer and there was a small segment
1053 * in queue.
1054 */
1055 (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1056 !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
1057 time_to_ack = 1;
1058 }
1059
1060 /* We send an ACK if we can now advertise a non-zero window
1061 * which has been raised "significantly".
1062 *
1063 * Even if window raised up to infinity, do not send window open ACK
1064 * in states, where we will not receive more. It is useless.
1065 */
1066 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1067 __u32 rcv_window_now = tcp_receive_window(tp);
1068
1069 /* Optimize, __tcp_select_window() is not cheap. */
1070 if (2*rcv_window_now <= tp->window_clamp) {
1071 __u32 new_window = __tcp_select_window(sk);
1072
1073 /* Send ACK now, if this read freed lots of space
1074 * in our buffer. Certainly, new_window is new window.
1075 * We can advertise it now, if it is not less than current one.
1076 * "Lots" means "at least twice" here.
1077 */
1078 if (new_window && new_window >= 2 * rcv_window_now)
1079 time_to_ack = 1;
1080 }
1081 }
1082 if (time_to_ack)
1083 tcp_send_ack(sk);
1084 }
1085
1086 static void tcp_prequeue_process(struct sock *sk)
1087 {
1088 struct sk_buff *skb;
1089 struct tcp_sock *tp = tcp_sk(sk);
1090
1091 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1092
1093 /* RX process wants to run with disabled BHs, though it is not
1094 * necessary */
1095 local_bh_disable();
1096 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1097 sk->sk_backlog_rcv(sk, skb);
1098 local_bh_enable();
1099
1100 /* Clear memory counter. */
1101 tp->ucopy.memory = 0;
1102 }
1103
1104 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1105 {
1106 struct sk_buff *skb;
1107 u32 offset;
1108
1109 skb_queue_walk(&sk->sk_receive_queue, skb) {
1110 offset = seq - TCP_SKB_CB(skb)->seq;
1111 if (skb->h.th->syn)
1112 offset--;
1113 if (offset < skb->len || skb->h.th->fin) {
1114 *off = offset;
1115 return skb;
1116 }
1117 }
1118 return NULL;
1119 }
1120
1121 /*
1122 * This routine provides an alternative to tcp_recvmsg() for routines
1123 * that would like to handle copying from skbuffs directly in 'sendfile'
1124 * fashion.
1125 * Note:
1126 * - It is assumed that the socket was locked by the caller.
1127 * - The routine does not block.
1128 * - At present, there is no support for reading OOB data
1129 * or for 'peeking' the socket using this routine
1130 * (although both would be easy to implement).
1131 */
1132 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1133 sk_read_actor_t recv_actor)
1134 {
1135 struct sk_buff *skb;
1136 struct tcp_sock *tp = tcp_sk(sk);
1137 u32 seq = tp->copied_seq;
1138 u32 offset;
1139 int copied = 0;
1140
1141 if (sk->sk_state == TCP_LISTEN)
1142 return -ENOTCONN;
1143 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1144 if (offset < skb->len) {
1145 size_t used, len;
1146
1147 len = skb->len - offset;
1148 /* Stop reading if we hit a patch of urgent data */
1149 if (tp->urg_data) {
1150 u32 urg_offset = tp->urg_seq - seq;
1151 if (urg_offset < len)
1152 len = urg_offset;
1153 if (!len)
1154 break;
1155 }
1156 used = recv_actor(desc, skb, offset, len);
1157 if (used <= len) {
1158 seq += used;
1159 copied += used;
1160 offset += used;
1161 }
1162 if (offset != skb->len)
1163 break;
1164 }
1165 if (skb->h.th->fin) {
1166 sk_eat_skb(sk, skb);
1167 ++seq;
1168 break;
1169 }
1170 sk_eat_skb(sk, skb);
1171 if (!desc->count)
1172 break;
1173 }
1174 tp->copied_seq = seq;
1175
1176 tcp_rcv_space_adjust(sk);
1177
1178 /* Clean up data we have read: This will do ACK frames. */
1179 if (copied)
1180 cleanup_rbuf(sk, copied);
1181 return copied;
1182 }
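/*
 * Illustrative sketch (not part of this file) of a minimal recv_actor for
 * tcp_read_sock(): it simply counts the bytes offered and consumes them,
 * the way in-kernel users plug their own copy routine in here.  The
 * read_descriptor_t field usage (count/written) is an assumption based on
 * this kernel generation; example_* names are made up.
 */
#if 0	/* example only */
static int example_count_actor(read_descriptor_t *desc, struct sk_buff *skb,
			       unsigned int offset, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->written += len;
	return len;
}

static int example_count_bytes(struct sock *sk, size_t max)
{
	read_descriptor_t desc = { .count = max };

	/* the caller must hold the socket lock, as noted above */
	tcp_read_sock(sk, &desc, example_count_actor);
	return desc.written;
}
#endif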
1183
1184 /*
1185 * This routine copies from a sock struct into the user buffer.
1186 *
1187 * Technical note: in 2.3 we work on _locked_ socket, so that
1188 * tricks with *seq access order and skb->users are not required.
1189 * Probably, code can be easily improved even more.
1190 */
1191
1192 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1193 size_t len, int nonblock, int flags, int *addr_len)
1194 {
1195 struct tcp_sock *tp = tcp_sk(sk);
1196 int copied = 0;
1197 u32 peek_seq;
1198 u32 *seq;
1199 unsigned long used;
1200 int err;
1201 int target; /* Read at least this many bytes */
1202 long timeo;
1203 struct task_struct *user_recv = NULL;
1204
1205 lock_sock(sk);
1206
1207 TCP_CHECK_TIMER(sk);
1208
1209 err = -ENOTCONN;
1210 if (sk->sk_state == TCP_LISTEN)
1211 goto out;
1212
1213 timeo = sock_rcvtimeo(sk, nonblock);
1214
1215 /* Urgent data needs to be handled specially. */
1216 if (flags & MSG_OOB)
1217 goto recv_urg;
1218
1219 seq = &tp->copied_seq;
1220 if (flags & MSG_PEEK) {
1221 peek_seq = tp->copied_seq;
1222 seq = &peek_seq;
1223 }
1224
1225 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1226
1227 do {
1228 struct sk_buff *skb;
1229 u32 offset;
1230
1231 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1232 if (tp->urg_data && tp->urg_seq == *seq) {
1233 if (copied)
1234 break;
1235 if (signal_pending(current)) {
1236 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1237 break;
1238 }
1239 }
1240
1241 /* Next get a buffer. */
1242
1243 skb = skb_peek(&sk->sk_receive_queue);
1244 do {
1245 if (!skb)
1246 break;
1247
1248 /* Now that we have two receive queues this
1249 * shouldn't happen.
1250 */
1251 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1252 printk(KERN_INFO "recvmsg bug: copied %X "
1253 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1254 break;
1255 }
1256 offset = *seq - TCP_SKB_CB(skb)->seq;
1257 if (skb->h.th->syn)
1258 offset--;
1259 if (offset < skb->len)
1260 goto found_ok_skb;
1261 if (skb->h.th->fin)
1262 goto found_fin_ok;
1263 BUG_TRAP(flags & MSG_PEEK);
1264 skb = skb->next;
1265 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1266
1267 /* Well, if we have backlog, try to process it now. */
1268
1269 if (copied >= target && !sk->sk_backlog.tail)
1270 break;
1271
1272 if (copied) {
1273 if (sk->sk_err ||
1274 sk->sk_state == TCP_CLOSE ||
1275 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1276 !timeo ||
1277 signal_pending(current) ||
1278 (flags & MSG_PEEK))
1279 break;
1280 } else {
1281 if (sock_flag(sk, SOCK_DONE))
1282 break;
1283
1284 if (sk->sk_err) {
1285 copied = sock_error(sk);
1286 break;
1287 }
1288
1289 if (sk->sk_shutdown & RCV_SHUTDOWN)
1290 break;
1291
1292 if (sk->sk_state == TCP_CLOSE) {
1293 if (!sock_flag(sk, SOCK_DONE)) {
1294 /* This occurs when user tries to read
1295 * from never connected socket.
1296 */
1297 copied = -ENOTCONN;
1298 break;
1299 }
1300 break;
1301 }
1302
1303 if (!timeo) {
1304 copied = -EAGAIN;
1305 break;
1306 }
1307
1308 if (signal_pending(current)) {
1309 copied = sock_intr_errno(timeo);
1310 break;
1311 }
1312 }
1313
1314 cleanup_rbuf(sk, copied);
1315
1316 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1317 /* Install new reader */
1318 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1319 user_recv = current;
1320 tp->ucopy.task = user_recv;
1321 tp->ucopy.iov = msg->msg_iov;
1322 }
1323
1324 tp->ucopy.len = len;
1325
1326 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1327 (flags & (MSG_PEEK | MSG_TRUNC)));
1328
1329 /* Ugly... If prequeue is not empty, we have to
1330 * process it before releasing socket, otherwise
1331 * order will be broken at second iteration.
1332 * More elegant solution is required!!!
1333 *
1334 * Look: we have the following (pseudo)queues:
1335 *
1336 * 1. packets in flight
1337 * 2. backlog
1338 * 3. prequeue
1339 * 4. receive_queue
1340 *
1341 * Each queue can be processed only if the next ones
1342 * are empty. At this point we have empty receive_queue.
1343 * But prequeue _can_ be not empty after 2nd iteration,
1344 * when we jumped to start of loop because backlog
1345 * processing added something to receive_queue.
1346 * We cannot release_sock(), because backlog contains
1347 * packets arrived _after_ prequeued ones.
1348 *
1349 * Shortly, algorithm is clear --- to process all
1350 * the queues in order. We could make it more directly,
1351 * requeueing packets from backlog to prequeue, if
1352 * is not empty. It is more elegant, but eats cycles,
1353 * unfortunately.
1354 */
1355 if (!skb_queue_empty(&tp->ucopy.prequeue))
1356 goto do_prequeue;
1357
1358 /* __ Set realtime policy in scheduler __ */
1359 }
1360
1361 if (copied >= target) {
1362 /* Do not sleep, just process backlog. */
1363 release_sock(sk);
1364 lock_sock(sk);
1365 } else
1366 sk_wait_data(sk, &timeo);
1367
1368 if (user_recv) {
1369 int chunk;
1370
1371 /* __ Restore normal policy in scheduler __ */
1372
1373 if ((chunk = len - tp->ucopy.len) != 0) {
1374 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1375 len -= chunk;
1376 copied += chunk;
1377 }
1378
1379 if (tp->rcv_nxt == tp->copied_seq &&
1380 !skb_queue_empty(&tp->ucopy.prequeue)) {
1381 do_prequeue:
1382 tcp_prequeue_process(sk);
1383
1384 if ((chunk = len - tp->ucopy.len) != 0) {
1385 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1386 len -= chunk;
1387 copied += chunk;
1388 }
1389 }
1390 }
1391 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1392 if (net_ratelimit())
1393 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1394 current->comm, current->pid);
1395 peek_seq = tp->copied_seq;
1396 }
1397 continue;
1398
1399 found_ok_skb:
1400 /* Ok so how much can we use? */
1401 used = skb->len - offset;
1402 if (len < used)
1403 used = len;
1404
1405 /* Do we have urgent data here? */
1406 if (tp->urg_data) {
1407 u32 urg_offset = tp->urg_seq - *seq;
1408 if (urg_offset < used) {
1409 if (!urg_offset) {
1410 if (!sock_flag(sk, SOCK_URGINLINE)) {
1411 ++*seq;
1412 offset++;
1413 used--;
1414 if (!used)
1415 goto skip_copy;
1416 }
1417 } else
1418 used = urg_offset;
1419 }
1420 }
1421
1422 if (!(flags & MSG_TRUNC)) {
1423 err = skb_copy_datagram_iovec(skb, offset,
1424 msg->msg_iov, used);
1425 if (err) {
1426 /* Exception. Bailout! */
1427 if (!copied)
1428 copied = -EFAULT;
1429 break;
1430 }
1431 }
1432
1433 *seq += used;
1434 copied += used;
1435 len -= used;
1436
1437 tcp_rcv_space_adjust(sk);
1438
1439 skip_copy:
1440 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1441 tp->urg_data = 0;
1442 tcp_fast_path_check(sk, tp);
1443 }
1444 if (used + offset < skb->len)
1445 continue;
1446
1447 if (skb->h.th->fin)
1448 goto found_fin_ok;
1449 if (!(flags & MSG_PEEK))
1450 sk_eat_skb(sk, skb);
1451 continue;
1452
1453 found_fin_ok:
1454 /* Process the FIN. */
1455 ++*seq;
1456 if (!(flags & MSG_PEEK))
1457 sk_eat_skb(sk, skb);
1458 break;
1459 } while (len > 0);
1460
1461 if (user_recv) {
1462 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1463 int chunk;
1464
1465 tp->ucopy.len = copied > 0 ? len : 0;
1466
1467 tcp_prequeue_process(sk);
1468
1469 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1470 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1471 len -= chunk;
1472 copied += chunk;
1473 }
1474 }
1475
1476 tp->ucopy.task = NULL;
1477 tp->ucopy.len = 0;
1478 }
1479
1480 /* According to UNIX98, msg_name/msg_namelen are ignored
1481 * on a connected socket. I was just happy when I found this 8) --ANK
1482 */
1483
1484 /* Clean up data we have read: This will do ACK frames. */
1485 cleanup_rbuf(sk, copied);
1486
1487 TCP_CHECK_TIMER(sk);
1488 release_sock(sk);
1489 return copied;
1490
1491 out:
1492 TCP_CHECK_TIMER(sk);
1493 release_sock(sk);
1494 return err;
1495
1496 recv_urg:
1497 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1498 goto out;
1499 }
1500
1501 /*
1502 * State processing on a close. This implements the state shift for
1503 * sending our FIN frame. Note that we only send a FIN for some
1504 * states. A shutdown() may have already sent the FIN, or we may be
1505 * closed.
1506 */
1507
1508 static unsigned char new_state[16] = {
1509 /* current state: new state: action: */
1510 /* (Invalid) */ TCP_CLOSE,
1511 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1512 /* TCP_SYN_SENT */ TCP_CLOSE,
1513 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1514 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1515 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1516 /* TCP_TIME_WAIT */ TCP_CLOSE,
1517 /* TCP_CLOSE */ TCP_CLOSE,
1518 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1519 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1520 /* TCP_LISTEN */ TCP_CLOSE,
1521 /* TCP_CLOSING */ TCP_CLOSING,
1522 };
1523
1524 static int tcp_close_state(struct sock *sk)
1525 {
1526 int next = (int)new_state[sk->sk_state];
1527 int ns = next & TCP_STATE_MASK;
1528
1529 tcp_set_state(sk, ns);
1530
1531 return next & TCP_ACTION_FIN;
1532 }
1533
1534 /*
1535 * Shutdown the sending side of a connection. Much like close except
1536 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1537 */
1538
1539 void tcp_shutdown(struct sock *sk, int how)
1540 {
1541 /* We need to grab some memory, and put together a FIN,
1542 * and then put it into the queue to be sent.
1543 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1544 */
1545 if (!(how & SEND_SHUTDOWN))
1546 return;
1547
1548 /* If we've already sent a FIN, or it's a closed state, skip this. */
1549 if ((1 << sk->sk_state) &
1550 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1551 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1552 /* Clear out any half completed packets. FIN if needed. */
1553 if (tcp_close_state(sk))
1554 tcp_send_fin(sk);
1555 }
1556 }
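/*
 * Illustrative userspace sketch (not part of this file): the classic
 * half-close that ends up in tcp_shutdown() above.  Only SEND_SHUTDOWN
 * triggers a FIN here; the read side stays usable until the peer closes.
 */
#if 0	/* example only */
#include <unistd.h>
#include <sys/socket.h>

static void example_half_close(int fd)
{
	char buf[1024];
	ssize_t n;

	shutdown(fd, SHUT_WR);				/* SEND_SHUTDOWN: FIN queued */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;					/* drain the remaining peer data */
	close(fd);
}
#endif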
1557
1558 /*
1559 * At this point, there should be no process reference to this
1560 * socket, and thus no user references at all. Therefore we
1561 * can assume the socket waitqueue is inactive and nobody will
1562 * try to jump onto it.
1563 */
1564 void tcp_destroy_sock(struct sock *sk)
1565 {
1566 BUG_TRAP(sk->sk_state == TCP_CLOSE);
1567 BUG_TRAP(sock_flag(sk, SOCK_DEAD));
1568
1569 /* It cannot be in hash table! */
1570 BUG_TRAP(sk_unhashed(sk));
1571
1572 /* If it has a non-zero inet_sk(sk)->num, it must be bound */
1573 BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
1574
1575 sk->sk_prot->destroy(sk);
1576
1577 sk_stream_kill_queues(sk);
1578
1579 xfrm_sk_free_policy(sk);
1580
1581 sk_refcnt_debug_release(sk);
1582
1583 atomic_dec(&tcp_orphan_count);
1584 sock_put(sk);
1585 }
1586
1587 void tcp_close(struct sock *sk, long timeout)
1588 {
1589 struct sk_buff *skb;
1590 int data_was_unread = 0;
1591
1592 lock_sock(sk);
1593 sk->sk_shutdown = SHUTDOWN_MASK;
1594
1595 if (sk->sk_state == TCP_LISTEN) {
1596 tcp_set_state(sk, TCP_CLOSE);
1597
1598 /* Special case. */
1599 tcp_listen_stop(sk);
1600
1601 goto adjudge_to_death;
1602 }
1603
1604 /* We need to flush the recv. buffs. We do this only on the
1605 * descriptor close, not protocol-sourced closes, because the
1606 * reader process may not have drained the data yet!
1607 */
1608 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1609 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1610 skb->h.th->fin;
1611 data_was_unread += len;
1612 __kfree_skb(skb);
1613 }
1614
1615 sk_stream_mem_reclaim(sk);
1616
1617 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1618 * 3.10, we send a RST here because data was lost. To
1619 * witness the awful effects of the old behavior of always
1620 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1621 * a bulk GET in an FTP client, suspend the process, wait
1622 * for the client to advertise a zero window, then kill -9
1623 * the FTP client, wheee... Note: timeout is always zero
1624 * in such a case.
1625 */
1626 if (data_was_unread) {
1627 /* Unread data was tossed, zap the connection. */
1628 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1629 tcp_set_state(sk, TCP_CLOSE);
1630 tcp_send_active_reset(sk, GFP_KERNEL);
1631 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1632 /* Check zero linger _after_ checking for unread data. */
1633 sk->sk_prot->disconnect(sk, 0);
1634 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1635 } else if (tcp_close_state(sk)) {
1636 /* We FIN if the application ate all the data before
1637 * zapping the connection.
1638 */
1639
1640 /* RED-PEN. Formally speaking, we have broken TCP state
1641 * machine. State transitions:
1642 *
1643 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1644 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1645 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1646 *
1647 * are legal only when FIN has been sent (i.e. in window),
1648 * rather than queued out of window. Purists blame.
1649 *
1650 * F.e. "RFC state" is ESTABLISHED,
1651 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1652 *
1653 * The visible declinations are that sometimes
1654 * we enter time-wait state, when it is not required really
1655 * (harmless), do not send active resets, when they are
1656 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1657 * they look as CLOSING or LAST_ACK for Linux)
1658 * Probably, I missed some more holelets.
1659 * --ANK
1660 */
1661 tcp_send_fin(sk);
1662 }
1663
1664 sk_stream_wait_close(sk, timeout);
1665
1666 adjudge_to_death:
1667 /* It is the last release_sock in its life. It will remove backlog. */
1668 release_sock(sk);
1669
1670
1671 /* Now socket is owned by kernel and we acquire BH lock
1672 to finish close. No need to check for user refs.
1673 */
1674 local_bh_disable();
1675 bh_lock_sock(sk);
1676 BUG_TRAP(!sock_owned_by_user(sk));
1677
1678 sock_hold(sk);
1679 sock_orphan(sk);
1680
1681 /* This is a (useful) BSD violation of the RFC. There is a
1682 * problem with TCP as specified in that the other end could
1683 * keep a socket open forever with no application left this end.
1684 * We use a 3 minute timeout (about the same as BSD) then kill
1685 * our end. If they send after that then tough - BUT: long enough
1686 * that we won't make the old 4*rto = almost no time - whoops
1687 * reset mistake.
1688 *
1689 * Nope, it was not mistake. It is really desired behaviour
1690 * f.e. on http servers, when such sockets are useless, but
1691 * consume significant resources. Let's do it with special
1692 * linger2 option. --ANK
1693 */
1694
1695 if (sk->sk_state == TCP_FIN_WAIT2) {
1696 struct tcp_sock *tp = tcp_sk(sk);
1697 if (tp->linger2 < 0) {
1698 tcp_set_state(sk, TCP_CLOSE);
1699 tcp_send_active_reset(sk, GFP_ATOMIC);
1700 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1701 } else {
1702 const int tmo = tcp_fin_time(sk);
1703
1704 if (tmo > TCP_TIMEWAIT_LEN) {
1705 inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
1706 } else {
1707 atomic_inc(&tcp_orphan_count);
1708 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1709 goto out;
1710 }
1711 }
1712 }
1713 if (sk->sk_state != TCP_CLOSE) {
1714 sk_stream_mem_reclaim(sk);
1715 if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
1716 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1717 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1718 if (net_ratelimit())
1719 printk(KERN_INFO "TCP: too many orphaned "
1720 "sockets\n");
1721 tcp_set_state(sk, TCP_CLOSE);
1722 tcp_send_active_reset(sk, GFP_ATOMIC);
1723 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1724 }
1725 }
1726 atomic_inc(&tcp_orphan_count);
1727
1728 if (sk->sk_state == TCP_CLOSE)
1729 tcp_destroy_sock(sk);
1730 /* Otherwise, socket is reprieved until protocol close. */
1731
1732 out:
1733 bh_unlock_sock(sk);
1734 local_bh_enable();
1735 sock_put(sk);
1736 }
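/*
 * Illustrative userspace sketch (not part of this file): the zero-linger
 * abort path taken in tcp_close() above (SOCK_LINGER set, lingertime 0),
 * i.e. the RFC 793 ABORT call: close() then resets the connection instead
 * of performing the normal FIN handshake.
 */
#if 0	/* example only */
#include <unistd.h>
#include <sys/socket.h>

static void example_abortive_close(int fd)
{
	struct linger lin = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
	close(fd);	/* sends RST, skips TIME_WAIT */
}
#endif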
1737
1738 /* These states need RST on ABORT according to RFC793 */
1739
1740 static inline int tcp_need_reset(int state)
1741 {
1742 return (1 << state) &
1743 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1744 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1745 }
1746
1747 int tcp_disconnect(struct sock *sk, int flags)
1748 {
1749 struct inet_sock *inet = inet_sk(sk);
1750 struct inet_connection_sock *icsk = inet_csk(sk);
1751 struct tcp_sock *tp = tcp_sk(sk);
1752 int err = 0;
1753 int old_state = sk->sk_state;
1754
1755 if (old_state != TCP_CLOSE)
1756 tcp_set_state(sk, TCP_CLOSE);
1757
1758 /* ABORT function of RFC793 */
1759 if (old_state == TCP_LISTEN) {
1760 tcp_listen_stop(sk);
1761 } else if (tcp_need_reset(old_state) ||
1762 (tp->snd_nxt != tp->write_seq &&
1763 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1764 /* The last check adjusts for the discrepancy of Linux wrt. RFC
1765 * states
1766 */
1767 tcp_send_active_reset(sk, gfp_any());
1768 sk->sk_err = ECONNRESET;
1769 } else if (old_state == TCP_SYN_SENT)
1770 sk->sk_err = ECONNRESET;
1771
1772 tcp_clear_xmit_timers(sk);
1773 __skb_queue_purge(&sk->sk_receive_queue);
1774 sk_stream_writequeue_purge(sk);
1775 __skb_queue_purge(&tp->out_of_order_queue);
1776
1777 inet->dport = 0;
1778
1779 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1780 inet_reset_saddr(sk);
1781
1782 sk->sk_shutdown = 0;
1783 sock_reset_flag(sk, SOCK_DONE);
1784 tp->srtt = 0;
1785 if ((tp->write_seq += tp->max_window + 2) == 0)
1786 tp->write_seq = 1;
1787 icsk->icsk_backoff = 0;
1788 tp->snd_cwnd = 2;
1789 tp->probes_out = 0;
1790 tp->packets_out = 0;
1791 tp->snd_ssthresh = 0x7fffffff;
1792 tp->snd_cwnd_cnt = 0;
1793 tcp_set_ca_state(tp, TCP_CA_Open);
1794 tcp_clear_retrans(tp);
1795 inet_csk_delack_init(sk);
1796 sk->sk_send_head = NULL;
1797 tp->rx_opt.saw_tstamp = 0;
1798 tcp_sack_reset(&tp->rx_opt);
1799 __sk_dst_reset(sk);
1800
1801 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1802
1803 sk->sk_error_report(sk);
1804 return err;
1805 }
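/*
 * Illustrative userspace sketch (not part of this file): tcp_disconnect()
 * is reached from userspace by connect()ing the socket to an address with
 * sa_family = AF_UNSPEC, which dissolves the current association so the
 * descriptor can be reused.  example_dissolve() is a made-up helper.
 */
#if 0	/* example only */
#include <string.h>
#include <sys/socket.h>

static int example_dissolve(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}
#endif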
1806
1807 /*
1808 * Wait for an incoming connection, avoid race
1809 * conditions. This must be called with the socket locked.
1810 */
1811 static int wait_for_connect(struct sock *sk, long timeo)
1812 {
1813 struct inet_connection_sock *icsk = inet_csk(sk);
1814 DEFINE_WAIT(wait);
1815 int err;
1816
1817 /*
1818 * True wake-one mechanism for incoming connections: only
1819 * one process gets woken up, not the 'whole herd'.
1820 * Since we do not 'race & poll' for established sockets
1821 * anymore, the common case will execute the loop only once.
1822 *
1823 * Subtle issue: "add_wait_queue_exclusive()" will be added
1824 * after any current non-exclusive waiters, and we know that
1825 * it will always _stay_ after any new non-exclusive waiters
1826 * because all non-exclusive waiters are added at the
1827 * beginning of the wait-queue. As such, it's ok to "drop"
1828 * our exclusiveness temporarily when we get woken up without
1829 * having to remove and re-insert us on the wait queue.
1830 */
1831 for (;;) {
1832 prepare_to_wait_exclusive(sk->sk_sleep, &wait,
1833 TASK_INTERRUPTIBLE);
1834 release_sock(sk);
1835 if (reqsk_queue_empty(&icsk->icsk_accept_queue))
1836 timeo = schedule_timeout(timeo);
1837 lock_sock(sk);
1838 err = 0;
1839 if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
1840 break;
1841 err = -EINVAL;
1842 if (sk->sk_state != TCP_LISTEN)
1843 break;
1844 err = sock_intr_errno(timeo);
1845 if (signal_pending(current))
1846 break;
1847 err = -EAGAIN;
1848 if (!timeo)
1849 break;
1850 }
1851 finish_wait(sk->sk_sleep, &wait);
1852 return err;
1853 }
1854
1855 /*
1856 * This will accept the next outstanding connection.
1857 */
1858
1859 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
1860 {
1861 struct inet_connection_sock *icsk = inet_csk(sk);
1862 struct sock *newsk;
1863 int error;
1864
1865 lock_sock(sk);
1866
1867 /* We need to make sure that this socket is listening,
1868 * and that it has something pending.
1869 */
1870 error = -EINVAL;
1871 if (sk->sk_state != TCP_LISTEN)
1872 goto out_err;
1873
1874 /* Find already established connection */
1875 if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
1876 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1877
1878 /* If this is a non blocking socket don't sleep */
1879 error = -EAGAIN;
1880 if (!timeo)
1881 goto out_err;
1882
1883 error = wait_for_connect(sk, timeo);
1884 if (error)
1885 goto out_err;
1886 }
1887
1888 newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
1889 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
1890 out:
1891 release_sock(sk);
1892 return newsk;
1893 out_err:
1894 newsk = NULL;
1895 *err = error;
1896 goto out;
1897 }
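/*
 * Illustrative userspace sketch (not part of this file): the application
 * side of inet_csk_accept() above.  On a non-blocking listener it returns
 * -EAGAIN when the accept queue is empty, so applications typically wait
 * for POLLIN and loop.  example_accept_nonblock() is a made-up helper.
 */
#if 0	/* example only */
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

static int example_accept_nonblock(int listen_fd)
{
	struct pollfd pfd = { .fd = listen_fd, .events = POLLIN };

	for (;;) {
		int fd = accept(listen_fd, NULL, NULL);

		if (fd >= 0)
			return fd;
		if (errno != EAGAIN && errno != EWOULDBLOCK)
			return -1;
		if (poll(&pfd, 1, -1) < 0 && errno != EINTR)
			return -1;
	}
}
#endif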
1898
1899 /*
1900 * Socket option code for TCP.
1901 */
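/*
 * Illustrative userspace sketch (not part of this file): typical users of
 * the options handled below.  TCP_CORK holds back partial frames so
 * headers written with send() and a body sent with sendfile() leave as
 * full segments; TCP_NODELAY disables Nagle; the TCP_KEEP* knobs tune
 * keepalive per socket.  example_* names are made up.
 */
#if 0	/* example only */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void example_tune_socket(int fd)
{
	int one = 1, idle = 60, intvl = 10, cnt = 5;

	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));

	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}

static void example_cork_then_sendfile(int fd, const void *hdr, size_t hdrlen)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	send(fd, hdr, hdrlen, 0);
	/* ... sendfile(fd, filefd, NULL, filesize) would go here ... */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}
#endif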
1902 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1903 int optlen)
1904 {
1905 struct tcp_sock *tp = tcp_sk(sk);
1906 struct inet_connection_sock *icsk = inet_csk(sk);
1907 int val;
1908 int err = 0;
1909
1910 if (level != SOL_TCP)
1911 return tp->af_specific->setsockopt(sk, level, optname,
1912 optval, optlen);
1913
1914 /* This is a string value; all the others are ints. */
1915 if (optname == TCP_CONGESTION) {
1916 char name[TCP_CA_NAME_MAX];
1917
1918 if (optlen < 1)
1919 return -EINVAL;
1920
1921 val = strncpy_from_user(name, optval,
1922 min(TCP_CA_NAME_MAX-1, optlen));
1923 if (val < 0)
1924 return -EFAULT;
1925 name[val] = 0;
1926
1927 lock_sock(sk);
1928 err = tcp_set_congestion_control(tp, name);
1929 release_sock(sk);
1930 return err;
1931 }
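/*
 * Illustrative userspace sketch of the TCP_CONGESTION string option
 * handled above (sketch only; "fd" is an assumed connected socket and
 * the TCP_CONGESTION constant may need <linux/tcp.h> on older libcs):
 *
 *	const char *ca = "reno";
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *		       ca, strlen(ca)) < 0)
 *		perror("TCP_CONGESTION");  // e.g. module not available
 *
 * IPPROTO_TCP and SOL_TCP have the same value on Linux.
 */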
1932
1933 if (optlen < sizeof(int))
1934 return -EINVAL;
1935
1936 if (get_user(val, (int __user *)optval))
1937 return -EFAULT;
1938
1939 lock_sock(sk);
1940
1941 switch (optname) {
1942 case TCP_MAXSEG:
1943 /* Values greater than the interface MTU won't take effect. However,
1944 * at the point when this call is made we typically don't yet
1945 * know which interface is going to be used. */
1946 if (val < 8 || val > MAX_TCP_WINDOW) {
1947 err = -EINVAL;
1948 break;
1949 }
1950 tp->rx_opt.user_mss = val;
1951 break;
1952
1953 case TCP_NODELAY:
1954 if (val) {
1955 /* TCP_NODELAY is weaker than TCP_CORK, so this option set on
1956 * a corked socket is remembered, but it is not activated until
1957 * the cork is cleared.
1958 *
1959 * However, when TCP_NODELAY is set we make
1960 * an explicit push, which overrides even TCP_CORK
1961 * for currently queued segments.
1962 */
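/*
 * Illustrative userspace use of this option (sketch only; "fd" and
 * the request buffer are assumed to exist):
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 *	write(fd, req, req_len);  // pushed out without Nagle coalescing
 */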
1963 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1964 tcp_push_pending_frames(sk, tp);
1965 } else {
1966 tp->nonagle &= ~TCP_NAGLE_OFF;
1967 }
1968 break;
1969
1970 case TCP_CORK:
1971 /* When set, non-full frames are always queued rather than sent immediately.
1972 * Later the user clears this option and we transmit
1973 * any pending partial frames in the queue. This is
1974 * meant to be used alongside sendfile() to get properly
1975 * filled frames when the user (for example) must write
1976 * out headers with a write() call first and then use
1977 * sendfile() to send out the data parts.
1978 *
1979 * TCP_CORK can be set together with TCP_NODELAY and it is
1980 * stronger than TCP_NODELAY.
1981 */
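/*
 * Illustrative userspace pattern for the header-plus-sendfile() case
 * described above (sketch only; "fd", "filefd", "hdr" and the lengths
 * are assumed; sendfile() is declared in <sys/sendfile.h>):
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);                // queued, not pushed
 *	sendfile(fd, filefd, NULL, file_len);   // coalesced with header
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *	                                        // uncork: push what is left
 */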
1982 if (val) {
1983 tp->nonagle |= TCP_NAGLE_CORK;
1984 } else {
1985 tp->nonagle &= ~TCP_NAGLE_CORK;
1986 if (tp->nonagle&TCP_NAGLE_OFF)
1987 tp->nonagle |= TCP_NAGLE_PUSH;
1988 tcp_push_pending_frames(sk, tp);
1989 }
1990 break;
1991
1992 case TCP_KEEPIDLE:
1993 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1994 err = -EINVAL;
1995 else {
1996 tp->keepalive_time = val * HZ;
1997 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1998 !((1 << sk->sk_state) &
1999 (TCPF_CLOSE | TCPF_LISTEN))) {
2000 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
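/* Worked example (illustrative): if 300 seconds have passed since
 * the last received segment and the idle time is being lowered to
 * 60 seconds, the new keepalive_time is smaller than "elapsed", so
 * the else branch below sets elapsed to 0 and the keepalive timer
 * is re-armed to fire immediately.
 */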
2001 if (tp->keepalive_time > elapsed)
2002 elapsed = tp->keepalive_time - elapsed;
2003 else
2004 elapsed = 0;
2005 inet_csk_reset_keepalive_timer(sk, elapsed);
2006 }
2007 }
2008 break;
2009 case TCP_KEEPINTVL:
2010 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2011 err = -EINVAL;
2012 else
2013 tp->keepalive_intvl = val * HZ;
2014 break;
2015 case TCP_KEEPCNT:
2016 if (val < 1 || val > MAX_TCP_KEEPCNT)
2017 err = -EINVAL;
2018 else
2019 tp->keepalive_probes = val;
2020 break;
2021 case TCP_SYNCNT:
2022 if (val < 1 || val > MAX_TCP_SYNCNT)
2023 err = -EINVAL;
2024 else
2025 icsk->icsk_syn_retries = val;
2026 break;
2027
2028 case TCP_LINGER2:
2029 if (val < 0)
2030 tp->linger2 = -1;
2031 else if (val > sysctl_tcp_fin_timeout / HZ)
2032 tp->linger2 = 0;
2033 else
2034 tp->linger2 = val * HZ;
2035 break;
2036
2037 case TCP_DEFER_ACCEPT:
2038 tp->defer_accept = 0;
2039 if (val > 0) {
2040 /* Translate value in seconds to number of
2041 * retransmits */
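/* Worked example (illustrative, assuming TCP_TIMEOUT_INIT is 3 * HZ
 * as in this tree): val = 10 seconds gives 10 > 3 and 10 > 6 but not
 * 10 > 12, so the loop leaves defer_accept at 2 and the final
 * increment makes it 3; getsockopt(TCP_DEFER_ACCEPT) then reports
 * (3 << (3 - 1)) = 12 seconds.
 */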
2042 while (tp->defer_accept < 32 &&
2043 val > ((TCP_TIMEOUT_INIT / HZ) <<
2044 tp->defer_accept))
2045 tp->defer_accept++;
2046 tp->defer_accept++;
2047 }
2048 break;
2049
2050 case TCP_WINDOW_CLAMP:
2051 if (!val) {
2052 if (sk->sk_state != TCP_CLOSE) {
2053 err = -EINVAL;
2054 break;
2055 }
2056 tp->window_clamp = 0;
2057 } else
2058 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2059 SOCK_MIN_RCVBUF / 2 : val;
2060 break;
2061
2062 case TCP_QUICKACK:
2063 if (!val) {
2064 icsk->icsk_ack.pingpong = 1;
2065 } else {
2066 icsk->icsk_ack.pingpong = 0;
2067 if ((1 << sk->sk_state) &
2068 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2069 inet_csk_ack_scheduled(sk)) {
2070 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2071 cleanup_rbuf(sk, 1);
2072 if (!(val & 1))
2073 icsk->icsk_ack.pingpong = 1;
2074 }
2075 }
2076 break;
2077
2078 default:
2079 err = -ENOPROTOOPT;
2080 break;
2081 };
2082 release_sock(sk);
2083 return err;
2084 }
2085
2086 /* Return information about state of tcp endpoint in API format. */
2087 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2088 {
2089 struct tcp_sock *tp = tcp_sk(sk);
2090 const struct inet_connection_sock *icsk = inet_csk(sk);
2091 u32 now = tcp_time_stamp;
2092
2093 memset(info, 0, sizeof(*info));
2094
2095 info->tcpi_state = sk->sk_state;
2096 info->tcpi_ca_state = tp->ca_state;
2097 info->tcpi_retransmits = icsk->icsk_retransmits;
2098 info->tcpi_probes = tp->probes_out;
2099 info->tcpi_backoff = icsk->icsk_backoff;
2100
2101 if (tp->rx_opt.tstamp_ok)
2102 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2103 if (tp->rx_opt.sack_ok)
2104 info->tcpi_options |= TCPI_OPT_SACK;
2105 if (tp->rx_opt.wscale_ok) {
2106 info->tcpi_options |= TCPI_OPT_WSCALE;
2107 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2108 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2109 }
2110
2111 if (tp->ecn_flags&TCP_ECN_OK)
2112 info->tcpi_options |= TCPI_OPT_ECN;
2113
2114 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2115 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2116 info->tcpi_snd_mss = tp->mss_cache;
2117 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2118
2119 info->tcpi_unacked = tp->packets_out;
2120 info->tcpi_sacked = tp->sacked_out;
2121 info->tcpi_lost = tp->lost_out;
2122 info->tcpi_retrans = tp->retrans_out;
2123 info->tcpi_fackets = tp->fackets_out;
2124
2125 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2126 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2127 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2128
2129 info->tcpi_pmtu = tp->pmtu_cookie;
2130 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2131 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2132 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2133 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2134 info->tcpi_snd_cwnd = tp->snd_cwnd;
2135 info->tcpi_advmss = tp->advmss;
2136 info->tcpi_reordering = tp->reordering;
2137
2138 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2139 info->tcpi_rcv_space = tp->rcvq_space.space;
2140
2141 info->tcpi_total_retrans = tp->total_retrans;
2142 }
2143
2144 EXPORT_SYMBOL_GPL(tcp_get_info);
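/*
 * Illustrative userspace consumer of tcp_get_info() via getsockopt()
 * (sketch only; "fd" is an assumed connected TCP socket and struct
 * tcp_info comes from <netinet/tcp.h> or <linux/tcp.h>):
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u segments\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */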
2145
2146 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2147 int __user *optlen)
2148 {
2149 struct tcp_sock *tp = tcp_sk(sk);
2150 int val, len;
2151
2152 if (level != SOL_TCP)
2153 return tp->af_specific->getsockopt(sk, level, optname,
2154 optval, optlen);
2155
2156 if (get_user(len, optlen))
2157 return -EFAULT;
2158
2159 if (len < 0)
2160 return -EINVAL;
2161
2162 len = min_t(unsigned int, len, sizeof(int));
2163
2164 switch (optname) {
2165 case TCP_MAXSEG:
2166 val = tp->mss_cache;
2167 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2168 val = tp->rx_opt.user_mss;
2169 break;
2170 case TCP_NODELAY:
2171 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2172 break;
2173 case TCP_CORK:
2174 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2175 break;
2176 case TCP_KEEPIDLE:
2177 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2178 break;
2179 case TCP_KEEPINTVL:
2180 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2181 break;
2182 case TCP_KEEPCNT:
2183 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2184 break;
2185 case TCP_SYNCNT:
2186 val = inet_csk(sk)->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2187 break;
2188 case TCP_LINGER2:
2189 val = tp->linger2;
2190 if (val >= 0)
2191 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2192 break;
2193 case TCP_DEFER_ACCEPT:
2194 val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
2195 (tp->defer_accept - 1));
2196 break;
2197 case TCP_WINDOW_CLAMP:
2198 val = tp->window_clamp;
2199 break;
2200 case TCP_INFO: {
2201 struct tcp_info info;
2202
2203 if (get_user(len, optlen))
2204 return -EFAULT;
2205
2206 tcp_get_info(sk, &info);
2207
2208 len = min_t(unsigned int, len, sizeof(info));
2209 if (put_user(len, optlen))
2210 return -EFAULT;
2211 if (copy_to_user(optval, &info, len))
2212 return -EFAULT;
2213 return 0;
2214 }
2215 case TCP_QUICKACK:
2216 val = !inet_csk(sk)->icsk_ack.pingpong;
2217 break;
2218
2219 case TCP_CONGESTION:
2220 if (get_user(len, optlen))
2221 return -EFAULT;
2222 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2223 if (put_user(len, optlen))
2224 return -EFAULT;
2225 if (copy_to_user(optval, tp->ca_ops->name, len))
2226 return -EFAULT;
2227 return 0;
2228 default:
2229 return -ENOPROTOOPT;
2230 };
2231
2232 if (put_user(len, optlen))
2233 return -EFAULT;
2234 if (copy_to_user(optval, &val, len))
2235 return -EFAULT;
2236 return 0;
2237 }
2238
2239
2240 extern void __skb_cb_too_small_for_tcp(int, int);
2241 extern struct tcp_congestion_ops tcp_reno;
2242
2243 static __initdata unsigned long thash_entries;
2244 static int __init set_thash_entries(char *str)
2245 {
2246 if (!str)
2247 return 0;
2248 thash_entries = simple_strtoul(str, &str, 0);
2249 return 1;
2250 }
2251 __setup("thash_entries=", set_thash_entries);
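/*
 * Usage note (illustrative): the established-hash sizing done in
 * tcp_init() below can be overridden at boot, e.g. by passing
 * "thash_entries=131072" on the kernel command line; the value is the
 * requested number of hash entries, and leaving it unset keeps the
 * automatic, memory-based sizing.
 */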
2252
2253 void __init tcp_init(void)
2254 {
2255 struct sk_buff *skb = NULL;
2256 int order, i;
2257
2258 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2259 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2260 sizeof(skb->cb));
2261
2262 tcp_hashinfo.bind_bucket_cachep =
2263 kmem_cache_create("tcp_bind_bucket",
2264 sizeof(struct inet_bind_bucket), 0,
2265 SLAB_HWCACHE_ALIGN, NULL, NULL);
2266 if (!tcp_hashinfo.bind_bucket_cachep)
2267 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2268
2269 /* Size and allocate the main established and bind bucket
2270 * hash tables.
2271 *
2272 * The methodology is similar to that of the buffer cache.
2273 */
2274 tcp_hashinfo.ehash =
2275 alloc_large_system_hash("TCP established",
2276 sizeof(struct inet_ehash_bucket),
2277 thash_entries,
2278 (num_physpages >= 128 * 1024) ?
2279 (25 - PAGE_SHIFT) :
2280 (27 - PAGE_SHIFT),
2281 HASH_HIGHMEM,
2282 &tcp_hashinfo.ehash_size,
2283 NULL,
2284 0);
2285 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
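/* Note: ehash_size now holds half of the allocated buckets; the loop
 * below initializes all (ehash_size << 1) buckets because the upper
 * half of the table is used for TIME-WAIT sockets in this version.
 */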
2286 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2287 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2288 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2289 }
2290
2291 tcp_hashinfo.bhash =
2292 alloc_large_system_hash("TCP bind",
2293 sizeof(struct inet_bind_hashbucket),
2294 tcp_hashinfo.ehash_size,
2295 (num_physpages >= 128 * 1024) ?
2296 (25 - PAGE_SHIFT) :
2297 (27 - PAGE_SHIFT),
2298 HASH_HIGHMEM,
2299 &tcp_hashinfo.bhash_size,
2300 NULL,
2301 64 * 1024);
2302 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2303 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2304 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2305 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2306 }
2307
2308 /* Try to be a bit smarter and adjust defaults depending
2309 * on available memory.
2310 */
2311 for (order = 0; ((1 << order) << PAGE_SHIFT) <
2312 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2313 order++)
2314 ;
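/* Worked example (illustrative, assuming 4 KB pages and bind buckets
 * of roughly 16 bytes): a bhash of 65536 entries needs about 1 MB,
 * i.e. 256 pages, so the loop above stops at order = 8 and the
 * "order >= 4" branch below widens the local port range and raises
 * the TIME-WAIT, orphan and SYN-backlog limits.
 */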
2315 if (order >= 4) {
2316 sysctl_local_port_range[0] = 32768;
2317 sysctl_local_port_range[1] = 61000;
2318 sysctl_tcp_max_tw_buckets = 180000;
2319 sysctl_tcp_max_orphans = 4096 << (order - 4);
2320 sysctl_max_syn_backlog = 1024;
2321 } else if (order < 3) {
2322 sysctl_local_port_range[0] = 1024 * (3 - order);
2323 sysctl_tcp_max_tw_buckets >>= (3 - order);
2324 sysctl_tcp_max_orphans >>= (3 - order);
2325 sysctl_max_syn_backlog = 128;
2326 }
2327 tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
2328
2329 sysctl_tcp_mem[0] = 768 << order;
2330 sysctl_tcp_mem[1] = 1024 << order;
2331 sysctl_tcp_mem[2] = 1536 << order;
2332
2333 if (order < 3) {
2334 sysctl_tcp_wmem[2] = 64 * 1024;
2335 sysctl_tcp_rmem[0] = PAGE_SIZE;
2336 sysctl_tcp_rmem[1] = 43689;
2337 sysctl_tcp_rmem[2] = 2 * 43689;
2338 }
2339
2340 printk(KERN_INFO "TCP: Hash tables configured "
2341 "(established %d bind %d)\n",
2342 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
2343
2344 tcp_register_congestion_control(&tcp_reno);
2345 }
2346
2347 EXPORT_SYMBOL(inet_csk_accept);
2348 EXPORT_SYMBOL(tcp_close);
2349 EXPORT_SYMBOL(tcp_destroy_sock);
2350 EXPORT_SYMBOL(tcp_disconnect);
2351 EXPORT_SYMBOL(tcp_getsockopt);
2352 EXPORT_SYMBOL(tcp_ioctl);
2353 EXPORT_SYMBOL(tcp_poll);
2354 EXPORT_SYMBOL(tcp_read_sock);
2355 EXPORT_SYMBOL(tcp_recvmsg);
2356 EXPORT_SYMBOL(tcp_sendmsg);
2357 EXPORT_SYMBOL(tcp_sendpage);
2358 EXPORT_SYMBOL(tcp_setsockopt);
2359 EXPORT_SYMBOL(tcp_shutdown);
2360 EXPORT_SYMBOL(tcp_statistics);