[NET]: Annotate checksums in on-the-wire packets.
net/ipv4/tcp.c
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
9 *
 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
21 *
22 * Fixes:
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
27 * (tcp_err()).
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed where wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
38 * unknown sockets.
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
41 * syn rule wrong]
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
47 * escape still
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
51 * facilities
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
56 * bit to skb ops.
57 * Alan Cox : Tidied tcp_data to avoid a potential
58 * nasty.
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
70 * sockets.
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
74 * state ack error.
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
79 * fixes
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
85 * completely
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
93 * (not yet usable)
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
 99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
106 * all cases.
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
111 * works now.
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
113 * BSD api.
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
121 * fixed ports.
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
127 * socket close.
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
132 * accept.
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
143 * close.
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
149 * comments.
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
157 * resemble the RFC.
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
162 * generates them.
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
175 * but it's a start!
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
196 * improvement.
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
209 *
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
214 *
215 * Description of States:
216 *
217 * TCP_SYN_SENT sent a connection request, waiting for ack
218 *
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
221 *
222 * TCP_ESTABLISHED connection established
223 *
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
226 *
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
228 * to shutdown
229 *
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
232 *
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
238 *
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
242 *
 243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
246 *
247 * TCP_CLOSE socket is finished
248 */
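
For a quick illustration of the state list above, a userspace program can observe which state a connected socket is currently in through the TCP_INFO socket option. This is a minimal sketch, not part of tcp.c; it assumes struct tcp_info and TCP_INFO from <linux/tcp.h> and an already connected socket fd.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <linux/tcp.h>		/* TCP_INFO, struct tcp_info */

/* Print the kernel's view of the socket state (TCP_ESTABLISHED, ...). */
static void print_tcp_state(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("tcpi_state = %u\n", info.tcpi_state);
	else
		perror("getsockopt(TCP_INFO)");
}
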
249
250#include <linux/module.h>
251#include <linux/types.h>
252#include <linux/fcntl.h>
253#include <linux/poll.h>
254#include <linux/init.h>
255#include <linux/smp_lock.h>
256#include <linux/fs.h>
257#include <linux/random.h>
258#include <linux/bootmem.h>
 259#include <linux/cache.h>
 260#include <linux/err.h>
 261#include <linux/crypto.h>
262
263#include <net/icmp.h>
264#include <net/tcp.h>
265#include <net/xfrm.h>
266#include <net/ip.h>
 267#include <net/netdma.h>
268
269#include <asm/uaccess.h>
270#include <asm/ioctls.h>
271
 272int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 273
 274DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
 275
276atomic_t tcp_orphan_count = ATOMIC_INIT(0);
277
278EXPORT_SYMBOL_GPL(tcp_orphan_count);
279
280int sysctl_tcp_mem[3] __read_mostly;
281int sysctl_tcp_wmem[3] __read_mostly;
282int sysctl_tcp_rmem[3] __read_mostly;
283
284EXPORT_SYMBOL(sysctl_tcp_mem);
285EXPORT_SYMBOL(sysctl_tcp_rmem);
286EXPORT_SYMBOL(sysctl_tcp_wmem);
287
288atomic_t tcp_memory_allocated; /* Current allocated memory. */
289atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
290
291EXPORT_SYMBOL(tcp_memory_allocated);
292EXPORT_SYMBOL(tcp_sockets_allocated);
293
294/*
295 * Pressure flag: try to collapse.
296 * Technical note: it is used by multiple contexts non atomically.
297 * All the sk_stream_mem_schedule() is of this nature: accounting
298 * is strict, actions are advisory and have some latency.
299 */
300int tcp_memory_pressure;
301
302EXPORT_SYMBOL(tcp_memory_pressure);
303
304void tcp_enter_memory_pressure(void)
305{
306 if (!tcp_memory_pressure) {
307 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
308 tcp_memory_pressure = 1;
309 }
310}
311
312EXPORT_SYMBOL(tcp_enter_memory_pressure);
313
314/*
315 * Wait for a TCP event.
316 *
317 * Note that we don't need to lock the socket, as the upper poll layers
318 * take care of normal races (between the test and the event) and we don't
319 * go look at any of the socket buffers directly.
320 */
321unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
322{
323 unsigned int mask;
324 struct sock *sk = sock->sk;
325 struct tcp_sock *tp = tcp_sk(sk);
326
327 poll_wait(file, sk->sk_sleep, wait);
328 if (sk->sk_state == TCP_LISTEN)
 329 return inet_csk_listen_poll(sk);
330
331 /* Socket is not locked. We are protected from async events
332 by poll logic and correct handling of state changes
 333 made by other threads is impossible in any case.
334 */
335
336 mask = 0;
337 if (sk->sk_err)
338 mask = POLLERR;
339
340 /*
341 * POLLHUP is certainly not done right. But poll() doesn't
342 * have a notion of HUP in just one direction, and for a
343 * socket the read side is more interesting.
344 *
345 * Some poll() documentation says that POLLHUP is incompatible
346 * with the POLLOUT/POLLWR flags, so somebody should check this
347 * all. But careful, it tends to be safer to return too many
348 * bits than too few, and you can easily break real applications
349 * if you don't tell them that something has hung up!
350 *
351 * Check-me.
352 *
353 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
354 * our fs/select.c). It means that after we received EOF,
355 * poll always returns immediately, making impossible poll() on write()
356 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
357 * if and only if shutdown has been made in both directions.
358 * Actually, it is interesting to look how Solaris and DUX
 359 * solve this dilemma. I would prefer, if POLLHUP were maskable,
360 * then we could set it on SND_SHUTDOWN. BTW examples given
361 * in Stevens' books assume exactly this behaviour, it explains
 362 * why POLLHUP is incompatible with POLLOUT. --ANK
363 *
364 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
365 * blocking on fresh not-connected or disconnected socket. --ANK
366 */
367 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
368 mask |= POLLHUP;
369 if (sk->sk_shutdown & RCV_SHUTDOWN)
 370 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
371
372 /* Connected? */
373 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
374 /* Potential race condition. If read of tp below will
375 * escape above sk->sk_state, we can be illegally awaken
376 * in SYN_* states. */
377 if ((tp->rcv_nxt != tp->copied_seq) &&
378 (tp->urg_seq != tp->copied_seq ||
379 tp->rcv_nxt != tp->copied_seq + 1 ||
380 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
381 mask |= POLLIN | POLLRDNORM;
382
383 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
384 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
385 mask |= POLLOUT | POLLWRNORM;
386 } else { /* send SIGIO later */
387 set_bit(SOCK_ASYNC_NOSPACE,
388 &sk->sk_socket->flags);
389 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
390
391 /* Race breaker. If space is freed after
392 * wspace test but before the flags are set,
393 * IO signal will be lost.
394 */
395 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
396 mask |= POLLOUT | POLLWRNORM;
397 }
398 }
399
400 if (tp->urg_data & TCP_URG_VALID)
401 mask |= POLLPRI;
402 }
403 return mask;
404}
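
To connect the mask logic above to its user-facing side, here is a minimal userspace sketch (not part of tcp.c) that polls a connected TCP socket and interprets the bits tcp_poll() can set; _GNU_SOURCE is assumed for POLLRDHUP and POLLRDNORM/POLLWRNORM.

#define _GNU_SOURCE		/* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>

static void wait_for_tcp_event(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLRDHUP };

	if (poll(&pfd, 1, -1) <= 0)
		return;
	if (pfd.revents & (POLLIN | POLLRDNORM))
		printf("readable (data, FIN, or urgent mark)\n");
	if (pfd.revents & (POLLOUT | POLLWRNORM))
		printf("writable (send buffer has space)\n");
	if (pfd.revents & (POLLHUP | POLLERR | POLLRDHUP))
		printf("hangup / error reported\n");
}
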
405
406int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
407{
408 struct tcp_sock *tp = tcp_sk(sk);
409 int answ;
410
411 switch (cmd) {
412 case SIOCINQ:
413 if (sk->sk_state == TCP_LISTEN)
414 return -EINVAL;
415
416 lock_sock(sk);
417 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
418 answ = 0;
419 else if (sock_flag(sk, SOCK_URGINLINE) ||
420 !tp->urg_data ||
421 before(tp->urg_seq, tp->copied_seq) ||
422 !before(tp->urg_seq, tp->rcv_nxt)) {
423 answ = tp->rcv_nxt - tp->copied_seq;
424
425 /* Subtract 1, if FIN is in queue. */
426 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
427 answ -=
428 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
429 } else
430 answ = tp->urg_seq - tp->copied_seq;
431 release_sock(sk);
432 break;
433 case SIOCATMARK:
434 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
435 break;
436 case SIOCOUTQ:
437 if (sk->sk_state == TCP_LISTEN)
438 return -EINVAL;
439
440 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
441 answ = 0;
442 else
443 answ = tp->write_seq - tp->snd_una;
444 break;
445 default:
446 return -ENOIOCTLCMD;
447 };
448
449 return put_user(answ, (int __user *)arg);
450}
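
The SIOCINQ/SIOCOUTQ branches above are what a userspace caller reaches through ioctl(); a hedged sketch, assuming the constants from <linux/sockios.h> and a connected TCP socket fd:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

static void print_tcp_queues(int fd)
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0)	/* unread bytes in the receive queue */
		printf("SIOCINQ  = %d\n", inq);
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)	/* bytes written but not yet acked */
		printf("SIOCOUTQ = %d\n", outq);
}
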
451
452static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
453{
454 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
455 tp->pushed_seq = tp->write_seq;
456}
457
458static inline int forced_push(struct tcp_sock *tp)
459{
460 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
461}
462
463static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
464 struct sk_buff *skb)
465{
466 skb->csum = 0;
467 TCP_SKB_CB(skb)->seq = tp->write_seq;
468 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
469 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
470 TCP_SKB_CB(skb)->sacked = 0;
471 skb_header_release(skb);
472 __skb_queue_tail(&sk->sk_write_queue, skb);
473 sk_charge_skb(sk, skb);
474 if (!sk->sk_send_head)
475 sk->sk_send_head = skb;
 476 if (tp->nonagle & TCP_NAGLE_PUSH)
477 tp->nonagle &= ~TCP_NAGLE_PUSH;
478}
479
480static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
481 struct sk_buff *skb)
482{
483 if (flags & MSG_OOB) {
484 tp->urg_mode = 1;
485 tp->snd_up = tp->write_seq;
486 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
487 }
488}
489
490static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
491 int mss_now, int nonagle)
492{
493 if (sk->sk_send_head) {
494 struct sk_buff *skb = sk->sk_write_queue.prev;
495 if (!(flags & MSG_MORE) || forced_push(tp))
496 tcp_mark_push(tp, skb);
497 tcp_mark_urg(tp, flags, skb);
498 __tcp_push_pending_frames(sk, tp, mss_now,
499 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
500 }
501}
502
503static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
504 size_t psize, int flags)
505{
506 struct tcp_sock *tp = tcp_sk(sk);
 507 int mss_now, size_goal;
508 int err;
509 ssize_t copied;
510 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
511
512 /* Wait for a connection to finish. */
513 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
514 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
515 goto out_err;
516
517 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
518
519 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
 520 size_goal = tp->xmit_size_goal;
521 copied = 0;
522
523 err = -EPIPE;
524 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
525 goto do_error;
526
527 while (psize > 0) {
528 struct sk_buff *skb = sk->sk_write_queue.prev;
529 struct page *page = pages[poffset / PAGE_SIZE];
530 int copy, i, can_coalesce;
531 int offset = poffset % PAGE_SIZE;
532 int size = min_t(size_t, psize, PAGE_SIZE - offset);
533
 534 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
535new_segment:
536 if (!sk_stream_memory_free(sk))
537 goto wait_for_sndbuf;
538
539 skb = sk_stream_alloc_pskb(sk, 0, 0,
540 sk->sk_allocation);
541 if (!skb)
542 goto wait_for_memory;
543
544 skb_entail(sk, tp, skb);
 545 copy = size_goal;
546 }
547
548 if (copy > size)
549 copy = size;
550
551 i = skb_shinfo(skb)->nr_frags;
552 can_coalesce = skb_can_coalesce(skb, i, page, offset);
553 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
554 tcp_mark_push(tp, skb);
555 goto new_segment;
556 }
 557 if (!sk_stream_wmem_schedule(sk, copy))
558 goto wait_for_memory;
559
560 if (can_coalesce) {
561 skb_shinfo(skb)->frags[i - 1].size += copy;
562 } else {
563 get_page(page);
564 skb_fill_page_desc(skb, i, page, offset, copy);
565 }
566
567 skb->len += copy;
568 skb->data_len += copy;
569 skb->truesize += copy;
570 sk->sk_wmem_queued += copy;
571 sk->sk_forward_alloc -= copy;
 572 skb->ip_summed = CHECKSUM_PARTIAL;
573 tp->write_seq += copy;
574 TCP_SKB_CB(skb)->end_seq += copy;
 575 skb_shinfo(skb)->gso_segs = 0;
576
577 if (!copied)
578 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
579
580 copied += copy;
581 poffset += copy;
582 if (!(psize -= copy))
583 goto out;
584
 585 if (skb->len < mss_now || (flags & MSG_OOB))
586 continue;
587
588 if (forced_push(tp)) {
589 tcp_mark_push(tp, skb);
590 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
591 } else if (skb == sk->sk_send_head)
592 tcp_push_one(sk, mss_now);
593 continue;
594
595wait_for_sndbuf:
596 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
597wait_for_memory:
598 if (copied)
599 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
600
601 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
602 goto do_error;
603
604 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
 605 size_goal = tp->xmit_size_goal;
606 }
607
608out:
609 if (copied)
610 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
611 return copied;
612
613do_error:
614 if (copied)
615 goto out;
616out_err:
617 return sk_stream_error(sk, flags, err);
618}
619
620ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
621 size_t size, int flags)
622{
623 ssize_t res;
624 struct sock *sk = sock->sk;
625
 626 if (!(sk->sk_route_caps & NETIF_F_SG) ||
 627 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
628 return sock_no_sendpage(sock, page, offset, size, flags);
629
630 lock_sock(sk);
631 TCP_CHECK_TIMER(sk);
632 res = do_tcp_sendpages(sk, &page, offset, size, flags);
633 TCP_CHECK_TIMER(sk);
634 release_sock(sk);
635 return res;
636}
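
do_tcp_sendpages()/tcp_sendpage() is the path exercised when userspace hands the kernel file pages directly, most commonly via sendfile(2). A minimal sketch of that usage (illustrative, not part of tcp.c):

#include <sys/sendfile.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Stream an entire regular file over a connected TCP socket. */
static ssize_t send_whole_file(int sock_fd, int file_fd)
{
	struct stat st;
	off_t off = 0;

	if (fstat(file_fd, &st) < 0)
		return -1;
	while (off < st.st_size) {
		ssize_t n = sendfile(sock_fd, file_fd, &off, st.st_size - off);
		if (n <= 0)
			return n;	/* error, or nothing more could be sent */
	}
	return off;
}
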
637
638#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
639#define TCP_OFF(sk) (sk->sk_sndmsg_off)
640
641static inline int select_size(struct sock *sk, struct tcp_sock *tp)
642{
 643 int tmp = tp->mss_cache;
 644
 645 if (sk->sk_route_caps & NETIF_F_SG) {
 646 if (sk_can_gso(sk))
647 tmp = 0;
648 else {
649 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
650
651 if (tmp >= pgbreak &&
652 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
653 tmp = pgbreak;
654 }
655 }
 656
657 return tmp;
658}
659
660int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
661 size_t size)
662{
663 struct iovec *iov;
664 struct tcp_sock *tp = tcp_sk(sk);
665 struct sk_buff *skb;
666 int iovlen, flags;
 667 int mss_now, size_goal;
668 int err, copied;
669 long timeo;
670
671 lock_sock(sk);
672 TCP_CHECK_TIMER(sk);
673
674 flags = msg->msg_flags;
675 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
676
677 /* Wait for a connection to finish. */
678 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
679 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
680 goto out_err;
681
682 /* This should be in poll */
683 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
684
685 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
 686 size_goal = tp->xmit_size_goal;
687
688 /* Ok commence sending. */
689 iovlen = msg->msg_iovlen;
690 iov = msg->msg_iov;
691 copied = 0;
692
693 err = -EPIPE;
694 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
695 goto do_error;
696
697 while (--iovlen >= 0) {
698 int seglen = iov->iov_len;
699 unsigned char __user *from = iov->iov_base;
700
701 iov++;
702
703 while (seglen > 0) {
704 int copy;
705
706 skb = sk->sk_write_queue.prev;
707
708 if (!sk->sk_send_head ||
 709 (copy = size_goal - skb->len) <= 0) {
710
711new_segment:
712 /* Allocate new segment. If the interface is SG,
713 * allocate skb fitting to single page.
714 */
715 if (!sk_stream_memory_free(sk))
716 goto wait_for_sndbuf;
717
718 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
719 0, sk->sk_allocation);
720 if (!skb)
721 goto wait_for_memory;
722
723 /*
724 * Check whether we can use HW checksum.
725 */
 726 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
 727 skb->ip_summed = CHECKSUM_PARTIAL;
728
729 skb_entail(sk, tp, skb);
 730 copy = size_goal;
731 }
732
733 /* Try to append data to the end of skb. */
734 if (copy > seglen)
735 copy = seglen;
736
737 /* Where to copy to? */
738 if (skb_tailroom(skb) > 0) {
739 /* We have some space in skb head. Superb! */
740 if (copy > skb_tailroom(skb))
741 copy = skb_tailroom(skb);
742 if ((err = skb_add_data(skb, from, copy)) != 0)
743 goto do_fault;
744 } else {
745 int merge = 0;
746 int i = skb_shinfo(skb)->nr_frags;
747 struct page *page = TCP_PAGE(sk);
748 int off = TCP_OFF(sk);
749
750 if (skb_can_coalesce(skb, i, page, off) &&
751 off != PAGE_SIZE) {
752 /* We can extend the last page
753 * fragment. */
754 merge = 1;
755 } else if (i == MAX_SKB_FRAGS ||
756 (!i &&
757 !(sk->sk_route_caps & NETIF_F_SG))) {
758 /* Need to add new fragment and cannot
759 * do this because interface is non-SG,
760 * or because all the page slots are
761 * busy. */
762 tcp_mark_push(tp, skb);
763 goto new_segment;
764 } else if (page) {
765 if (off == PAGE_SIZE) {
766 put_page(page);
767 TCP_PAGE(sk) = page = NULL;
 768 off = 0;
 769 }
 770 } else
 771 off = 0;
772
773 if (copy > PAGE_SIZE - off)
774 copy = PAGE_SIZE - off;
775
776 if (!sk_stream_wmem_schedule(sk, copy))
777 goto wait_for_memory;
778
779 if (!page) {
780 /* Allocate new cache page. */
781 if (!(page = sk_stream_alloc_page(sk)))
782 goto wait_for_memory;
783 }
784
785 /* Time to copy data. We are close to
786 * the end! */
787 err = skb_copy_to_page(sk, from, skb, page,
788 off, copy);
789 if (err) {
790 /* If this page was new, give it to the
791 * socket so it does not get leaked.
792 */
793 if (!TCP_PAGE(sk)) {
794 TCP_PAGE(sk) = page;
795 TCP_OFF(sk) = 0;
796 }
797 goto do_error;
798 }
799
800 /* Update the skb. */
801 if (merge) {
802 skb_shinfo(skb)->frags[i - 1].size +=
803 copy;
804 } else {
805 skb_fill_page_desc(skb, i, page, off, copy);
806 if (TCP_PAGE(sk)) {
807 get_page(page);
808 } else if (off + copy < PAGE_SIZE) {
809 get_page(page);
810 TCP_PAGE(sk) = page;
811 }
812 }
813
814 TCP_OFF(sk) = off + copy;
815 }
816
817 if (!copied)
818 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
819
820 tp->write_seq += copy;
821 TCP_SKB_CB(skb)->end_seq += copy;
 822 skb_shinfo(skb)->gso_segs = 0;
823
824 from += copy;
825 copied += copy;
826 if ((seglen -= copy) == 0 && iovlen == 0)
827 goto out;
828
 829 if (skb->len < mss_now || (flags & MSG_OOB))
830 continue;
831
832 if (forced_push(tp)) {
833 tcp_mark_push(tp, skb);
834 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
835 } else if (skb == sk->sk_send_head)
836 tcp_push_one(sk, mss_now);
837 continue;
838
839wait_for_sndbuf:
840 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
841wait_for_memory:
842 if (copied)
843 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
844
845 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
846 goto do_error;
847
848 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
c1b4a7e6 849 size_goal = tp->xmit_size_goal;
850 }
851 }
852
853out:
854 if (copied)
855 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
856 TCP_CHECK_TIMER(sk);
857 release_sock(sk);
858 return copied;
859
860do_fault:
861 if (!skb->len) {
862 if (sk->sk_send_head == skb)
863 sk->sk_send_head = NULL;
 864 __skb_unlink(skb, &sk->sk_write_queue);
865 sk_stream_free_skb(sk, skb);
866 }
867
868do_error:
869 if (copied)
870 goto out;
871out_err:
872 err = sk_stream_error(sk, flags, err);
873 TCP_CHECK_TIMER(sk);
874 release_sock(sk);
875 return err;
876}
877
878/*
879 * Handle reading urgent data. BSD has very simple semantics for
880 * this, no blocking and very strange errors 8)
881 */
882
883static int tcp_recv_urg(struct sock *sk, long timeo,
884 struct msghdr *msg, int len, int flags,
885 int *addr_len)
886{
887 struct tcp_sock *tp = tcp_sk(sk);
888
889 /* No URG data to read. */
890 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
891 tp->urg_data == TCP_URG_READ)
892 return -EINVAL; /* Yes this is right ! */
893
894 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
895 return -ENOTCONN;
896
897 if (tp->urg_data & TCP_URG_VALID) {
898 int err = 0;
899 char c = tp->urg_data;
900
901 if (!(flags & MSG_PEEK))
902 tp->urg_data = TCP_URG_READ;
903
904 /* Read urgent data. */
905 msg->msg_flags |= MSG_OOB;
906
907 if (len > 0) {
908 if (!(flags & MSG_TRUNC))
909 err = memcpy_toiovec(msg->msg_iov, &c, 1);
910 len = 1;
911 } else
912 msg->msg_flags |= MSG_TRUNC;
913
914 return err ? -EFAULT : len;
915 }
916
917 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
918 return 0;
919
920 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
921 * the available implementations agree in this case:
922 * this call should never block, independent of the
923 * blocking state of the socket.
924 * Mike <pall@rz.uni-karlsruhe.de>
925 */
926 return -EAGAIN;
927}
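
Seen from userspace, the non-blocking MSG_OOB semantics implemented above look like this (a sketch, not part of tcp.c; assumes SO_OOBINLINE is not set on the socket):

#include <stdio.h>
#include <sys/socket.h>

/* Try to fetch the single byte of urgent data; never blocks. */
static void read_urgent_byte(int fd)
{
	char c;
	ssize_t n = recv(fd, &c, 1, MSG_OOB);

	if (n == 1)
		printf("urgent byte: 0x%02x\n", (unsigned char)c);
	else
		perror("recv(MSG_OOB)");	/* e.g. EINVAL/EAGAIN when no urgent data */
}
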
928
929/* Clean up the receive buffer for full frames taken by the user,
930 * then send an ACK if necessary. COPIED is the number of bytes
931 * tcp_recvmsg has given to the user so far, it speeds up the
932 * calculation of whether or not we must ACK for the sake of
933 * a window update.
934 */
 935void tcp_cleanup_rbuf(struct sock *sk, int copied)
936{
937 struct tcp_sock *tp = tcp_sk(sk);
938 int time_to_ack = 0;
939
940#if TCP_DEBUG
941 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
942
943 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
944#endif
945
946 if (inet_csk_ack_scheduled(sk)) {
947 const struct inet_connection_sock *icsk = inet_csk(sk);
948 /* Delayed ACKs frequently hit locked sockets during bulk
949 * receive. */
 950 if (icsk->icsk_ack.blocked ||
 951 /* Once-per-two-segments ACK was not sent by tcp_input.c */
 952 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
953 /*
954 * If this read emptied read buffer, we send ACK, if
955 * connection is not bidirectional, user drained
956 * receive buffer and there was a small segment
957 * in queue.
958 */
959 (copied > 0 &&
960 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
961 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
962 !icsk->icsk_ack.pingpong)) &&
963 !atomic_read(&sk->sk_rmem_alloc)))
964 time_to_ack = 1;
965 }
966
967 /* We send an ACK if we can now advertise a non-zero window
968 * which has been raised "significantly".
969 *
970 * Even if window raised up to infinity, do not send window open ACK
971 * in states, where we will not receive more. It is useless.
972 */
973 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
974 __u32 rcv_window_now = tcp_receive_window(tp);
975
976 /* Optimize, __tcp_select_window() is not cheap. */
977 if (2*rcv_window_now <= tp->window_clamp) {
978 __u32 new_window = __tcp_select_window(sk);
979
980 /* Send ACK now, if this read freed lots of space
981 * in our buffer. Certainly, new_window is new window.
982 * We can advertise it now, if it is not less than current one.
983 * "Lots" means "at least twice" here.
984 */
985 if (new_window && new_window >= 2 * rcv_window_now)
986 time_to_ack = 1;
987 }
988 }
989 if (time_to_ack)
990 tcp_send_ack(sk);
991}
992
993static void tcp_prequeue_process(struct sock *sk)
994{
995 struct sk_buff *skb;
996 struct tcp_sock *tp = tcp_sk(sk);
997
 998 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
999
1000 /* RX process wants to run with disabled BHs, though it is not
1001 * necessary */
1002 local_bh_disable();
1003 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1004 sk->sk_backlog_rcv(sk, skb);
1005 local_bh_enable();
1006
1007 /* Clear memory counter. */
1008 tp->ucopy.memory = 0;
1009}
1010
1011static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1012{
1013 struct sk_buff *skb;
1014 u32 offset;
1015
1016 skb_queue_walk(&sk->sk_receive_queue, skb) {
1017 offset = seq - TCP_SKB_CB(skb)->seq;
1018 if (skb->h.th->syn)
1019 offset--;
1020 if (offset < skb->len || skb->h.th->fin) {
1021 *off = offset;
1022 return skb;
1023 }
1024 }
1025 return NULL;
1026}
1027
1028/*
1029 * This routine provides an alternative to tcp_recvmsg() for routines
1030 * that would like to handle copying from skbuffs directly in 'sendfile'
1031 * fashion.
1032 * Note:
1033 * - It is assumed that the socket was locked by the caller.
1034 * - The routine does not block.
1035 * - At present, there is no support for reading OOB data
1036 * or for 'peeking' the socket using this routine
1037 * (although both would be easy to implement).
1038 */
1039int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1040 sk_read_actor_t recv_actor)
1041{
1042 struct sk_buff *skb;
1043 struct tcp_sock *tp = tcp_sk(sk);
1044 u32 seq = tp->copied_seq;
1045 u32 offset;
1046 int copied = 0;
1047
1048 if (sk->sk_state == TCP_LISTEN)
1049 return -ENOTCONN;
1050 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1051 if (offset < skb->len) {
1052 size_t used, len;
1053
1054 len = skb->len - offset;
1055 /* Stop reading if we hit a patch of urgent data */
1056 if (tp->urg_data) {
1057 u32 urg_offset = tp->urg_seq - seq;
1058 if (urg_offset < len)
1059 len = urg_offset;
1060 if (!len)
1061 break;
1062 }
1063 used = recv_actor(desc, skb, offset, len);
1064 if (used <= len) {
1065 seq += used;
1066 copied += used;
1067 offset += used;
1068 }
1069 if (offset != skb->len)
1070 break;
1071 }
1072 if (skb->h.th->fin) {
 1073 sk_eat_skb(sk, skb, 0);
1074 ++seq;
1075 break;
1076 }
 1077 sk_eat_skb(sk, skb, 0);
1078 if (!desc->count)
1079 break;
1080 }
1081 tp->copied_seq = seq;
1082
1083 tcp_rcv_space_adjust(sk);
1084
1085 /* Clean up data we have read: This will do ACK frames. */
1086 if (copied)
 1087 tcp_cleanup_rbuf(sk, copied);
1088 return copied;
1089}
1090
1091/*
1092 * This routine copies from a sock struct into the user buffer.
1093 *
1094 * Technical note: in 2.3 we work on _locked_ socket, so that
1095 * tricks with *seq access order and skb->users are not required.
1096 * Probably, code can be easily improved even more.
1097 */
1098
1099int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1100 size_t len, int nonblock, int flags, int *addr_len)
1101{
1102 struct tcp_sock *tp = tcp_sk(sk);
1103 int copied = 0;
1104 u32 peek_seq;
1105 u32 *seq;
1106 unsigned long used;
1107 int err;
1108 int target; /* Read at least this many bytes */
1109 long timeo;
1110 struct task_struct *user_recv = NULL;
 1111 int copied_early = 0;
1112
1113 lock_sock(sk);
1114
1115 TCP_CHECK_TIMER(sk);
1116
1117 err = -ENOTCONN;
1118 if (sk->sk_state == TCP_LISTEN)
1119 goto out;
1120
1121 timeo = sock_rcvtimeo(sk, nonblock);
1122
1123 /* Urgent data needs to be handled specially. */
1124 if (flags & MSG_OOB)
1125 goto recv_urg;
1126
1127 seq = &tp->copied_seq;
1128 if (flags & MSG_PEEK) {
1129 peek_seq = tp->copied_seq;
1130 seq = &peek_seq;
1131 }
1132
1133 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1134
1135#ifdef CONFIG_NET_DMA
1136 tp->ucopy.dma_chan = NULL;
1137 preempt_disable();
1138 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 1139 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
1140 preempt_enable_no_resched();
1141 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
1142 } else
1143 preempt_enable_no_resched();
1144#endif
1145
1146 do {
1147 struct sk_buff *skb;
1148 u32 offset;
1149
1150 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1151 if (tp->urg_data && tp->urg_seq == *seq) {
1152 if (copied)
1153 break;
1154 if (signal_pending(current)) {
1155 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1156 break;
1157 }
1158 }
1159
1160 /* Next get a buffer. */
1161
1162 skb = skb_peek(&sk->sk_receive_queue);
1163 do {
1164 if (!skb)
1165 break;
1166
1167 /* Now that we have two receive queues this
1168 * shouldn't happen.
1169 */
1170 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1171 printk(KERN_INFO "recvmsg bug: copied %X "
1172 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1173 break;
1174 }
1175 offset = *seq - TCP_SKB_CB(skb)->seq;
1176 if (skb->h.th->syn)
1177 offset--;
1178 if (offset < skb->len)
1179 goto found_ok_skb;
1180 if (skb->h.th->fin)
1181 goto found_fin_ok;
1182 BUG_TRAP(flags & MSG_PEEK);
1183 skb = skb->next;
1184 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1185
1186 /* Well, if we have backlog, try to process it now yet. */
1187
1188 if (copied >= target && !sk->sk_backlog.tail)
1189 break;
1190
1191 if (copied) {
1192 if (sk->sk_err ||
1193 sk->sk_state == TCP_CLOSE ||
1194 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1195 !timeo ||
1196 signal_pending(current) ||
1197 (flags & MSG_PEEK))
1198 break;
1199 } else {
1200 if (sock_flag(sk, SOCK_DONE))
1201 break;
1202
1203 if (sk->sk_err) {
1204 copied = sock_error(sk);
1205 break;
1206 }
1207
1208 if (sk->sk_shutdown & RCV_SHUTDOWN)
1209 break;
1210
1211 if (sk->sk_state == TCP_CLOSE) {
1212 if (!sock_flag(sk, SOCK_DONE)) {
1213 /* This occurs when user tries to read
1214 * from never connected socket.
1215 */
1216 copied = -ENOTCONN;
1217 break;
1218 }
1219 break;
1220 }
1221
1222 if (!timeo) {
1223 copied = -EAGAIN;
1224 break;
1225 }
1226
1227 if (signal_pending(current)) {
1228 copied = sock_intr_errno(timeo);
1229 break;
1230 }
1231 }
1232
 1233 tcp_cleanup_rbuf(sk, copied);
 1234
 1235 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1236 /* Install new reader */
1237 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1238 user_recv = current;
1239 tp->ucopy.task = user_recv;
1240 tp->ucopy.iov = msg->msg_iov;
1241 }
1242
1243 tp->ucopy.len = len;
1244
1245 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1246 (flags & (MSG_PEEK | MSG_TRUNC)));
1247
1248 /* Ugly... If prequeue is not empty, we have to
1249 * process it before releasing socket, otherwise
1250 * order will be broken at second iteration.
1251 * More elegant solution is required!!!
1252 *
1253 * Look: we have the following (pseudo)queues:
1254 *
1255 * 1. packets in flight
1256 * 2. backlog
1257 * 3. prequeue
1258 * 4. receive_queue
1259 *
1260 * Each queue can be processed only if the next ones
1261 * are empty. At this point we have empty receive_queue.
1262 * But prequeue _can_ be not empty after 2nd iteration,
1263 * when we jumped to start of loop because backlog
1264 * processing added something to receive_queue.
1265 * We cannot release_sock(), because backlog contains
1266 * packets arrived _after_ prequeued ones.
1267 *
1268 * Shortly, algorithm is clear --- to process all
1269 * the queues in order. We could make it more directly,
1270 * requeueing packets from backlog to prequeue, if
1271 * is not empty. It is more elegant, but eats cycles,
1272 * unfortunately.
1273 */
 1274 if (!skb_queue_empty(&tp->ucopy.prequeue))
1275 goto do_prequeue;
1276
1277 /* __ Set realtime policy in scheduler __ */
1278 }
1279
1280 if (copied >= target) {
1281 /* Do not sleep, just process backlog. */
1282 release_sock(sk);
1283 lock_sock(sk);
1284 } else
1285 sk_wait_data(sk, &timeo);
1286
1287#ifdef CONFIG_NET_DMA
1288 tp->ucopy.wakeup = 0;
1289#endif
1290
1291 if (user_recv) {
1292 int chunk;
1293
1294 /* __ Restore normal policy in scheduler __ */
1295
1296 if ((chunk = len - tp->ucopy.len) != 0) {
1297 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1298 len -= chunk;
1299 copied += chunk;
1300 }
1301
1302 if (tp->rcv_nxt == tp->copied_seq &&
 1303 !skb_queue_empty(&tp->ucopy.prequeue)) {
1304do_prequeue:
1305 tcp_prequeue_process(sk);
1306
1307 if ((chunk = len - tp->ucopy.len) != 0) {
1308 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1309 len -= chunk;
1310 copied += chunk;
1311 }
1312 }
1313 }
1314 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1315 if (net_ratelimit())
1316 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1317 current->comm, current->pid);
1318 peek_seq = tp->copied_seq;
1319 }
1320 continue;
1321
1322 found_ok_skb:
1323 /* Ok so how much can we use? */
1324 used = skb->len - offset;
1325 if (len < used)
1326 used = len;
1327
1328 /* Do we have urgent data here? */
1329 if (tp->urg_data) {
1330 u32 urg_offset = tp->urg_seq - *seq;
1331 if (urg_offset < used) {
1332 if (!urg_offset) {
1333 if (!sock_flag(sk, SOCK_URGINLINE)) {
1334 ++*seq;
1335 offset++;
1336 used--;
1337 if (!used)
1338 goto skip_copy;
1339 }
1340 } else
1341 used = urg_offset;
1342 }
1343 }
1344
1345 if (!(flags & MSG_TRUNC)) {
1346#ifdef CONFIG_NET_DMA
1347 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1348 tp->ucopy.dma_chan = get_softnet_dma();
1349
1350 if (tp->ucopy.dma_chan) {
1351 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1352 tp->ucopy.dma_chan, skb, offset,
1353 msg->msg_iov, used,
1354 tp->ucopy.pinned_list);
1355
1356 if (tp->ucopy.dma_cookie < 0) {
1357
1358 printk(KERN_ALERT "dma_cookie < 0\n");
1359
1360 /* Exception. Bailout! */
1361 if (!copied)
1362 copied = -EFAULT;
1363 break;
1364 }
1365 if ((offset + used) == skb->len)
1366 copied_early = 1;
1367
1368 } else
1369#endif
1370 {
1371 err = skb_copy_datagram_iovec(skb, offset,
1372 msg->msg_iov, used);
1373 if (err) {
1374 /* Exception. Bailout! */
1375 if (!copied)
1376 copied = -EFAULT;
1377 break;
1378 }
1379 }
1380 }
1381
1382 *seq += used;
1383 copied += used;
1384 len -= used;
1385
1386 tcp_rcv_space_adjust(sk);
1387
1388skip_copy:
1389 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1390 tp->urg_data = 0;
1391 tcp_fast_path_check(sk, tp);
1392 }
1393 if (used + offset < skb->len)
1394 continue;
1395
1396 if (skb->h.th->fin)
1397 goto found_fin_ok;
1398 if (!(flags & MSG_PEEK)) {
1399 sk_eat_skb(sk, skb, copied_early);
1400 copied_early = 0;
1401 }
1402 continue;
1403
1404 found_fin_ok:
1405 /* Process the FIN. */
1406 ++*seq;
1407 if (!(flags & MSG_PEEK)) {
1408 sk_eat_skb(sk, skb, copied_early);
1409 copied_early = 0;
1410 }
1411 break;
1412 } while (len > 0);
1413
1414 if (user_recv) {
 1415 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1416 int chunk;
1417
1418 tp->ucopy.len = copied > 0 ? len : 0;
1419
1420 tcp_prequeue_process(sk);
1421
1422 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1423 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1424 len -= chunk;
1425 copied += chunk;
1426 }
1427 }
1428
1429 tp->ucopy.task = NULL;
1430 tp->ucopy.len = 0;
1431 }
1432
1433#ifdef CONFIG_NET_DMA
1434 if (tp->ucopy.dma_chan) {
1435 struct sk_buff *skb;
1436 dma_cookie_t done, used;
1437
1438 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1439
1440 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1441 tp->ucopy.dma_cookie, &done,
1442 &used) == DMA_IN_PROGRESS) {
1443 /* do partial cleanup of sk_async_wait_queue */
1444 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1445 (dma_async_is_complete(skb->dma_cookie, done,
1446 used) == DMA_SUCCESS)) {
1447 __skb_dequeue(&sk->sk_async_wait_queue);
1448 kfree_skb(skb);
1449 }
1450 }
1451
1452 /* Safe to free early-copied skbs now */
1453 __skb_queue_purge(&sk->sk_async_wait_queue);
1454 dma_chan_put(tp->ucopy.dma_chan);
1455 tp->ucopy.dma_chan = NULL;
1456 }
1457 if (tp->ucopy.pinned_list) {
1458 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1459 tp->ucopy.pinned_list = NULL;
1460 }
1461#endif
1462
1463 /* According to UNIX98, msg_name/msg_namelen are ignored
1464 * on connected socket. I was just happy when found this 8) --ANK
1465 */
1466
1467 /* Clean up data we have read: This will do ACK frames. */
 1468 tcp_cleanup_rbuf(sk, copied);
1469
1470 TCP_CHECK_TIMER(sk);
1471 release_sock(sk);
1472 return copied;
1473
1474out:
1475 TCP_CHECK_TIMER(sk);
1476 release_sock(sk);
1477 return err;
1478
1479recv_urg:
1480 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1481 goto out;
1482}
1483
1484/*
1485 * State processing on a close. This implements the state shift for
1486 * sending our FIN frame. Note that we only send a FIN for some
1487 * states. A shutdown() may have already sent the FIN, or we may be
1488 * closed.
1489 */
1490
 1491 static const unsigned char new_state[16] = {
1492 /* current state: new state: action: */
1493 /* (Invalid) */ TCP_CLOSE,
1494 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1495 /* TCP_SYN_SENT */ TCP_CLOSE,
1496 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1497 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1498 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1499 /* TCP_TIME_WAIT */ TCP_CLOSE,
1500 /* TCP_CLOSE */ TCP_CLOSE,
1501 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1502 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1503 /* TCP_LISTEN */ TCP_CLOSE,
1504 /* TCP_CLOSING */ TCP_CLOSING,
1505};
1506
1507static int tcp_close_state(struct sock *sk)
1508{
1509 int next = (int)new_state[sk->sk_state];
1510 int ns = next & TCP_STATE_MASK;
1511
1512 tcp_set_state(sk, ns);
1513
1514 return next & TCP_ACTION_FIN;
1515}
1516
1517/*
1518 * Shutdown the sending side of a connection. Much like close except
1519 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1520 */
1521
1522void tcp_shutdown(struct sock *sk, int how)
1523{
1524 /* We need to grab some memory, and put together a FIN,
1525 * and then put it into the queue to be sent.
1526 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1527 */
1528 if (!(how & SEND_SHUTDOWN))
1529 return;
1530
1531 /* If we've already sent a FIN, or it's a closed state, skip this. */
1532 if ((1 << sk->sk_state) &
1533 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1534 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1535 /* Clear out any half completed packets. FIN if needed. */
1536 if (tcp_close_state(sk))
1537 tcp_send_fin(sk);
1538 }
1539}
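
tcp_shutdown() is reached from the shutdown(2) system call; the classic half-close pattern below shows the intended use (a userspace sketch, not part of tcp.c):

#include <sys/socket.h>
#include <unistd.h>

/* Send our FIN, keep reading until the peer closes, then release the fd. */
static void half_close_and_drain(int fd)
{
	char buf[4096];

	shutdown(fd, SHUT_WR);		/* kernel side: tcp_shutdown() sends the FIN */
	while (read(fd, buf, sizeof(buf)) > 0)
		;			/* drain whatever the peer still sends */
	close(fd);
}
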
1540
1541void tcp_close(struct sock *sk, long timeout)
1542{
1543 struct sk_buff *skb;
1544 int data_was_unread = 0;
 1545 int state;
1546
1547 lock_sock(sk);
1548 sk->sk_shutdown = SHUTDOWN_MASK;
1549
1550 if (sk->sk_state == TCP_LISTEN) {
1551 tcp_set_state(sk, TCP_CLOSE);
1552
1553 /* Special case. */
 1554 inet_csk_listen_stop(sk);
1555
1556 goto adjudge_to_death;
1557 }
1558
1559 /* We need to flush the recv. buffs. We do this only on the
1560 * descriptor close, not protocol-sourced closes, because the
1561 * reader process may not have drained the data yet!
1562 */
1563 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1564 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1565 skb->h.th->fin;
1566 data_was_unread += len;
1567 __kfree_skb(skb);
1568 }
1569
1570 sk_stream_mem_reclaim(sk);
1571
1572 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1573 * 3.10, we send a RST here because data was lost. To
1574 * witness the awful effects of the old behavior of always
1575 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1576 * a bulk GET in an FTP client, suspend the process, wait
1577 * for the client to advertise a zero window, then kill -9
1578 * the FTP client, wheee... Note: timeout is always zero
1579 * in such a case.
1580 */
1581 if (data_was_unread) {
1582 /* Unread data was tossed, zap the connection. */
1583 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1584 tcp_set_state(sk, TCP_CLOSE);
1585 tcp_send_active_reset(sk, GFP_KERNEL);
1586 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1587 /* Check zero linger _after_ checking for unread data. */
1588 sk->sk_prot->disconnect(sk, 0);
1589 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1590 } else if (tcp_close_state(sk)) {
1591 /* We FIN if the application ate all the data before
1592 * zapping the connection.
1593 */
1594
1595 /* RED-PEN. Formally speaking, we have broken TCP state
1596 * machine. State transitions:
1597 *
1598 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1599 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1600 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1601 *
1602 * are legal only when FIN has been sent (i.e. in window),
1603 * rather than queued out of window. Purists blame.
1604 *
1605 * F.e. "RFC state" is ESTABLISHED,
1606 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1607 *
1608 * The visible declinations are that sometimes
1609 * we enter time-wait state, when it is not required really
1610 * (harmless), do not send active resets, when they are
1611 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1612 * they look as CLOSING or LAST_ACK for Linux)
1613 * Probably, I missed some more holelets.
1614 * --ANK
1615 */
1616 tcp_send_fin(sk);
1617 }
1618
1619 sk_stream_wait_close(sk, timeout);
1620
1621adjudge_to_death:
1622 state = sk->sk_state;
1623 sock_hold(sk);
1624 sock_orphan(sk);
1625 atomic_inc(sk->sk_prot->orphan_count);
1626
1627 /* It is the last release_sock in its life. It will remove backlog. */
1628 release_sock(sk);
1629
1630
1631 /* Now socket is owned by kernel and we acquire BH lock
1632 to finish close. No need to check for user refs.
1633 */
1634 local_bh_disable();
1635 bh_lock_sock(sk);
1636 BUG_TRAP(!sock_owned_by_user(sk));
1637
1638 /* Have we already been destroyed by a softirq or backlog? */
1639 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1640 goto out;
1641
 1642 /* This is a (useful) BSD violation of the RFC. There is a
1643 * problem with TCP as specified in that the other end could
1644 * keep a socket open forever with no application left this end.
1645 * We use a 3 minute timeout (about the same as BSD) then kill
1646 * our end. If they send after that then tough - BUT: long enough
1647 * that we won't make the old 4*rto = almost no time - whoops
1648 * reset mistake.
1649 *
1650 * Nope, it was not mistake. It is really desired behaviour
1651 * f.e. on http servers, when such sockets are useless, but
1652 * consume significant resources. Let's do it with special
1653 * linger2 option. --ANK
1654 */
1655
1656 if (sk->sk_state == TCP_FIN_WAIT2) {
1657 struct tcp_sock *tp = tcp_sk(sk);
1658 if (tp->linger2 < 0) {
1659 tcp_set_state(sk, TCP_CLOSE);
1660 tcp_send_active_reset(sk, GFP_ATOMIC);
1661 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1662 } else {
 1663 const int tmo = tcp_fin_time(sk);
1664
1665 if (tmo > TCP_TIMEWAIT_LEN) {
1666 inet_csk_reset_keepalive_timer(sk,
1667 tmo - TCP_TIMEWAIT_LEN);
 1668 } else {
1669 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1670 goto out;
1671 }
1672 }
1673 }
1674 if (sk->sk_state != TCP_CLOSE) {
1675 sk_stream_mem_reclaim(sk);
 1676 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
1677 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1678 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1679 if (net_ratelimit())
1680 printk(KERN_INFO "TCP: too many of orphaned "
1681 "sockets\n");
1682 tcp_set_state(sk, TCP_CLOSE);
1683 tcp_send_active_reset(sk, GFP_ATOMIC);
1684 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1685 }
1686 }
1687
1688 if (sk->sk_state == TCP_CLOSE)
 1689 inet_csk_destroy_sock(sk);
1690 /* Otherwise, socket is reprieved until protocol close. */
1691
1692out:
1693 bh_unlock_sock(sk);
1694 local_bh_enable();
1695 sock_put(sk);
1696}
1697
1698/* These states need RST on ABORT according to RFC793 */
1699
1700static inline int tcp_need_reset(int state)
1701{
1702 return (1 << state) &
1703 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1704 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1705}
1706
1707int tcp_disconnect(struct sock *sk, int flags)
1708{
1709 struct inet_sock *inet = inet_sk(sk);
 1710 struct inet_connection_sock *icsk = inet_csk(sk);
1711 struct tcp_sock *tp = tcp_sk(sk);
1712 int err = 0;
1713 int old_state = sk->sk_state;
1714
1715 if (old_state != TCP_CLOSE)
1716 tcp_set_state(sk, TCP_CLOSE);
1717
1718 /* ABORT function of RFC793 */
1719 if (old_state == TCP_LISTEN) {
 1720 inet_csk_listen_stop(sk);
1721 } else if (tcp_need_reset(old_state) ||
1722 (tp->snd_nxt != tp->write_seq &&
1723 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
 1724 /* The last check adjusts for discrepancy of Linux wrt. RFC
1725 * states
1726 */
1727 tcp_send_active_reset(sk, gfp_any());
1728 sk->sk_err = ECONNRESET;
1729 } else if (old_state == TCP_SYN_SENT)
1730 sk->sk_err = ECONNRESET;
1731
1732 tcp_clear_xmit_timers(sk);
1733 __skb_queue_purge(&sk->sk_receive_queue);
1734 sk_stream_writequeue_purge(sk);
1735 __skb_queue_purge(&tp->out_of_order_queue);
1736#ifdef CONFIG_NET_DMA
1737 __skb_queue_purge(&sk->sk_async_wait_queue);
1738#endif
1739
1740 inet->dport = 0;
1741
1742 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1743 inet_reset_saddr(sk);
1744
1745 sk->sk_shutdown = 0;
1746 sock_reset_flag(sk, SOCK_DONE);
1747 tp->srtt = 0;
1748 if ((tp->write_seq += tp->max_window + 2) == 0)
1749 tp->write_seq = 1;
 1750 icsk->icsk_backoff = 0;
 1751 tp->snd_cwnd = 2;
 1752 icsk->icsk_probes_out = 0;
1753 tp->packets_out = 0;
1754 tp->snd_ssthresh = 0x7fffffff;
1755 tp->snd_cwnd_cnt = 0;
 1756 tp->bytes_acked = 0;
 1757 tcp_set_ca_state(sk, TCP_CA_Open);
 1758 tcp_clear_retrans(tp);
 1759 inet_csk_delack_init(sk);
1760 sk->sk_send_head = NULL;
1761 tp->rx_opt.saw_tstamp = 0;
1762 tcp_sack_reset(&tp->rx_opt);
1763 __sk_dst_reset(sk);
1764
 1765 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1766
1767 sk->sk_error_report(sk);
1768 return err;
1769}
1770
1771/*
1772 * Socket option code for TCP.
1773 */
1774static int do_tcp_setsockopt(struct sock *sk, int level,
1775 int optname, char __user *optval, int optlen)
1776{
1777 struct tcp_sock *tp = tcp_sk(sk);
 1778 struct inet_connection_sock *icsk = inet_csk(sk);
1779 int val;
1780 int err = 0;
1781
1782 /* This is a string value all the others are int's */
1783 if (optname == TCP_CONGESTION) {
1784 char name[TCP_CA_NAME_MAX];
1785
1786 if (optlen < 1)
1787 return -EINVAL;
1788
1789 val = strncpy_from_user(name, optval,
1790 min(TCP_CA_NAME_MAX-1, optlen));
1791 if (val < 0)
1792 return -EFAULT;
1793 name[val] = 0;
1794
1795 lock_sock(sk);
 1796 err = tcp_set_congestion_control(sk, name);
1797 release_sock(sk);
1798 return err;
1799 }
1800
1801 if (optlen < sizeof(int))
1802 return -EINVAL;
1803
1804 if (get_user(val, (int __user *)optval))
1805 return -EFAULT;
1806
1807 lock_sock(sk);
1808
1809 switch (optname) {
1810 case TCP_MAXSEG:
1811 /* Values greater than interface MTU won't take effect. However
1812 * at the point when this call is done we typically don't yet
1813 * know which interface is going to be used */
1814 if (val < 8 || val > MAX_TCP_WINDOW) {
1815 err = -EINVAL;
1816 break;
1817 }
1818 tp->rx_opt.user_mss = val;
1819 break;
1820
1821 case TCP_NODELAY:
1822 if (val) {
1823 /* TCP_NODELAY is weaker than TCP_CORK, so that
1824 * this option on corked socket is remembered, but
1825 * it is not activated until cork is cleared.
1826 *
1827 * However, when TCP_NODELAY is set we make
1828 * an explicit push, which overrides even TCP_CORK
1829 * for currently queued segments.
1830 */
1831 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1832 tcp_push_pending_frames(sk, tp);
1833 } else {
1834 tp->nonagle &= ~TCP_NAGLE_OFF;
1835 }
1836 break;
1837
1838 case TCP_CORK:
1839 /* When set indicates to always queue non-full frames.
1840 * Later the user clears this option and we transmit
1841 * any pending partial frames in the queue. This is
1842 * meant to be used alongside sendfile() to get properly
1843 * filled frames when the user (for example) must write
1844 * out headers with a write() call first and then use
1845 * sendfile to send out the data parts.
1846 *
1847 * TCP_CORK can be set together with TCP_NODELAY and it is
1848 * stronger than TCP_NODELAY.
1849 */
1850 if (val) {
1851 tp->nonagle |= TCP_NAGLE_CORK;
1852 } else {
1853 tp->nonagle &= ~TCP_NAGLE_CORK;
1854 if (tp->nonagle&TCP_NAGLE_OFF)
1855 tp->nonagle |= TCP_NAGLE_PUSH;
1856 tcp_push_pending_frames(sk, tp);
1857 }
1858 break;
1859
1860 case TCP_KEEPIDLE:
1861 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1862 err = -EINVAL;
1863 else {
1864 tp->keepalive_time = val * HZ;
1865 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1866 !((1 << sk->sk_state) &
1867 (TCPF_CLOSE | TCPF_LISTEN))) {
1868 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1869 if (tp->keepalive_time > elapsed)
1870 elapsed = tp->keepalive_time - elapsed;
1871 else
1872 elapsed = 0;
463c84b9 1873 inet_csk_reset_keepalive_timer(sk, elapsed);
1874 }
1875 }
1876 break;
1877 case TCP_KEEPINTVL:
1878 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1879 err = -EINVAL;
1880 else
1881 tp->keepalive_intvl = val * HZ;
1882 break;
1883 case TCP_KEEPCNT:
1884 if (val < 1 || val > MAX_TCP_KEEPCNT)
1885 err = -EINVAL;
1886 else
1887 tp->keepalive_probes = val;
1888 break;
1889 case TCP_SYNCNT:
1890 if (val < 1 || val > MAX_TCP_SYNCNT)
1891 err = -EINVAL;
1892 else
463c84b9 1893 icsk->icsk_syn_retries = val;
1894 break;
1895
1896 case TCP_LINGER2:
1897 if (val < 0)
1898 tp->linger2 = -1;
1899 else if (val > sysctl_tcp_fin_timeout / HZ)
1900 tp->linger2 = 0;
1901 else
1902 tp->linger2 = val * HZ;
1903 break;
1904
1905 case TCP_DEFER_ACCEPT:
295f7324 1906 icsk->icsk_accept_queue.rskq_defer_accept = 0;
1907 if (val > 0) {
1908 /* Translate value in seconds to number of
1909 * retransmits */
295f7324 1910 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
1da177e4 1911 val > ((TCP_TIMEOUT_INIT / HZ) <<
1912 icsk->icsk_accept_queue.rskq_defer_accept))
1913 icsk->icsk_accept_queue.rskq_defer_accept++;
1914 icsk->icsk_accept_queue.rskq_defer_accept++;
1915 }
1916 break;
1917
1918 case TCP_WINDOW_CLAMP:
1919 if (!val) {
1920 if (sk->sk_state != TCP_CLOSE) {
1921 err = -EINVAL;
1922 break;
1923 }
1924 tp->window_clamp = 0;
1925 } else
1926 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1927 SOCK_MIN_RCVBUF / 2 : val;
1928 break;
1929
1930 case TCP_QUICKACK:
1931 if (!val) {
463c84b9 1932 icsk->icsk_ack.pingpong = 1;
1da177e4 1933 } else {
463c84b9 1934 icsk->icsk_ack.pingpong = 0;
1935 if ((1 << sk->sk_state) &
1936 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
1937 inet_csk_ack_scheduled(sk)) {
1938 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
0e4b4992 1939 tcp_cleanup_rbuf(sk, 1);
1da177e4 1940 if (!(val & 1))
463c84b9 1941 icsk->icsk_ack.pingpong = 1;
1942 }
1943 }
1944 break;
1945
1946#ifdef CONFIG_TCP_MD5SIG
1947 case TCP_MD5SIG:
1948 /* Read the IP->Key mappings from userspace */
1949 err = tp->af_specific->md5_parse(sk, optval, optlen);
1950 break;
1951#endif
1952
1953 default:
1954 err = -ENOPROTOOPT;
1955 break;
1956 };
1957 release_sock(sk);
1958 return err;
1959}
1960
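/*
 * Illustrative userspace sketch (not part of this file): how the
 * TCP_CORK semantics handled above are typically driven by an
 * application that writes a small header and then uses sendfile().
 * The descriptor names and sizes are made up; IPPROTO_TCP equals the
 * SOL_TCP level used by the kernel code above.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_header_then_file(int sock, const char *hdr, size_t hdrlen,
				  int filefd, size_t filelen)
{
	int one = 1, zero = 0;

	/* Cork: queue the short header instead of emitting a tiny frame. */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &one, sizeof(one));
	write(sock, hdr, hdrlen);
	sendfile(sock, filefd, NULL, filelen);
	/* Uncork: pending partial frames are pushed out, as described above. */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &zero, sizeof(zero));
}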
1961int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1962 int optlen)
1963{
1964 struct inet_connection_sock *icsk = inet_csk(sk);
1965
1966 if (level != SOL_TCP)
1967 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1968 optval, optlen);
1969 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1970}
1971
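/*
 * Illustrative userspace sketch (not part of this file): the
 * TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT knobs validated above, as an
 * application would normally set them after enabling SO_KEEPALIVE.
 * The timing values are arbitrary examples.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int enable_keepalive(int sock)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	if (setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
		return -1;
	/* Probe after 60 s idle, every 10 s, and give up after 5 probes. */
	setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
	return 0;
}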
1972#ifdef CONFIG_COMPAT
1973int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1974 char __user *optval, int optlen)
3fdadf7d 1975{
1976 if (level != SOL_TCP)
1977 return inet_csk_compat_setsockopt(sk, level, optname,
1978 optval, optlen);
1979 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1980}
1981
1982EXPORT_SYMBOL(compat_tcp_setsockopt);
1983#endif
1984
1985/* Return information about state of tcp endpoint in API format. */
1986void tcp_get_info(struct sock *sk, struct tcp_info *info)
1987{
1988 struct tcp_sock *tp = tcp_sk(sk);
463c84b9 1989 const struct inet_connection_sock *icsk = inet_csk(sk);
1990 u32 now = tcp_time_stamp;
1991
1992 memset(info, 0, sizeof(*info));
1993
1994 info->tcpi_state = sk->sk_state;
6687e988 1995 info->tcpi_ca_state = icsk->icsk_ca_state;
463c84b9 1996 info->tcpi_retransmits = icsk->icsk_retransmits;
6687e988 1997 info->tcpi_probes = icsk->icsk_probes_out;
463c84b9 1998 info->tcpi_backoff = icsk->icsk_backoff;
1999
2000 if (tp->rx_opt.tstamp_ok)
2001 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2002 if (tp->rx_opt.sack_ok)
2003 info->tcpi_options |= TCPI_OPT_SACK;
2004 if (tp->rx_opt.wscale_ok) {
2005 info->tcpi_options |= TCPI_OPT_WSCALE;
2006 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2007 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2008 }
2009
2010 if (tp->ecn_flags&TCP_ECN_OK)
2011 info->tcpi_options |= TCPI_OPT_ECN;
2012
2013 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2014 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
c1b4a7e6 2015 info->tcpi_snd_mss = tp->mss_cache;
463c84b9 2016 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2017
2018 info->tcpi_unacked = tp->packets_out;
2019 info->tcpi_sacked = tp->sacked_out;
2020 info->tcpi_lost = tp->lost_out;
2021 info->tcpi_retrans = tp->retrans_out;
2022 info->tcpi_fackets = tp->fackets_out;
2023
2024 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
463c84b9 2025 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2026 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2027
d83d8461 2028 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2029 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2030 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2031 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2032 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2033 info->tcpi_snd_cwnd = tp->snd_cwnd;
2034 info->tcpi_advmss = tp->advmss;
2035 info->tcpi_reordering = tp->reordering;
2036
2037 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2038 info->tcpi_rcv_space = tp->rcvq_space.space;
2039
2040 info->tcpi_total_retrans = tp->total_retrans;
2041}
2042
2043EXPORT_SYMBOL_GPL(tcp_get_info);
2044
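/*
 * Illustrative userspace sketch (not part of this file): the TCP_INFO
 * case of do_tcp_getsockopt() below copies out the structure filled in
 * by tcp_get_info() above; the fields printed here are among those set
 * above.  Error handling is minimal on purpose.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void dump_tcp_info(int sock)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("rtt=%uus rttvar=%uus cwnd=%u retrans=%u\n",
		       info.tcpi_rtt, info.tcpi_rttvar,
		       info.tcpi_snd_cwnd, info.tcpi_retrans);
}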
2045static int do_tcp_getsockopt(struct sock *sk, int level,
2046 int optname, char __user *optval, int __user *optlen)
1da177e4 2047{
295f7324 2048 struct inet_connection_sock *icsk = inet_csk(sk);
2049 struct tcp_sock *tp = tcp_sk(sk);
2050 int val, len;
2051
2052 if (get_user(len, optlen))
2053 return -EFAULT;
2054
2055 len = min_t(unsigned int, len, sizeof(int));
2056
2057 if (len < 0)
2058 return -EINVAL;
2059
2060 switch (optname) {
2061 case TCP_MAXSEG:
c1b4a7e6 2062 val = tp->mss_cache;
2063 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2064 val = tp->rx_opt.user_mss;
2065 break;
2066 case TCP_NODELAY:
2067 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2068 break;
2069 case TCP_CORK:
2070 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2071 break;
2072 case TCP_KEEPIDLE:
2073 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2074 break;
2075 case TCP_KEEPINTVL:
2076 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2077 break;
2078 case TCP_KEEPCNT:
2079 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2080 break;
2081 case TCP_SYNCNT:
295f7324 2082 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2083 break;
2084 case TCP_LINGER2:
2085 val = tp->linger2;
2086 if (val >= 0)
2087 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2088 break;
2089 case TCP_DEFER_ACCEPT:
2090 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2091 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2092 break;
2093 case TCP_WINDOW_CLAMP:
2094 val = tp->window_clamp;
2095 break;
2096 case TCP_INFO: {
2097 struct tcp_info info;
2098
2099 if (get_user(len, optlen))
2100 return -EFAULT;
2101
2102 tcp_get_info(sk, &info);
2103
2104 len = min_t(unsigned int, len, sizeof(info));
2105 if (put_user(len, optlen))
2106 return -EFAULT;
2107 if (copy_to_user(optval, &info, len))
2108 return -EFAULT;
2109 return 0;
2110 }
2111 case TCP_QUICKACK:
295f7324 2112 val = !icsk->icsk_ack.pingpong;
1da177e4 2113 break;
2114
2115 case TCP_CONGESTION:
2116 if (get_user(len, optlen))
2117 return -EFAULT;
2118 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2119 if (put_user(len, optlen))
2120 return -EFAULT;
6687e988 2121 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2122 return -EFAULT;
2123 return 0;
2124 default:
2125 return -ENOPROTOOPT;
2126 };
2127
2128 if (put_user(len, optlen))
2129 return -EFAULT;
2130 if (copy_to_user(optval, &val, len))
2131 return -EFAULT;
2132 return 0;
2133}
2134
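/*
 * Illustrative sketch (not part of this file) of the TCP_DEFER_ACCEPT
 * translation used above: the setsockopt() path converts a timeout in
 * seconds into the smallest retransmit count whose exponentially
 * backed-off SYN-ACK timeouts cover it, and the getsockopt() path maps
 * the stored count back to seconds.  TIMEOUT_INIT_SEC stands in for
 * TCP_TIMEOUT_INIT / HZ (3 seconds in kernels of this vintage).
 */
#define TIMEOUT_INIT_SEC 3

static unsigned int defer_accept_secs_to_retrans(int secs)
{
	unsigned int retrans = 0;

	while (retrans < 32 && secs > (TIMEOUT_INIT_SEC << retrans))
		retrans++;
	return retrans + 1;		/* matches the extra increment above */
}

static int defer_accept_retrans_to_secs(unsigned int retrans)
{
	return retrans ? TIMEOUT_INIT_SEC << (retrans - 1) : 0;
}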
2135int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2136 int __user *optlen)
2137{
2138 struct inet_connection_sock *icsk = inet_csk(sk);
2139
2140 if (level != SOL_TCP)
2141 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2142 optval, optlen);
2143 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2144}
2145
2146#ifdef CONFIG_COMPAT
2147int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2148 char __user *optval, int __user *optlen)
3fdadf7d 2149{
2150 if (level != SOL_TCP)
2151 return inet_csk_compat_getsockopt(sk, level, optname,
2152 optval, optlen);
2153 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2154}
2155
2156EXPORT_SYMBOL(compat_tcp_getsockopt);
3fdadf7d 2157#endif
1da177e4 2158
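/*
 * Illustrative userspace sketch (not part of this file): TCP_CONGESTION
 * is the one string-valued option handled above; it selects a congestion
 * control module by name and reads the current one back.  The buffer
 * size mirrors the kernel's TCP_CA_NAME_MAX (16); older libc headers may
 * require pulling the TCP_CONGESTION constant from <linux/tcp.h>.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int switch_congestion_control(int sock, const char *name)
{
	char cur[16];
	socklen_t len = sizeof(cur);

	if (setsockopt(sock, IPPROTO_TCP, TCP_CONGESTION,
		       name, strlen(name)) < 0)
		return -1;
	return getsockopt(sock, IPPROTO_TCP, TCP_CONGESTION, cur, &len);
}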
576a30eb 2159struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2160{
2161 struct sk_buff *segs = ERR_PTR(-EINVAL);
2162 struct tcphdr *th;
2163 unsigned thlen;
2164 unsigned int seq;
2165 unsigned int delta;
2166 unsigned int oldlen;
2167 unsigned int len;
2168
2169 if (!pskb_may_pull(skb, sizeof(*th)))
2170 goto out;
2171
2172 th = skb->h.th;
2173 thlen = th->doff * 4;
2174 if (thlen < sizeof(*th))
2175 goto out;
2176
2177 if (!pskb_may_pull(skb, thlen))
2178 goto out;
2179
0718bcc0 2180 oldlen = (u16)~skb->len;
2181 __skb_pull(skb, thlen);
2182
2183 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2184 /* Packet is from an untrusted source, reset gso_segs. */
2185 int type = skb_shinfo(skb)->gso_type;
2186 int mss;
2187
2188 if (unlikely(type &
2189 ~(SKB_GSO_TCPV4 |
2190 SKB_GSO_DODGY |
2191 SKB_GSO_TCP_ECN |
2192 SKB_GSO_TCPV6 |
2193 0) ||
2194 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2195 goto out;
3820c3f3 2196
bbcf467d 2197 mss = skb_shinfo(skb)->gso_size;
2198 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
2199
2200 segs = NULL;
2201 goto out;
2202 }
2203
576a30eb 2204 segs = skb_segment(skb, features);
2205 if (IS_ERR(segs))
2206 goto out;
2207
2208 len = skb_shinfo(skb)->gso_size;
0718bcc0 2209 delta = htonl(oldlen + (thlen + len));
2210
2211 skb = segs;
2212 th = skb->h.th;
2213 seq = ntohl(th->seq);
2214
2215 do {
2216 th->fin = th->psh = 0;
2217
0718bcc0 2218 th->check = ~csum_fold(th->check + delta);
84fa7933 2219 if (skb->ip_summed != CHECKSUM_PARTIAL)
2220 th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2221 skb->csum));
2222
2223 seq += len;
2224 skb = skb->next;
2225 th = skb->h.th;
2226
2227 th->seq = htonl(seq);
2228 th->cwr = 0;
2229 } while (skb->next);
2230
2231 delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
2232 th->check = ~csum_fold(th->check + delta);
84fa7933 2233 if (skb->ip_summed != CHECKSUM_PARTIAL)
2234 th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2235 skb->csum));
2236
2237out:
2238 return segs;
2239}
adcfc7d0 2240EXPORT_SYMBOL(tcp_tso_segment);
f4c50d99 2241
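/*
 * Illustrative sketch (not part of this file) of the one's-complement
 * folding behind the checksum fixups in tcp_tso_segment() above: a
 * 32-bit running sum is folded into 16 bits and complemented.  The code
 * above exploits the same arithmetic to patch th->check incrementally,
 * adding only the delta for the changed pseudo-header length instead of
 * re-summing the payload.  fold32() plays the role of csum_fold(); the
 * helper names are made up.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries into the low 16 bits */
	sum = (sum & 0xffff) + (sum >> 16);	/* a second pass catches the last carry */
	return (uint16_t)sum;
}

static uint16_t inet_checksum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((data[i] << 8) | data[i + 1]);
	if (len & 1)
		sum += (uint32_t)(data[len - 1] << 8);	/* pad the odd trailing byte */
	return (uint16_t)~fold32(sum);
}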
2242#ifdef CONFIG_TCP_MD5SIG
2243static unsigned long tcp_md5sig_users;
2244static struct tcp_md5sig_pool **tcp_md5sig_pool;
2245static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2246
2247static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2248{
2249 int cpu;
2250 for_each_possible_cpu(cpu) {
2251 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2252 if (p) {
2253 if (p->md5_desc.tfm)
2254 crypto_free_hash(p->md5_desc.tfm);
2255 kfree(p);
2256 p = NULL;
2257 }
2258 }
2259 free_percpu(pool);
2260}
2261
2262void tcp_free_md5sig_pool(void)
2263{
2264 struct tcp_md5sig_pool **pool = NULL;
2265
2266 spin_lock(&tcp_md5sig_pool_lock);
2267 if (--tcp_md5sig_users == 0) {
2268 pool = tcp_md5sig_pool;
2269 tcp_md5sig_pool = NULL;
2270 }
2271 spin_unlock(&tcp_md5sig_pool_lock);
2272 if (pool)
2273 __tcp_free_md5sig_pool(pool);
2274}
2275
2276EXPORT_SYMBOL(tcp_free_md5sig_pool);
2277
2278struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2279{
2280 int cpu;
2281 struct tcp_md5sig_pool **pool;
2282
2283 pool = alloc_percpu(struct tcp_md5sig_pool *);
2284 if (!pool)
2285 return NULL;
2286
2287 for_each_possible_cpu(cpu) {
2288 struct tcp_md5sig_pool *p;
2289 struct crypto_hash *hash;
2290
2291 p = kzalloc(sizeof(*p), GFP_KERNEL);
2292 if (!p)
2293 goto out_free;
2294 *per_cpu_ptr(pool, cpu) = p;
2295
2296 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2297 if (!hash || IS_ERR(hash))
2298 goto out_free;
2299
2300 p->md5_desc.tfm = hash;
2301 }
2302 return pool;
2303out_free:
2304 __tcp_free_md5sig_pool(pool);
2305 return NULL;
2306}
2307
2308struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2309{
2310 struct tcp_md5sig_pool **pool;
2311 int alloc = 0;
2312
2313retry:
2314 spin_lock(&tcp_md5sig_pool_lock);
2315 pool = tcp_md5sig_pool;
2316 if (tcp_md5sig_users++ == 0) {
2317 alloc = 1;
2318 spin_unlock(&tcp_md5sig_pool_lock);
2319 } else if (!pool) {
2320 tcp_md5sig_users--;
2321 spin_unlock(&tcp_md5sig_pool_lock);
2322 cpu_relax();
2323 goto retry;
2324 } else
2325 spin_unlock(&tcp_md5sig_pool_lock);
2326
2327 if (alloc) {
2328 /* we cannot hold spinlock here because this may sleep. */
2329 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
2330 spin_lock(&tcp_md5sig_pool_lock);
2331 if (!p) {
2332 tcp_md5sig_users--;
2333 spin_unlock(&tcp_md5sig_pool_lock);
2334 return NULL;
2335 }
2336 pool = tcp_md5sig_pool;
2337 if (pool) {
2338 /* oops, it has already been assigned. */
2339 spin_unlock(&tcp_md5sig_pool_lock);
2340 __tcp_free_md5sig_pool(p);
2341 } else {
2342 tcp_md5sig_pool = pool = p;
2343 spin_unlock(&tcp_md5sig_pool_lock);
2344 }
2345 }
2346 return pool;
2347}
2348
2349EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2350
2351struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2352{
2353 struct tcp_md5sig_pool **p;
2354 spin_lock(&tcp_md5sig_pool_lock);
2355 p = tcp_md5sig_pool;
2356 if (p)
2357 tcp_md5sig_users++;
2358 spin_unlock(&tcp_md5sig_pool_lock);
2359 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2360}
2361
2362EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2363
2364void __tcp_put_md5sig_pool(void) {
2365 __tcp_free_md5sig_pool(tcp_md5sig_pool);
2366}
2367
2368EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2369#endif
2370
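/*
 * Illustrative userspace sketch (not part of this file): the TCP_MD5SIG
 * setsockopt parsed by tp->af_specific->md5_parse() above takes a
 * struct tcp_md5sig that maps a peer address to a shared key.  The
 * structure and constant come from the kernel's <linux/tcp.h> of this
 * era; header availability and the caller-supplied peer/key are
 * assumptions.
 */
#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int add_md5_key(int sock, const struct sockaddr_in *peer,
		       const void *key, int keylen)
{
	struct tcp_md5sig sig;

	if (keylen > (int)sizeof(sig.tcpm_key))
		return -1;
	memset(&sig, 0, sizeof(sig));
	memcpy(&sig.tcpm_addr, peer, sizeof(*peer));
	sig.tcpm_keylen = keylen;
	memcpy(sig.tcpm_key, key, keylen);
	return setsockopt(sock, IPPROTO_TCP, TCP_MD5SIG, &sig, sizeof(sig));
}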
1da177e4 2371extern void __skb_cb_too_small_for_tcp(int, int);
5f8ef48d 2372extern struct tcp_congestion_ops tcp_reno;
2373
2374static __initdata unsigned long thash_entries;
2375static int __init set_thash_entries(char *str)
2376{
2377 if (!str)
2378 return 0;
2379 thash_entries = simple_strtoul(str, &str, 0);
2380 return 1;
2381}
2382__setup("thash_entries=", set_thash_entries);
2383
2384void __init tcp_init(void)
2385{
2386 struct sk_buff *skb = NULL;
2387 unsigned long limit;
2388 int order, i, max_share;
2389
2390 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2391 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2392 sizeof(skb->cb));
2393
2394 tcp_hashinfo.bind_bucket_cachep =
2395 kmem_cache_create("tcp_bind_bucket",
2396 sizeof(struct inet_bind_bucket), 0,
e5d679f3 2397 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1da177e4 2398
2399 /* Size and allocate the main established and bind bucket
2400 * hash tables.
2401 *
2402 * The methodology is similar to that of the buffer cache.
2403 */
6e04e021 2404 tcp_hashinfo.ehash =
1da177e4 2405 alloc_large_system_hash("TCP established",
0f7ff927 2406 sizeof(struct inet_ehash_bucket),
2407 thash_entries,
2408 (num_physpages >= 128 * 1024) ?
18955cfc 2409 13 : 15,
9e950efa 2410 0,
6e04e021 2411 &tcp_hashinfo.ehash_size,
2412 NULL,
2413 0);
2414 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
2415 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2416 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2417 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2418 }
2419
6e04e021 2420 tcp_hashinfo.bhash =
1da177e4 2421 alloc_large_system_hash("TCP bind",
0f7ff927 2422 sizeof(struct inet_bind_hashbucket),
6e04e021 2423 tcp_hashinfo.ehash_size,
1da177e4 2424 (num_physpages >= 128 * 1024) ?
18955cfc 2425 13 : 15,
9e950efa 2426 0,
6e04e021 2427 &tcp_hashinfo.bhash_size,
2428 NULL,
2429 64 * 1024);
2430 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2431 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2432 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2433 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2434 }
2435
2436 /* Try to be a bit smarter and adjust defaults depending
2437 * on available memory.
2438 */
2439 for (order = 0; ((1 << order) << PAGE_SHIFT) <
6e04e021 2440 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2441 order++)
2442 ;
e7626486 2443 if (order >= 4) {
2444 sysctl_local_port_range[0] = 32768;
2445 sysctl_local_port_range[1] = 61000;
295ff7ed 2446 tcp_death_row.sysctl_max_tw_buckets = 180000;
2447 sysctl_tcp_max_orphans = 4096 << (order - 4);
2448 sysctl_max_syn_backlog = 1024;
2449 } else if (order < 3) {
2450 sysctl_local_port_range[0] = 1024 * (3 - order);
295ff7ed 2451 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
2452 sysctl_tcp_max_orphans >>= (3 - order);
2453 sysctl_max_syn_backlog = 128;
2454 }
1da177e4 2455
2456 /* Allow no more than 3/4 kernel memory (usually less) allocated to TCP */
2457 sysctl_tcp_mem[0] = (1536 / sizeof (struct inet_bind_hashbucket)) << order;
2458 sysctl_tcp_mem[1] = sysctl_tcp_mem[0] * 4 / 3;
2459 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
1da177e4 2460
2461 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2462 max_share = min(4UL*1024*1024, limit);
2463
2464 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2465 sysctl_tcp_wmem[1] = 16*1024;
2466 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2467
2468 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2469 sysctl_tcp_rmem[1] = 87380;
2470 sysctl_tcp_rmem[2] = max(87380, max_share);
2471
2472 printk(KERN_INFO "TCP: Hash tables configured "
2473 "(established %d bind %d)\n",
6e04e021 2474 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
2475
2476 tcp_register_congestion_control(&tcp_reno);
2477}
2478
1da177e4 2479EXPORT_SYMBOL(tcp_close);
2480EXPORT_SYMBOL(tcp_disconnect);
2481EXPORT_SYMBOL(tcp_getsockopt);
2482EXPORT_SYMBOL(tcp_ioctl);
2483EXPORT_SYMBOL(tcp_poll);
2484EXPORT_SYMBOL(tcp_read_sock);
2485EXPORT_SYMBOL(tcp_recvmsg);
2486EXPORT_SYMBOL(tcp_sendmsg);
2487EXPORT_SYMBOL(tcp_sendpage);
2488EXPORT_SYMBOL(tcp_setsockopt);
2489EXPORT_SYMBOL(tcp_shutdown);
2490EXPORT_SYMBOL(tcp_statistics);