return (struct tcp_request_sock *)req;
}
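+/* Ties a deferred child socket back to the listener and the
+ * request_sock it came from, so the child can be queued for
+ * accept once data actually arrives.
+ */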
+struct tcp_deferred_accept_info {
+ struct sock *listen_sk;
+ struct request_sock *request;
+};
+
struct tcp_sock {
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
unsigned int keepalive_intvl; /* time interval between keep alive probes */
int linger2;
+ struct tcp_deferred_accept_info defer_tcp_accept;
+
unsigned long last_synq_overflow;
u32 tso_deferred;
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
rwlock_t syn_wait_lock;
- u8 rskq_defer_accept;
- /* 3 bytes hole, try to pack */
+ u16 rskq_defer_accept;
+ /* 2 bytes hole, try to pack */
struct listen_sock *listen_opt;
};
#define MAX_TCP_KEEPINTVL 32767
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127
+#define MAX_TCP_ACCEPT_DEFERRED 65535 /* fits the now-u16 rskq_defer_accept */
#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
struct inet_connection_sock *icsk = inet_csk(parent);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
struct listen_sock *lopt = queue->listen_opt;
- int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
- int thresh = max_retries;
+ int thresh = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
unsigned long now = jiffies;
struct request_sock **reqp, *req;
int i, budget;
}
}
- if (queue->rskq_defer_accept)
- max_retries = queue->rskq_defer_accept;
-
budget = 2 * (lopt->nr_table_entries / (timeout / interval));
i = lopt->clock_hand;
reqp = &lopt->syn_table[i];
while ((req = *reqp) != NULL) {
if (time_after_eq(now, req->expires)) {
- if ((req->retrans < (inet_rsk(req)->acked ? max_retries : thresh)) &&
- (inet_rsk(req)->acked ||
- !req->rsk_ops->rtx_syn_ack(parent, req))) {
+ if (req->retrans < thresh &&
+ !req->rsk_ops->rtx_syn_ack(parent, req)) {
unsigned long timeo;
if (req->retrans++ == 0)
lopt->qlen_young--;
case TCP_DEFER_ACCEPT:
- icsk->icsk_accept_queue.rskq_defer_accept = 0;
- if (val > 0) {
- /* Translate value in seconds to number of
- * retransmits */
- while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
- val > ((TCP_TIMEOUT_INIT / HZ) <<
- icsk->icsk_accept_queue.rskq_defer_accept))
- icsk->icsk_accept_queue.rskq_defer_accept++;
- icsk->icsk_accept_queue.rskq_defer_accept++;
+ if (val < 0) {
+ err = -EINVAL;
+ } else {
+ if (val > MAX_TCP_ACCEPT_DEFERRED)
+ val = MAX_TCP_ACCEPT_DEFERRED;
+ icsk->icsk_accept_queue.rskq_defer_accept = val;
}
break;
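
A minimal userspace sketch of the new semantics (illustrative, not part
of the patch): the value handed to setsockopt(2) is now taken directly
as a timeout in seconds, clamped to MAX_TCP_ACCEPT_DEFERRED, rather than
translated into a SYN-ACK retransmit count; the matching getsockopt()
change below reports it back in seconds.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

/* Defer accept on listen_fd until data arrives, for up to 'seconds'. */
static int set_defer_accept(int listen_fd, int seconds)
{
    if (setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
                   &seconds, sizeof(seconds)) < 0) {
        perror("setsockopt(TCP_DEFER_ACCEPT)");
        return -1;
    }
    return 0;
}
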
val = (val ? : sysctl_tcp_fin_timeout) / HZ;
break;
case TCP_DEFER_ACCEPT:
- val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
- ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+ val = icsk->icsk_accept_queue.rskq_defer_accept;
break;
case TCP_WINDOW_CLAMP:
val = tp->window_clamp;
}
}
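+/* Runs on the child while its accept is deferred: once real payload
+ * (not just a FIN) has arrived and the listener is still listening,
+ * hand the child to the listener's accept queue and wake it; if only
+ * a FIN arrived or the listener has gone away, reset the connection.
+ */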
+static int tcp_defer_accept_check(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->defer_tcp_accept.request) {
+ int queued_data = tp->rcv_nxt - tp->copied_seq;
+ int hasfin = !skb_queue_empty(&sk->sk_receive_queue) ?
+ tcp_hdr((struct sk_buff *)
+ sk->sk_receive_queue.prev)->fin : 0;
+
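+ /* A FIN occupies one sequence number but carries no payload. */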
+ if (queued_data && hasfin)
+ queued_data--;
+
+ if (queued_data &&
+ tp->defer_tcp_accept.listen_sk->sk_state == TCP_LISTEN) {
+ if (sock_flag(sk, SOCK_KEEPOPEN)) {
+ inet_csk_reset_keepalive_timer(sk,
+ keepalive_time_when(tp));
+ } else {
+ inet_csk_delete_keepalive_timer(sk);
+ }
+
+ inet_csk_reqsk_queue_add(
+ tp->defer_tcp_accept.listen_sk,
+ tp->defer_tcp_accept.request,
+ sk);
+
+ tp->defer_tcp_accept.listen_sk->sk_data_ready(
+ tp->defer_tcp_accept.listen_sk, 0);
+
+ sock_put(tp->defer_tcp_accept.listen_sk);
+ sock_put(sk);
+ tp->defer_tcp_accept.listen_sk = NULL;
+ tp->defer_tcp_accept.request = NULL;
+ } else if (hasfin ||
+ tp->defer_tcp_accept.listen_sk->sk_state != TCP_LISTEN) {
+ tcp_reset(sk);
+ return -1;
+ }
+ }
+ return 0;
+}
+
static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
{
struct tcp_sock *tp = tcp_sk(sk);
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
+
+ if (tcp_defer_accept_check(sk))
+ return -1;
return 0;
csum_error:
sk->sk_sndmsg_page = NULL;
}
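+ /* Drop a still-pending deferred request and the listener/child
+ * references taken when the accept was deferred.
+ */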
+ if (tp->defer_tcp_accept.request) {
+ reqsk_free(tp->defer_tcp_accept.request);
+ sock_put(tp->defer_tcp_accept.listen_sk);
+ sock_put(sk);
+ tp->defer_tcp_accept.listen_sk = NULL;
+ tp->defer_tcp_accept.request = NULL;
+ }
+
atomic_dec(&tcp_sockets_allocated);
return 0;
does sequence test, SYN is truncated, and thus we consider
it a bare ACK.
- If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
- bare ACK. Otherwise, we create an established connection. Both
- ends (listening sockets) accept the new incoming connection and try
- to talk to each other. 8-)
+ Both ends (listening sockets) accept the new incoming
+ connection and try to talk to each other. 8-)
Note: This case is both harmless, and rare. Possibility is about the
same as us discovering intelligent life on another planet tomorrow.
if (!(flg & TCP_FLAG_ACK))
return NULL;
- /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
- if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
- TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
- inet_rsk(req)->acked = 1;
- return NULL;
- }
-
/* OK, ACK is valid, create big socket and
* feed this segment to it. It will repeat all
* the tests. THIS SEGMENT MUST MOVE SOCKET TO
inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req);
- inet_csk_reqsk_queue_add(sk, req, child);
+ if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+ TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+
+ /* The accept-queue handling is done in the established receive
+ * slow path, so make sure processing starts there.
+ */
+ tcp_sk(child)->pred_flags = 0;
+ sock_hold(sk);
+ sock_hold(child);
+ tcp_sk(child)->defer_tcp_accept.listen_sk = sk;
+ tcp_sk(child)->defer_tcp_accept.request = req;
+
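+ /* Reuse the child's keepalive timer as the deferral timeout;
+ * rskq_defer_accept is now expressed in seconds.
+ */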
+ inet_csk_reset_keepalive_timer(child,
+ inet_csk(sk)->icsk_accept_queue.rskq_defer_accept * HZ);
+ } else {
+ inet_csk_reqsk_queue_add(sk, req, child);
+ }
+
return child;
listen_overflow:
goto death;
}
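+ /* Defer window expired with the request still pending: reset the child. */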
+ if (tp->defer_tcp_accept.request && sk->sk_state == TCP_ESTABLISHED) {
+ tcp_send_active_reset(sk, GFP_ATOMIC);
+ goto death;
+ }
+
if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
goto out;
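
As an end-to-end illustration of the behavior (hypothetical demo, not
part of the patch; loopback port 12345 chosen arbitrarily): with
TCP_DEFER_ACCEPT set, accept(2) should complete only once the peer has
sent data, at which point that data is already queued on the new socket;
a peer that stays silent past the timeout is reset by the keepalive-timer
path above.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int defer_secs = 5, on = 1;
    struct sockaddr_in addr;
    int lfd = socket(AF_INET, SOCK_STREAM, 0);

    setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
    setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
               &defer_secs, sizeof(defer_secs));

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = htons(12345);
    bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
    listen(lfd, 16);

    /* Blocks until a connected peer sends data (or the defer
     * window is handled by the kernel). */
    int cfd = accept(lfd, NULL, NULL);
    if (cfd >= 0) {
        char buf[256];
        ssize_t n = read(cfd, buf, sizeof(buf));
        printf("accepted; %zd bytes already queued\n", n);
        close(cfd);
    }
    close(lfd);
    return 0;
}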