Revert "net: core: Support UID-based routing."
android_kernel_alcatel_ttab.git: net/ipv4/inet_connection_sock.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
	.range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);

void inet_get_local_port_range(int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
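
/*
 * Illustrative usage (a sketch, not part of this file's API): callers that
 * need a consistent snapshot of the ephemeral range read it through the
 * helper above:
 *
 *	int low, high;
 *	inet_get_local_port_range(&low, &high);
 *	// (high - low) + 1 candidate ports; the pair is never torn, since
 *	// the seqlock loop retries if a sysctl write raced with the read
 */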

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
		}
	}
	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
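
/*
 * Worked example (illustrative): two sockets with SO_REUSEADDR binding
 * 0.0.0.0:8080 do not conflict until one of them enters TCP_LISTEN; with
 * SO_REUSEPORT, sockets owned by the same UID may share the port even
 * while listening. The loop above reports a conflict as soon as it finds
 * a bound socket that neither rule excuses and whose address overlaps.
 */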

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;
	kuid_t uid = sock_i_uid(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (((tb->fastreuse > 0 &&
					      sk->sk_reuse &&
					      sk->sk_state != TCP_LISTEN) ||
					     (tb->fastreuseport > 0 &&
					      sk->sk_reuseport &&
					      uid_eq(tb->fastuid, uid))) &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 &&
		      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
		     (tb->fastreuseport > 0 &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else
			tb->fastreuseport = 0;
	} else {
		if (tb->fastreuse &&
		    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
			tb->fastreuse = 0;
		if (tb->fastreuseport &&
		    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
			tb->fastreuseport = 0;
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
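
/*
 * Sketch of the ephemeral-port walk above (descriptive, no new API):
 * starting from a random rover in [low, high], each candidate is skipped
 * if reserved, remembered as 'smallest_rover' if its bucket is reusable
 * and lightly owned, taken outright if bind_conflict() clears it, and
 * otherwise the rover advances (wrapping at high) until 'remaining' ports
 * have been tried; on exhaustion the best remembered port is retried via
 * the have_snum path.
 */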

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
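
/*
 * Exit conditions of the wait loop above, summarized (descriptive): 0 once
 * a request sits on the accept queue, -EINVAL if the socket left TCP_LISTEN
 * while we slept, -ERESTARTSYS or -EINTR via sock_intr_errno() on a signal,
 * and -EAGAIN when the caller's timeout is exhausted.
 */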

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct sock *newsk;
	struct request_sock *req;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
		spin_lock_bh(&queue->fastopenq->lock);
		if (tcp_rsk(req)->listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq->lock);
	}
out:
	release_sock(sk);
	if (req)
		__reqsk_free(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
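
/*
 * TCP Fast Open note (descriptive): for a TFO child the final ACK of the
 * 3WHS may still be outstanding when accept() returns, so the request is
 * detached (req->sk = NULL) instead of freed here; reqsk_fastopen_remove()
 * frees it once the handshake completes or is aborted.
 */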

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options_rcu *opt = inet_rsk(req)->opt;
	struct net *net = sock_net(sk);
	int flags = inet_sk_flowi_flags(sk);

	flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
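
/*
 * Note (descriptive): the flowi4 built above is the routing key - bound
 * device, request mark, TOS/scope, protocol, ports, and the peer address
 * (or the first hop of a source-route option when SRR is present). The
 * same key is rebuilt for the child socket in the function below.
 */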

struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct net *net = sock_net(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
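
/*
 * Example (illustrative): with synq_hsize == 512 the mask keeps the low
 * nine bits of the jhash, yielding a valid index 0..511; this relies on
 * the SYN table size being rounded up to a power of two at listen time.
 */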

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}
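
/*
 * Behavior sketch (descriptive): without TCP_DEFER_ACCEPT a request simply
 * expires after 'thresh' timeouts and every tick resends the SYN-ACK. With
 * defer-accept, an ACKed request is kept quiet while we wait for data, and
 * resending resumes only near the end of the deferring period as a last
 * chance to turn it into an established socket.
 */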

int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !inet_rtx_syn_ack(parent, req) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->num_timeout++ == 0)
						lopt->qlen_young--;
					timeo = min(timeout << req->num_timeout,
						    max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
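
/*
 * Worked example (illustrative): with sysctl_tcp_synack_retries == 5 and
 * the SYN table more than half full, thresh starts at 5 and drops by one
 * for each doubling of qlen over qlen_young (to 4 at qlen >= 2*qlen_young,
 * 3 at qlen >= 4*qlen_young, never below 2), so stale requests are aborted
 * sooner the more they crowd out young ones.
 */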

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		inet_sk(newsk)->mc_list = NULL;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);
#ifdef CONFIG_MTK_NET_LOGGING
		printk(KERN_WARNING "[mtk_net][socket] inet_csk_listen_start inet->inet_sport:%d,inet->inet_num:%d\n",
		       inet->inet_sport, inet->inet_num);
#endif
		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
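
/*
 * Typical call path (descriptive sketch): the socket-level listen()
 * handler, e.g. inet_listen() for TCP, invokes inet_csk_listen_start()
 * with a table size derived from the backlog; on success the socket is
 * hashed and reachable, on -EADDRINUSE the state is rolled back to
 * TCP_CLOSE.
 */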

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now. --ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != tcp_rsk(req)->listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
			sock_put(sk);
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	if (queue->fastopenq != NULL) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			__reqsk_free(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
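
/*
 * Note (descriptive): inet_csk_update_pmtu() re-validates the cached route
 * after pushing the new MTU, because update_pmtu() may have invalidated
 * it; if so, the route is rebuilt from the socket's cork flow, and callers
 * get back a usable dst or NULL when no route remains.
 */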