Commit | Line | Data |
---|---|---|
3f421baa ACM |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * Support for INET connection oriented protocols. | |
7 | * | |
8 | * Authors: See the TCP sources | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public License | |
12 | * as published by the Free Software Foundation; either version | |
13 | 2 of the License, or (at your option) any later version.
14 | */ | |
15 | ||
3f421baa ACM |
16 | #include <linux/module.h> |
17 | #include <linux/jhash.h> | |
18 | ||
19 | #include <net/inet_connection_sock.h> | |
20 | #include <net/inet_hashtables.h> | |
21 | #include <net/inet_timewait_sock.h> | |
22 | #include <net/ip.h> | |
23 | #include <net/route.h> | |
24 | #include <net/tcp_states.h> | |
a019d6fe | 25 | #include <net/xfrm.h> |
3f421baa ACM |
26 | |
27 | #ifdef INET_CSK_DEBUG | |
28 | const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n"; | |
29 | EXPORT_SYMBOL(inet_csk_timer_bug_msg); | |
30 | #endif | |
31 | ||
32 | /* | |
3c689b73 | 33 | * This struct holds the first and last local port number. |
3f421baa | 34 | */ |
3c689b73 | 35 | struct local_ports sysctl_local_ports __read_mostly = { |
c4dbe54e | 36 | .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock), |
3c689b73 ED |
37 | .range = { 32768, 61000 }, |
38 | }; | |
227b60f5 | 39 | |
e3826f1e AW |
40 | unsigned long *sysctl_local_reserved_ports; |
41 | EXPORT_SYMBOL(sysctl_local_reserved_ports); | |
42 | ||
227b60f5 SH |
43 | void inet_get_local_port_range(int *low, int *high) |
44 | { | |
95c96174 ED |
45 | unsigned int seq; |
46 | ||
227b60f5 | 47 | do { |
3c689b73 | 48 | seq = read_seqbegin(&sysctl_local_ports.lock); |
227b60f5 | 49 | |
3c689b73 ED |
50 | *low = sysctl_local_ports.range[0]; |
51 | *high = sysctl_local_ports.range[1]; | |
52 | } while (read_seqretry(&sysctl_local_ports.lock, seq)); | |
227b60f5 SH |
53 | } |
54 | EXPORT_SYMBOL(inet_get_local_port_range); | |
3f421baa | 55 | |
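The read_seqbegin()/read_seqretry() loop in inet_get_local_port_range() pairs with a writer that must publish both ends of the range under the same seqlock, so a reader never observes a torn pair. The real writer lives in the ip_local_port_range sysctl handler; a minimal sketch of that side of the protocol (illustrative, not code from this file):

```c
/* Illustrative writer side of the seqlock protocol used by
 * inet_get_local_port_range(); the actual update is performed by the
 * ip_local_port_range sysctl handler.
 */
static void set_local_port_range(int low, int high)
{
	write_seqlock(&sysctl_local_ports.lock);
	sysctl_local_ports.range[0] = low;
	sysctl_local_ports.range[1] = high;
	write_sequnlock(&sysctl_local_ports.lock);
}
```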
971af18b | 56 | int inet_csk_bind_conflict(const struct sock *sk, |
aacd9289 | 57 | const struct inet_bind_bucket *tb, bool relax) |
3f421baa | 58 | { |
3f421baa | 59 | struct sock *sk2; |
3f421baa | 60 | int reuse = sk->sk_reuse; |
da5e3630 TH |
61 | int reuseport = sk->sk_reuseport; |
62 | kuid_t uid = sock_i_uid((struct sock *)sk); | |
3f421baa | 63 | |
7477fd2e PE |
64 | /* |
65 | * Unlike other sk lookup places we do not check | |
66 | * for sk_net here, since _all_ the socks listed | |
67 | * in tb->owners list belong to the same net - the | |
68 | * one this bucket belongs to. | |
69 | */ | |
70 | ||
b67bfe0d | 71 | sk_for_each_bound(sk2, &tb->owners) { |
3f421baa ACM |
72 | if (sk != sk2 && |
73 | !inet_v6_ipv6only(sk2) && | |
74 | (!sk->sk_bound_dev_if || | |
75 | !sk2->sk_bound_dev_if || | |
76 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { | |
da5e3630 TH |
77 | if ((!reuse || !sk2->sk_reuse || |
78 | sk2->sk_state == TCP_LISTEN) && | |
79 | (!reuseport || !sk2->sk_reuseport || | |
80 | (sk2->sk_state != TCP_TIME_WAIT && | |
81 | !uid_eq(uid, sock_i_uid(sk2))))) { | |
68835aba ED |
82 | const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); |
83 | if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || | |
84 | sk2_rcv_saddr == sk_rcv_saddr(sk)) | |
3f421baa | 85 | break; |
8d238b25 | 86 | } |
aacd9289 AC |
87 | if (!relax && reuse && sk2->sk_reuse && |
88 | sk2->sk_state != TCP_LISTEN) { | |
89 | const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); | |
90 | ||
91 | if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || | |
92 | sk2_rcv_saddr == sk_rcv_saddr(sk)) | |
93 | break; | |
94 | } | |
3f421baa ACM |
95 | } |
96 | } | |
b67bfe0d | 97 | return sk2 != NULL; |
3f421baa | 98 | } |
971af18b ACM |
99 | EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); |
100 | ||
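The conflict rules in inet_csk_bind_conflict() are what user space sees through SO_REUSEADDR and SO_REUSEPORT: a second bind() to an occupied port is allowed when both sockets set SO_REUSEPORT and belong to the same uid, or when SO_REUSEADDR applies and the existing owner is not listening; anything else fails with EADDRINUSE. A minimal user-space sketch of the permitted case (illustrative, not part of this file):

```c
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Create a listening TCP socket on `port`. With `reuseport` set on every
 * caller (and the same uid), a second listener on the same port passes the
 * bind-conflict check; without it, bind() fails with EADDRINUSE.
 */
static int make_listener(uint16_t port, int reuseport)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	if (fd < 0)
		return -1;
	if (reuseport)
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 16) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
```

Calling make_listener(8080, 1) twice as the same user succeeds on kernels that carry the fastreuseport logic above; with reuseport set to 0 the second call fails.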
3f421baa ACM |
101 | /* Obtain a reference to a local port for the given sock, |
102 | * if snum is zero it means select any available local port. | |
103 | */ | |
ab1e0a13 | 104 | int inet_csk_get_port(struct sock *sk, unsigned short snum) |
3f421baa | 105 | { |
39d8cda7 | 106 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; |
3f421baa | 107 | struct inet_bind_hashbucket *head; |
3f421baa | 108 | struct inet_bind_bucket *tb; |
a9d8f911 | 109 | int ret, attempts = 5; |
3b1e0a65 | 110 | struct net *net = sock_net(sk); |
a9d8f911 | 111 | int smallest_size = -1, smallest_rover; |
da5e3630 | 112 | kuid_t uid = sock_i_uid(sk); |
3f421baa ACM |
113 | |
114 | local_bh_disable(); | |
115 | if (!snum) { | |
227b60f5 SH |
116 | int remaining, rover, low, high; |
117 | ||
a9d8f911 | 118 | again: |
227b60f5 | 119 | inet_get_local_port_range(&low, &high); |
a25de534 | 120 | remaining = (high - low) + 1; |
a9d8f911 | 121 | smallest_rover = rover = net_random() % remaining + low; |
3f421baa | 122 | |
a9d8f911 | 123 | smallest_size = -1; |
3f421baa | 124 | do { |
e3826f1e AW |
125 | if (inet_is_reserved_local_port(rover)) |
126 | goto next_nolock; | |
7f635ab7 PE |
127 | head = &hashinfo->bhash[inet_bhashfn(net, rover, |
128 | hashinfo->bhash_size)]; | |
3f421baa | 129 | spin_lock(&head->lock); |
b67bfe0d | 130 | inet_bind_bucket_for_each(tb, &head->chain) |
09ad9bc7 | 131 | if (net_eq(ib_net(tb), net) && tb->port == rover) { |
da5e3630 TH |
132 | if (((tb->fastreuse > 0 && |
133 | sk->sk_reuse && | |
134 | sk->sk_state != TCP_LISTEN) || | |
135 | (tb->fastreuseport > 0 && | |
136 | sk->sk_reuseport && | |
137 | uid_eq(tb->fastuid, uid))) && | |
a9d8f911 EP |
138 | (tb->num_owners < smallest_size || smallest_size == -1)) { |
139 | smallest_size = tb->num_owners; | |
140 | smallest_rover = rover; | |
aacd9289 AC |
141 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && |
142 | !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { | |
8d238b25 | 143 | snum = smallest_rover; |
fddb7b57 | 144 | goto tb_found; |
a9d8f911 EP |
145 | } |
146 | } | |
aacd9289 | 147 | if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { |
2b05ad33 | 148 | snum = rover; |
fddb7b57 | 149 | goto tb_found; |
2b05ad33 | 150 | } |
3f421baa | 151 | goto next; |
a9d8f911 | 152 | } |
3f421baa ACM |
153 | break; |
154 | next: | |
155 | spin_unlock(&head->lock); | |
e3826f1e | 156 | next_nolock: |
6df71634 SH |
157 | if (++rover > high) |
158 | rover = low; | |
3f421baa | 159 | } while (--remaining > 0); |
3f421baa ACM |
160 | |
161 | /* Exhausted local port range during search? It is not | |
162 | * possible for us to be holding one of the bind hash | |
163 | * locks if this test triggers, because if 'remaining' | |
164 | * drops to zero, we broke out of the do/while loop at | |
165 | * the top level, not from the 'break;' statement. | |
166 | */ | |
167 | ret = 1; | |
a9d8f911 EP |
168 | if (remaining <= 0) { |
169 | if (smallest_size != -1) { | |
170 | snum = smallest_rover; | |
171 | goto have_snum; | |
172 | } | |
3f421baa | 173 | goto fail; |
a9d8f911 | 174 | } |
3f421baa ACM |
175 | /* OK, here is the one we will use. HEAD is |
176 | * non-NULL and we hold its mutex.
177 | */ | |
178 | snum = rover; | |
179 | } else { | |
a9d8f911 | 180 | have_snum: |
7f635ab7 PE |
181 | head = &hashinfo->bhash[inet_bhashfn(net, snum, |
182 | hashinfo->bhash_size)]; | |
3f421baa | 183 | spin_lock(&head->lock); |
b67bfe0d | 184 | inet_bind_bucket_for_each(tb, &head->chain) |
09ad9bc7 | 185 | if (net_eq(ib_net(tb), net) && tb->port == snum) |
3f421baa ACM |
186 | goto tb_found; |
187 | } | |
188 | tb = NULL; | |
189 | goto tb_not_found; | |
190 | tb_found: | |
191 | if (!hlist_empty(&tb->owners)) { | |
4a17fd52 PE |
192 | if (sk->sk_reuse == SK_FORCE_REUSE) |
193 | goto success; | |
194 | ||
da5e3630 TH |
195 | if (((tb->fastreuse > 0 && |
196 | sk->sk_reuse && sk->sk_state != TCP_LISTEN) || | |
197 | (tb->fastreuseport > 0 && | |
198 | sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && | |
a9d8f911 | 199 | smallest_size == -1) { |
3f421baa ACM |
200 | goto success; |
201 | } else { | |
202 | ret = 1; | |
aacd9289 | 203 | if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) { |
da5e3630 | 204 | if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) || |
9c5e0c0b TH |
205 | (tb->fastreuseport > 0 && |
206 | sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && | |
5add3009 | 207 | smallest_size != -1 && --attempts >= 0) { |
a9d8f911 EP |
208 | spin_unlock(&head->lock); |
209 | goto again; | |
210 | } | |
aacd9289 | 211 | |
3f421baa | 212 | goto fail_unlock; |
a9d8f911 | 213 | } |
3f421baa ACM |
214 | } |
215 | } | |
216 | tb_not_found: | |
217 | ret = 1; | |
941b1d22 PE |
218 | if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, |
219 | net, head, snum)) == NULL) | |
3f421baa ACM |
220 | goto fail_unlock; |
221 | if (hlist_empty(&tb->owners)) { | |
222 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) | |
223 | tb->fastreuse = 1; | |
224 | else | |
225 | tb->fastreuse = 0; | |
da5e3630 TH |
226 | if (sk->sk_reuseport) { |
227 | tb->fastreuseport = 1; | |
228 | tb->fastuid = uid; | |
9c5e0c0b | 229 | } else |
da5e3630 | 230 | tb->fastreuseport = 0; |
da5e3630 TH |
231 | } else { |
232 | if (tb->fastreuse && | |
233 | (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) | |
234 | tb->fastreuse = 0; | |
235 | if (tb->fastreuseport && | |
9c5e0c0b | 236 | (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))) |
da5e3630 | 237 | tb->fastreuseport = 0; |
da5e3630 | 238 | } |
3f421baa ACM |
239 | success: |
240 | if (!inet_csk(sk)->icsk_bind_hash) | |
241 | inet_bind_hash(sk, tb, snum); | |
547b792c | 242 | WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); |
e905a9ed | 243 | ret = 0; |
3f421baa ACM |
244 | |
245 | fail_unlock: | |
246 | spin_unlock(&head->lock); | |
247 | fail: | |
248 | local_bh_enable(); | |
249 | return ret; | |
250 | } | |
3f421baa ACM |
251 | EXPORT_SYMBOL_GPL(inet_csk_get_port); |
252 | ||
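For the snum == 0 branch handled above, the user-visible contract is that binding to port 0 lets inet_csk_get_port() pick a free port from the configured local port range, and getsockname() reports the choice. A small user-space illustration (not part of this file):

```c
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;
	socklen_t len = sizeof(addr);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(0);	/* ask the kernel to pick a free port */
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	getsockname(fd, (struct sockaddr *)&addr, &len);
	printf("kernel chose port %u\n", ntohs(addr.sin_port));
	close(fd);
	return 0;
}
```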
253 | /* | |
254 | * Wait for an incoming connection, avoid race conditions. This must be called | |
255 | * with the socket locked. | |
256 | */ | |
257 | static int inet_csk_wait_for_connect(struct sock *sk, long timeo) | |
258 | { | |
259 | struct inet_connection_sock *icsk = inet_csk(sk); | |
260 | DEFINE_WAIT(wait); | |
261 | int err; | |
262 | ||
263 | /* | |
264 | * True wake-one mechanism for incoming connections: only | |
265 | * one process gets woken up, not the 'whole herd'. | |
266 | * Since we do not 'race & poll' for established sockets | |
267 | * anymore, the common case will execute the loop only once. | |
268 | * | |
269 | * Subtle issue: "add_wait_queue_exclusive()" will be added | |
270 | * after any current non-exclusive waiters, and we know that | |
271 | * it will always _stay_ after any new non-exclusive waiters | |
272 | * because all non-exclusive waiters are added at the | |
273 | * beginning of the wait-queue. As such, it's ok to "drop" | |
274 | * our exclusiveness temporarily when we get woken up without | |
275 | * having to remove and re-insert us on the wait queue. | |
276 | */ | |
277 | for (;;) { | |
aa395145 | 278 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
3f421baa ACM |
279 | TASK_INTERRUPTIBLE); |
280 | release_sock(sk); | |
281 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) | |
282 | timeo = schedule_timeout(timeo); | |
283 | lock_sock(sk); | |
284 | err = 0; | |
285 | if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) | |
286 | break; | |
287 | err = -EINVAL; | |
288 | if (sk->sk_state != TCP_LISTEN) | |
289 | break; | |
290 | err = sock_intr_errno(timeo); | |
291 | if (signal_pending(current)) | |
292 | break; | |
293 | err = -EAGAIN; | |
294 | if (!timeo) | |
295 | break; | |
296 | } | |
aa395145 | 297 | finish_wait(sk_sleep(sk), &wait); |
3f421baa ACM |
298 | return err; |
299 | } | |
300 | ||
301 | /* | |
302 | * This will accept the next outstanding connection. | |
303 | */ | |
304 | struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) | |
305 | { | |
306 | struct inet_connection_sock *icsk = inet_csk(sk); | |
8336886f | 307 | struct request_sock_queue *queue = &icsk->icsk_accept_queue; |
3f421baa | 308 | struct sock *newsk; |
8336886f | 309 | struct request_sock *req; |
3f421baa ACM |
310 | int error; |
311 | ||
312 | lock_sock(sk); | |
313 | ||
314 | /* We need to make sure that this socket is listening, | |
315 | * and that it has something pending. | |
316 | */ | |
317 | error = -EINVAL; | |
318 | if (sk->sk_state != TCP_LISTEN) | |
319 | goto out_err; | |
320 | ||
321 | /* Find already established connection */ | |
8336886f | 322 | if (reqsk_queue_empty(queue)) { |
3f421baa ACM |
323 | long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); |
324 | ||
325 | /* If this is a non-blocking socket, don't sleep */
326 | error = -EAGAIN; | |
327 | if (!timeo) | |
328 | goto out_err; | |
329 | ||
330 | error = inet_csk_wait_for_connect(sk, timeo); | |
331 | if (error) | |
332 | goto out_err; | |
333 | } | |
8336886f JC |
334 | req = reqsk_queue_remove(queue); |
335 | newsk = req->sk; | |
336 | ||
337 | sk_acceptq_removed(sk); | |
7ab4551f | 338 | if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) { |
8336886f JC |
339 | spin_lock_bh(&queue->fastopenq->lock); |
340 | if (tcp_rsk(req)->listener) { | |
341 | /* We are still waiting for the final ACK from 3WHS | |
342 | * so can't free req now. Instead, we set req->sk to | |
343 | * NULL to signify that the child socket is taken | |
344 | * so reqsk_fastopen_remove() will free the req | |
345 | * when 3WHS finishes (or is aborted). | |
346 | */ | |
347 | req->sk = NULL; | |
348 | req = NULL; | |
349 | } | |
350 | spin_unlock_bh(&queue->fastopenq->lock); | |
351 | } | |
3f421baa ACM |
352 | out: |
353 | release_sock(sk); | |
8336886f JC |
354 | if (req) |
355 | __reqsk_free(req); | |
3f421baa ACM |
356 | return newsk; |
357 | out_err: | |
358 | newsk = NULL; | |
8336886f | 359 | req = NULL; |
3f421baa ACM |
360 | *err = error; |
361 | goto out; | |
362 | } | |
3f421baa ACM |
363 | EXPORT_SYMBOL(inet_csk_accept); |
364 | ||
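The timeo == 0 path in inet_csk_accept() is what a non-blocking listener experiences: when the accept queue is empty, accept() fails immediately with EAGAIN instead of sleeping in inet_csk_wait_for_connect(). An illustrative user-space counterpart:

```c
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/socket.h>

/* Poll a non-blocking listening socket once; returns the accepted fd, or
 * -1 with errno set to EAGAIN/EWOULDBLOCK when no connection is queued.
 */
static int try_accept(int listen_fd)
{
	int flags = fcntl(listen_fd, F_GETFL, 0);
	int fd;

	fcntl(listen_fd, F_SETFL, flags | O_NONBLOCK);
	fd = accept(listen_fd, NULL, NULL);
	if (fd < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		fprintf(stderr, "accept queue empty, try again later\n");
	return fd;
}
```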
365 | /* | |
366 | * Using different timers for retransmit, delayed acks and probes | |
e905a9ed | 367 | * We may wish to use just one timer maintaining a list of expiry jiffies
3f421baa ACM |
368 | * to optimize. |
369 | */ | |
370 | void inet_csk_init_xmit_timers(struct sock *sk, | |
371 | void (*retransmit_handler)(unsigned long), | |
372 | void (*delack_handler)(unsigned long), | |
373 | void (*keepalive_handler)(unsigned long)) | |
374 | { | |
375 | struct inet_connection_sock *icsk = inet_csk(sk); | |
376 | ||
b24b8a24 PE |
377 | setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler, |
378 | (unsigned long)sk); | |
379 | setup_timer(&icsk->icsk_delack_timer, delack_handler, | |
380 | (unsigned long)sk); | |
381 | setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk); | |
3f421baa ACM |
382 | icsk->icsk_pending = icsk->icsk_ack.pending = 0; |
383 | } | |
3f421baa ACM |
384 | EXPORT_SYMBOL(inet_csk_init_xmit_timers); |
385 | ||
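For context, a connection-oriented protocol hands its three handlers to this helper at socket-initialisation time; TCP's wiring, paraphrased from the TCP sources of the same era (handler bodies omitted), looks roughly like:

```c
/* Paraphrased sketch of how TCP registers its retransmit, delayed-ACK and
 * keepalive handlers through inet_csk_init_xmit_timers().
 */
void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}
```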
386 | void inet_csk_clear_xmit_timers(struct sock *sk) | |
387 | { | |
388 | struct inet_connection_sock *icsk = inet_csk(sk); | |
389 | ||
390 | icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0; | |
391 | ||
392 | sk_stop_timer(sk, &icsk->icsk_retransmit_timer); | |
393 | sk_stop_timer(sk, &icsk->icsk_delack_timer); | |
394 | sk_stop_timer(sk, &sk->sk_timer); | |
395 | } | |
3f421baa ACM |
396 | EXPORT_SYMBOL(inet_csk_clear_xmit_timers); |
397 | ||
398 | void inet_csk_delete_keepalive_timer(struct sock *sk) | |
399 | { | |
400 | sk_stop_timer(sk, &sk->sk_timer); | |
401 | } | |
3f421baa ACM |
402 | EXPORT_SYMBOL(inet_csk_delete_keepalive_timer); |
403 | ||
404 | void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) | |
405 | { | |
406 | sk_reset_timer(sk, &sk->sk_timer, jiffies + len); | |
407 | } | |
3f421baa ACM |
408 | EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); |
409 | ||
d9319100 | 410 | struct dst_entry *inet_csk_route_req(struct sock *sk, |
6bd023f3 | 411 | struct flowi4 *fl4, |
ba3f7f04 | 412 | const struct request_sock *req) |
3f421baa ACM |
413 | { |
414 | struct rtable *rt; | |
415 | const struct inet_request_sock *ireq = inet_rsk(req); | |
f6d8bd05 | 416 | struct ip_options_rcu *opt = inet_rsk(req)->opt; |
84a3aa00 | 417 | struct net *net = sock_net(sk); |
3e12939a | 418 | int flags = inet_sk_flowi_flags(sk); |
3f421baa | 419 | |
6fa3eb70 | 420 | flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark, |
e79d9bc7 | 421 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, |
7433819a | 422 | sk->sk_protocol, |
7586eceb | 423 | flags, |
f6d8bd05 | 424 | (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, |
6fa3eb70 S |
425 | ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport, |
426 | sock_i_uid(sk)); | |
6bd023f3 DM |
427 | security_req_classify_flow(req, flowi4_to_flowi(fl4)); |
428 | rt = ip_route_output_flow(net, fl4, sk); | |
b23dd4fe | 429 | if (IS_ERR(rt)) |
857a6e0a | 430 | goto no_route; |
155e8336 | 431 | if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) |
857a6e0a | 432 | goto route_err; |
d8d1f30b | 433 | return &rt->dst; |
857a6e0a IJ |
434 | |
435 | route_err: | |
436 | ip_rt_put(rt); | |
437 | no_route: | |
438 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); | |
439 | return NULL; | |
3f421baa | 440 | } |
3f421baa ACM |
441 | EXPORT_SYMBOL_GPL(inet_csk_route_req); |
442 | ||
77357a95 DM |
443 | struct dst_entry *inet_csk_route_child_sock(struct sock *sk, |
444 | struct sock *newsk, | |
445 | const struct request_sock *req) | |
446 | { | |
447 | const struct inet_request_sock *ireq = inet_rsk(req); | |
448 | struct inet_sock *newinet = inet_sk(newsk); | |
1a7b27c9 | 449 | struct ip_options_rcu *opt; |
77357a95 DM |
450 | struct net *net = sock_net(sk); |
451 | struct flowi4 *fl4; | |
452 | struct rtable *rt; | |
453 | ||
454 | fl4 = &newinet->cork.fl.u.ip4; | |
1a7b27c9 CP |
455 | |
456 | rcu_read_lock(); | |
457 | opt = rcu_dereference(newinet->inet_opt); | |
6fa3eb70 | 458 | flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark, |
77357a95 DM |
459 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, |
460 | sk->sk_protocol, inet_sk_flowi_flags(sk), | |
461 | (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, | |
6fa3eb70 S |
462 | ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport, |
463 | sock_i_uid(sk)); | |
77357a95 DM |
464 | security_req_classify_flow(req, flowi4_to_flowi(fl4)); |
465 | rt = ip_route_output_flow(net, fl4, sk); | |
466 | if (IS_ERR(rt)) | |
467 | goto no_route; | |
155e8336 | 468 | if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) |
77357a95 | 469 | goto route_err; |
1a7b27c9 | 470 | rcu_read_unlock(); |
77357a95 DM |
471 | return &rt->dst; |
472 | ||
473 | route_err: | |
474 | ip_rt_put(rt); | |
475 | no_route: | |
1a7b27c9 | 476 | rcu_read_unlock(); |
77357a95 DM |
477 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); |
478 | return NULL; | |
479 | } | |
480 | EXPORT_SYMBOL_GPL(inet_csk_route_child_sock); | |
481 | ||
6b72977b | 482 | static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, |
72a3effa | 483 | const u32 rnd, const u32 synq_hsize) |
3f421baa | 484 | { |
6b72977b | 485 | return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1); |
3f421baa ACM |
486 | } |
487 | ||
dfd56b8b | 488 | #if IS_ENABLED(CONFIG_IPV6) |
3f421baa ACM |
489 | #define AF_INET_FAMILY(fam) ((fam) == AF_INET) |
490 | #else | |
491 | #define AF_INET_FAMILY(fam) 1 | |
492 | #endif | |
493 | ||
494 | struct request_sock *inet_csk_search_req(const struct sock *sk, | |
495 | struct request_sock ***prevp, | |
6b72977b | 496 | const __be16 rport, const __be32 raddr, |
7f25afbb | 497 | const __be32 laddr) |
3f421baa ACM |
498 | { |
499 | const struct inet_connection_sock *icsk = inet_csk(sk); | |
500 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; | |
501 | struct request_sock *req, **prev; | |
502 | ||
503 | for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd, | |
504 | lopt->nr_table_entries)]; | |
505 | (req = *prev) != NULL; | |
506 | prev = &req->dl_next) { | |
507 | const struct inet_request_sock *ireq = inet_rsk(req); | |
508 | ||
509 | if (ireq->rmt_port == rport && | |
510 | ireq->rmt_addr == raddr && | |
511 | ireq->loc_addr == laddr && | |
512 | AF_INET_FAMILY(req->rsk_ops->family)) { | |
547b792c | 513 | WARN_ON(req->sk); |
3f421baa ACM |
514 | *prevp = prev; |
515 | break; | |
516 | } | |
517 | } | |
518 | ||
519 | return req; | |
520 | } | |
3f421baa ACM |
521 | EXPORT_SYMBOL_GPL(inet_csk_search_req); |
522 | ||
523 | void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, | |
c2977c22 | 524 | unsigned long timeout) |
3f421baa ACM |
525 | { |
526 | struct inet_connection_sock *icsk = inet_csk(sk); | |
527 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; | |
528 | const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, | |
529 | lopt->hash_rnd, lopt->nr_table_entries); | |
530 | ||
531 | reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); | |
532 | inet_csk_reqsk_queue_added(sk, timeout); | |
533 | } | |
4bc2f18b | 534 | EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add); |
3f421baa | 535 | |
a019d6fe ACM |
536 | /* Only thing we need from tcp.h */ |
537 | extern int sysctl_tcp_synack_retries; | |
538 | ||
9f1d2604 | 539 | |
0c3d79bc JA |
540 | /* Decide when to expire the request and when to resend SYN-ACK */ |
541 | static inline void syn_ack_recalc(struct request_sock *req, const int thresh, | |
542 | const int max_retries, | |
543 | const u8 rskq_defer_accept, | |
544 | int *expire, int *resend) | |
545 | { | |
546 | if (!rskq_defer_accept) { | |
e6c022a4 | 547 | *expire = req->num_timeout >= thresh; |
0c3d79bc JA |
548 | *resend = 1; |
549 | return; | |
550 | } | |
e6c022a4 ED |
551 | *expire = req->num_timeout >= thresh && |
552 | (!inet_rsk(req)->acked || req->num_timeout >= max_retries); | |
0c3d79bc JA |
553 | /* |
554 | * Do not resend while waiting for data after ACK, | |
555 | * start to resend on end of deferring period to give | |
556 | * last chance for data or ACK to create established socket. | |
557 | */ | |
558 | *resend = !inet_rsk(req)->acked || | |
e6c022a4 | 559 | req->num_timeout >= rskq_defer_accept - 1; |
0c3d79bc JA |
560 | } |
561 | ||
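rskq_defer_accept, which drives the branch above, is configured from user space with TCP_DEFER_ACCEPT: the listener asks the kernel to keep a completed handshake in the request queue until data arrives, or until the retry budget computed by syn_ack_recalc() runs out. A minimal sketch (illustrative):

```c
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Defer waking accept() until data arrives on the new connection, for
 * roughly `seconds` worth of SYN-ACK retransmissions.
 */
static int enable_defer_accept(int listen_fd, int seconds)
{
	return setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
			  &seconds, sizeof(seconds));
}
```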
e6c022a4 ED |
562 | int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) |
563 | { | |
1a2c6181 | 564 | int err = req->rsk_ops->rtx_syn_ack(parent, req); |
e6c022a4 ED |
565 | |
566 | if (!err) | |
567 | req->num_retrans++; | |
568 | return err; | |
569 | } | |
570 | EXPORT_SYMBOL(inet_rtx_syn_ack); | |
571 | ||
a019d6fe ACM |
572 | void inet_csk_reqsk_queue_prune(struct sock *parent, |
573 | const unsigned long interval, | |
574 | const unsigned long timeout, | |
575 | const unsigned long max_rto) | |
576 | { | |
577 | struct inet_connection_sock *icsk = inet_csk(parent); | |
578 | struct request_sock_queue *queue = &icsk->icsk_accept_queue; | |
579 | struct listen_sock *lopt = queue->listen_opt; | |
ec0a1966 DM |
580 | int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; |
581 | int thresh = max_retries; | |
a019d6fe ACM |
582 | unsigned long now = jiffies; |
583 | struct request_sock **reqp, *req; | |
584 | int i, budget; | |
585 | ||
586 | if (lopt == NULL || lopt->qlen == 0) | |
587 | return; | |
588 | ||
589 | /* Normally all the openreqs are young and become mature | |
590 | * (i.e. converted to an established socket) within the first timeout.
fd4f2cea | 591 | * If synack was not acknowledged for 1 second, it means |
a019d6fe ACM |
592 | * one of the following things: synack was lost, ack was lost, |
593 | * rtt is high or nobody planned to ack (i.e. synflood). | |
594 | * When server is a bit loaded, queue is populated with old | |
595 | * open requests, reducing effective size of queue. | |
596 | * When server is well loaded, queue size reduces to zero | |
597 | * after several minutes of work. It is not synflood, | |
598 | * it is normal operation. The solution is pruning | |
599 | * too old entries overriding normal timeout, when | |
600 | * situation becomes dangerous. | |
601 | * | |
602 | * Essentially, we reserve half of the room for young
603 | * embryos; and abort old ones without pity, if old
604 | * ones are about to clog our table. | |
605 | */ | |
606 | if (lopt->qlen>>(lopt->max_qlen_log-1)) { | |
607 | int young = (lopt->qlen_young<<1); | |
608 | ||
609 | while (thresh > 2) { | |
610 | if (lopt->qlen < young) | |
611 | break; | |
612 | thresh--; | |
613 | young <<= 1; | |
614 | } | |
615 | } | |
616 | ||
ec0a1966 DM |
617 | if (queue->rskq_defer_accept) |
618 | max_retries = queue->rskq_defer_accept; | |
619 | ||
a019d6fe ACM |
620 | budget = 2 * (lopt->nr_table_entries / (timeout / interval)); |
621 | i = lopt->clock_hand; | |
622 | ||
623 | do { | |
624 | reqp=&lopt->syn_table[i]; | |
625 | while ((req = *reqp) != NULL) { | |
626 | if (time_after_eq(now, req->expires)) { | |
0c3d79bc JA |
627 | int expire = 0, resend = 0; |
628 | ||
629 | syn_ack_recalc(req, thresh, max_retries, | |
630 | queue->rskq_defer_accept, | |
631 | &expire, &resend); | |
c72e1183 | 632 | req->rsk_ops->syn_ack_timeout(parent, req); |
0c3d79bc JA |
633 | if (!expire && |
634 | (!resend || | |
e6c022a4 | 635 | !inet_rtx_syn_ack(parent, req) || |
0c3d79bc | 636 | inet_rsk(req)->acked)) { |
a019d6fe ACM |
637 | unsigned long timeo; |
638 | ||
e6c022a4 | 639 | if (req->num_timeout++ == 0) |
a019d6fe | 640 | lopt->qlen_young--; |
e6c022a4 ED |
641 | timeo = min(timeout << req->num_timeout, |
642 | max_rto); | |
a019d6fe ACM |
643 | req->expires = now + timeo; |
644 | reqp = &req->dl_next; | |
645 | continue; | |
646 | } | |
647 | ||
648 | /* Drop this request */ | |
649 | inet_csk_reqsk_queue_unlink(parent, req, reqp); | |
650 | reqsk_queue_removed(queue, req); | |
651 | reqsk_free(req); | |
652 | continue; | |
653 | } | |
654 | reqp = &req->dl_next; | |
655 | } | |
656 | ||
657 | i = (i + 1) & (lopt->nr_table_entries - 1); | |
658 | ||
659 | } while (--budget > 0); | |
660 | ||
661 | lopt->clock_hand = i; | |
662 | ||
663 | if (lopt->qlen) | |
664 | inet_csk_reset_keepalive_timer(parent, interval); | |
665 | } | |
a019d6fe ACM |
666 | EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); |
667 | ||
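To make the thresh recalculation at the top of inet_csk_reqsk_queue_prune() concrete with illustrative numbers: with a 256-slot SYN table (max_qlen_log = 8), the adjustment only kicks in once qlen reaches 128. Suppose qlen is 200 but only 10 requests are still young (qlen_young = 10, so young starts at 20) and the default of five SYN-ACK retries applies. The loop then lowers thresh from 5 to 4, 3 and finally 2, doubling young each round (20, 40, 80) without ever satisfying qlen < young, so old embryos are aborted after just two timeouts and the table frees room for fresh connection attempts during a flood.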
e56c57d0 ED |
668 | /** |
669 | * inet_csk_clone_lock - clone an inet socket, and lock its clone | |
670 | * @sk: the socket to clone | |
671 | * @req: request_sock | |
672 | * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) | |
673 | * | |
674 | * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) | |
675 | */ | |
676 | struct sock *inet_csk_clone_lock(const struct sock *sk, | |
677 | const struct request_sock *req, | |
678 | const gfp_t priority) | |
9f1d2604 | 679 | { |
e56c57d0 | 680 | struct sock *newsk = sk_clone_lock(sk, priority); |
9f1d2604 ACM |
681 | |
682 | if (newsk != NULL) { | |
683 | struct inet_connection_sock *newicsk = inet_csk(newsk); | |
684 | ||
685 | newsk->sk_state = TCP_SYN_RECV; | |
686 | newicsk->icsk_bind_hash = NULL; | |
687 | ||
c720c7e8 ED |
688 | inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port; |
689 | inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port); | |
690 | inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port; | |
9f1d2604 ACM |
691 | newsk->sk_write_space = sk_stream_write_space; |
692 | ||
6fa3eb70 S |
693 | newsk->sk_mark = inet_rsk(req)->ir_mark; |
694 | ||
9f1d2604 | 695 | newicsk->icsk_retransmits = 0; |
6687e988 ACM |
696 | newicsk->icsk_backoff = 0; |
697 | newicsk->icsk_probes_out = 0; | |
9f1d2604 ACM |
698 | |
699 | /* Deinitialize accept_queue to trap illegal accesses. */ | |
700 | memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue)); | |
4237c75c VY |
701 | |
702 | security_inet_csk_clone(newsk, req); | |
9f1d2604 ACM |
703 | } |
704 | return newsk; | |
705 | } | |
e56c57d0 | 706 | EXPORT_SYMBOL_GPL(inet_csk_clone_lock); |
a019d6fe ACM |
707 | |
708 | /* | |
709 | * At this point, there should be no process reference to this | |
710 | * socket, and thus no user references at all. Therefore we | |
711 | * can assume the socket waitqueue is inactive and nobody will | |
712 | * try to jump onto it. | |
713 | */ | |
714 | void inet_csk_destroy_sock(struct sock *sk) | |
715 | { | |
547b792c IJ |
716 | WARN_ON(sk->sk_state != TCP_CLOSE); |
717 | WARN_ON(!sock_flag(sk, SOCK_DEAD)); | |
a019d6fe ACM |
718 | |
719 | /* It cannot be in hash table! */ | |
547b792c | 720 | WARN_ON(!sk_unhashed(sk)); |
a019d6fe | 721 | |
c720c7e8 ED |
722 | /* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
723 | WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash); | |
a019d6fe ACM |
724 | |
725 | sk->sk_prot->destroy(sk); | |
726 | ||
727 | sk_stream_kill_queues(sk); | |
728 | ||
729 | xfrm_sk_free_policy(sk); | |
730 | ||
731 | sk_refcnt_debug_release(sk); | |
732 | ||
dd24c001 | 733 | percpu_counter_dec(sk->sk_prot->orphan_count); |
a019d6fe ACM |
734 | sock_put(sk); |
735 | } | |
a019d6fe ACM |
736 | EXPORT_SYMBOL(inet_csk_destroy_sock); |
737 | ||
e337e24d CP |
738 | /* This function forces the closure of a socket after the call to
739 | * tcp/dccp_create_openreq_child(). | |
740 | */ | |
741 | void inet_csk_prepare_forced_close(struct sock *sk) | |
c10cb5fc | 742 | __releases(&sk->sk_lock.slock) |
e337e24d CP |
743 | { |
744 | /* sk_clone_lock locked the socket and set refcnt to 2 */ | |
745 | bh_unlock_sock(sk); | |
746 | sock_put(sk); | |
747 | ||
748 | /* The below has to be done to allow calling inet_csk_destroy_sock */ | |
749 | sock_set_flag(sk, SOCK_DEAD); | |
750 | percpu_counter_inc(sk->sk_prot->orphan_count); | |
751 | inet_sk(sk)->inet_num = 0; | |
752 | } | |
753 | EXPORT_SYMBOL(inet_csk_prepare_forced_close); | |
754 | ||
a019d6fe ACM |
755 | int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) |
756 | { | |
757 | struct inet_sock *inet = inet_sk(sk); | |
758 | struct inet_connection_sock *icsk = inet_csk(sk); | |
759 | int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries); | |
760 | ||
761 | if (rc != 0) | |
762 | return rc; | |
763 | ||
764 | sk->sk_max_ack_backlog = 0; | |
765 | sk->sk_ack_backlog = 0; | |
766 | inet_csk_delack_init(sk); | |
767 | ||
768 | /* There is a race window here: we announce ourselves listening,
769 | * but this transition is still not validated by get_port().
770 | * It is OK, because this socket enters the hash table only
771 | * after validation is complete. | |
772 | */ | |
773 | sk->sk_state = TCP_LISTEN; | |
c720c7e8 ED |
774 | if (!sk->sk_prot->get_port(sk, inet->inet_num)) { |
775 | inet->inet_sport = htons(inet->inet_num); | |
6fa3eb70 S |
776 | #ifdef CONFIG_MTK_NET_LOGGING |
777 | printk(KERN_WARNING "[mtk_net][socket] inet_csk_listen_start inet->inet_sport:%d, inet->inet_num:%d\n", inet->inet_sport, inet->inet_num);
778 | #endif | |
a019d6fe ACM |
779 | sk_dst_reset(sk); |
780 | sk->sk_prot->hash(sk); | |
781 | ||
782 | return 0; | |
783 | } | |
784 | ||
785 | sk->sk_state = TCP_CLOSE; | |
786 | __reqsk_queue_destroy(&icsk->icsk_accept_queue); | |
787 | return -EADDRINUSE; | |
788 | } | |
a019d6fe ACM |
789 | EXPORT_SYMBOL_GPL(inet_csk_listen_start); |
790 | ||
791 | /* | |
792 | * This routine closes sockets which have been at least partially | |
793 | * opened, but not yet accepted. | |
794 | */ | |
795 | void inet_csk_listen_stop(struct sock *sk) | |
796 | { | |
797 | struct inet_connection_sock *icsk = inet_csk(sk); | |
8336886f | 798 | struct request_sock_queue *queue = &icsk->icsk_accept_queue; |
a019d6fe ACM |
799 | struct request_sock *acc_req; |
800 | struct request_sock *req; | |
801 | ||
802 | inet_csk_delete_keepalive_timer(sk); | |
803 | ||
804 | /* make all the listen_opt local to us */ | |
8336886f | 805 | acc_req = reqsk_queue_yank_acceptq(queue); |
a019d6fe ACM |
806 | |
807 | /* Following specs, it would be better either to send FIN | |
808 | * (and enter FIN-WAIT-1, it is normal close) | |
809 | * or to send active reset (abort). | |
810 | * Certainly, it is pretty dangerous while synflood, but it is | |
811 | * bad justification for our negligence 8) | |
812 | * To be honest, we are not able to make either | |
813 | * of the variants now. --ANK | |
814 | */ | |
8336886f | 815 | reqsk_queue_destroy(queue); |
a019d6fe ACM |
816 | |
817 | while ((req = acc_req) != NULL) { | |
818 | struct sock *child = req->sk; | |
819 | ||
820 | acc_req = req->dl_next; | |
821 | ||
822 | local_bh_disable(); | |
823 | bh_lock_sock(child); | |
547b792c | 824 | WARN_ON(sock_owned_by_user(child)); |
a019d6fe ACM |
825 | sock_hold(child); |
826 | ||
827 | sk->sk_prot->disconnect(child, O_NONBLOCK); | |
828 | ||
829 | sock_orphan(child); | |
830 | ||
eb4dea58 HX |
831 | percpu_counter_inc(sk->sk_prot->orphan_count); |
832 | ||
7ab4551f | 833 | if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) { |
8336886f JC |
834 | BUG_ON(tcp_sk(child)->fastopen_rsk != req); |
835 | BUG_ON(sk != tcp_rsk(req)->listener); | |
836 | ||
837 | /* Paranoid, to prevent race condition if | |
838 | * an inbound pkt destined for child is | |
839 | * blocked by sock lock in tcp_v4_rcv(). | |
840 | * Also to satisfy an assertion in | |
841 | * tcp_v4_destroy_sock(). | |
842 | */ | |
843 | tcp_sk(child)->fastopen_rsk = NULL; | |
844 | sock_put(sk); | |
845 | } | |
a019d6fe ACM |
846 | inet_csk_destroy_sock(child); |
847 | ||
848 | bh_unlock_sock(child); | |
849 | local_bh_enable(); | |
850 | sock_put(child); | |
851 | ||
852 | sk_acceptq_removed(sk); | |
853 | __reqsk_free(req); | |
854 | } | |
8336886f JC |
855 | if (queue->fastopenq != NULL) { |
856 | /* Free all the reqs queued in rskq_rst_head. */ | |
857 | spin_lock_bh(&queue->fastopenq->lock); | |
858 | acc_req = queue->fastopenq->rskq_rst_head; | |
859 | queue->fastopenq->rskq_rst_head = NULL; | |
860 | spin_unlock_bh(&queue->fastopenq->lock); | |
861 | while ((req = acc_req) != NULL) { | |
862 | acc_req = req->dl_next; | |
863 | __reqsk_free(req); | |
864 | } | |
865 | } | |
547b792c | 866 | WARN_ON(sk->sk_ack_backlog); |
a019d6fe | 867 | } |
a019d6fe | 868 | EXPORT_SYMBOL_GPL(inet_csk_listen_stop); |
af05dc93 ACM |
869 | |
870 | void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) | |
871 | { | |
872 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; | |
873 | const struct inet_sock *inet = inet_sk(sk); | |
874 | ||
875 | sin->sin_family = AF_INET; | |
c720c7e8 ED |
876 | sin->sin_addr.s_addr = inet->inet_daddr; |
877 | sin->sin_port = inet->inet_dport; | |
af05dc93 | 878 | } |
af05dc93 | 879 | EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); |
c4d93909 | 880 | |
dec73ff0 ACM |
881 | #ifdef CONFIG_COMPAT |
882 | int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, | |
883 | char __user *optval, int __user *optlen) | |
884 | { | |
dbeff12b | 885 | const struct inet_connection_sock *icsk = inet_csk(sk); |
dec73ff0 ACM |
886 | |
887 | if (icsk->icsk_af_ops->compat_getsockopt != NULL) | |
888 | return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, | |
889 | optval, optlen); | |
890 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, | |
891 | optval, optlen); | |
892 | } | |
dec73ff0 ACM |
893 | EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt); |
894 | ||
895 | int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, | |
b7058842 | 896 | char __user *optval, unsigned int optlen) |
dec73ff0 | 897 | { |
dbeff12b | 898 | const struct inet_connection_sock *icsk = inet_csk(sk); |
dec73ff0 ACM |
899 | |
900 | if (icsk->icsk_af_ops->compat_setsockopt != NULL) | |
901 | return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, | |
902 | optval, optlen); | |
903 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, | |
904 | optval, optlen); | |
905 | } | |
dec73ff0 ACM |
906 | EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt); |
907 | #endif | |
80d0a69f DM |
908 | |
909 | static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl) | |
910 | { | |
5abf7f7e ED |
911 | const struct inet_sock *inet = inet_sk(sk); |
912 | const struct ip_options_rcu *inet_opt; | |
80d0a69f DM |
913 | __be32 daddr = inet->inet_daddr; |
914 | struct flowi4 *fl4; | |
915 | struct rtable *rt; | |
916 | ||
917 | rcu_read_lock(); | |
918 | inet_opt = rcu_dereference(inet->inet_opt); | |
919 | if (inet_opt && inet_opt->opt.srr) | |
920 | daddr = inet_opt->opt.faddr; | |
921 | fl4 = &fl->u.ip4; | |
922 | rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, | |
923 | inet->inet_saddr, inet->inet_dport, | |
924 | inet->inet_sport, sk->sk_protocol, | |
925 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); | |
926 | if (IS_ERR(rt)) | |
927 | rt = NULL; | |
928 | if (rt) | |
929 | sk_setup_caps(sk, &rt->dst); | |
930 | rcu_read_unlock(); | |
931 | ||
932 | return &rt->dst; | |
933 | } | |
934 | ||
935 | struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu) | |
936 | { | |
937 | struct dst_entry *dst = __sk_dst_check(sk, 0); | |
938 | struct inet_sock *inet = inet_sk(sk); | |
939 | ||
940 | if (!dst) { | |
941 | dst = inet_csk_rebuild_route(sk, &inet->cork.fl); | |
942 | if (!dst) | |
943 | goto out; | |
944 | } | |
6700c270 | 945 | dst->ops->update_pmtu(dst, sk, NULL, mtu); |
80d0a69f DM |
946 | |
947 | dst = __sk_dst_check(sk, 0); | |
948 | if (!dst) | |
949 | dst = inet_csk_rebuild_route(sk, &inet->cork.fl); | |
950 | out: | |
951 | return dst; | |
952 | } | |
953 | EXPORT_SYMBOL_GPL(inet_csk_update_pmtu); |