net/ipv6/inet6_connection_sock.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Support for INET6 connection oriented protocols.
 *
 * Authors:     See the TCPv6 sources
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

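/*
 * Walk every socket already bound to the same port (tb->owners) and
 * report a conflict when another socket has a compatible device binding
 * (either side unbound, or both bound to the same interface), address
 * reuse is not permitted by both sides (or the other socket is already
 * listening), and the IPv6 source addresses overlap.
 */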
int inet6_csk_bind_conflict(const struct sock *sk,
                            const struct inet_bind_bucket *tb)
{
        const struct sock *sk2;
        const struct hlist_node *node;

        /* We must walk the whole port owner list in this case. -DaveM */
        /*
         * See comment in inet_csk_bind_conflict about sock lookup
         * vs net namespaces issues.
         */
        sk_for_each_bound(sk2, node, &tb->owners) {
                if (sk != sk2 &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
                    (!sk->sk_reuse || !sk2->sk_reuse ||
                     sk2->sk_state == TCP_LISTEN) &&
                    ipv6_rcv_saddr_equal(sk, sk2))
                        break;
        }

        return node != NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

/*
 * request_sock (formerly open request) hash tables.
 */
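/*
 * Open-coded jhash over the peer address and port: the first three
 * 32-bit words of the address are mixed with the per-listener random
 * value, then the last word and the remote port are folded in.  The
 * result is masked down to the syn queue size, which is a power of two.
 */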
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
                           const u32 rnd, const u16 synq_hsize)
{
        u32 a = (__force u32)raddr->s6_addr32[0];
        u32 b = (__force u32)raddr->s6_addr32[1];
        u32 c = (__force u32)raddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += rnd;
        __jhash_mix(a, b, c);

        a += (__force u32)raddr->s6_addr32[3];
        b += (__force u32)rport;
        __jhash_mix(a, b, c);

        return c & (synq_hsize - 1);
}

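/*
 * Look up a pending connection request on listener @sk that matches the
 * remote port/address, the local address and, when the request recorded
 * an inbound interface, that interface.  On success *prevp is set to the
 * location of the pointer to the entry, so the caller can unlink it from
 * the syn table chain.
 */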
struct request_sock *inet6_csk_search_req(const struct sock *sk,
                                          struct request_sock ***prevp,
                                          const __be16 rport,
                                          const struct in6_addr *raddr,
                                          const struct in6_addr *laddr,
                                          const int iif)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        struct request_sock *req, **prev;

        for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
                                                     lopt->hash_rnd,
                                                     lopt->nr_table_entries)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                const struct inet6_request_sock *treq = inet6_rsk(req);

                if (inet_rsk(req)->rmt_port == rport &&
                    req->rsk_ops->family == AF_INET6 &&
                    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
                    ipv6_addr_equal(&treq->loc_addr, laddr) &&
                    (!treq->iif || treq->iif == iif)) {
                        WARN_ON(req->sk != NULL);
                        *prevp = prev;
                        return req;
                }
        }

        return NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_search_req);

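/*
 * Insert a new connection request into the listener's syn table, using
 * the same hash as inet6_csk_search_req(), and update the accept queue
 * accounting (which rearms the listener's timer with @timeout when the
 * queue was previously empty).
 */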
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
                                    struct request_sock *req,
                                    const unsigned long timeout)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
                                      inet_rsk(req)->rmt_port,
                                      lopt->hash_rnd, lopt->nr_table_entries);

        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
        inet_csk_reqsk_queue_added(sk, timeout);
}

EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

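/*
 * Fill in a sockaddr_in6 with the peer's address and port.  The flow
 * label is never reported (it is not stored for TCP), and the scope id
 * is only set for link-local peers on a device-bound socket.
 */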
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

        sin6->sin6_family = AF_INET6;
        ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
        sin6->sin6_port = inet_sk(sk)->inet_dport;
        /* We do not store received flowlabel for TCP */
        sin6->sin6_flowinfo = 0;
        sin6->sin6_scope_id = 0;
        if (sk->sk_bound_dev_if &&
            ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                sin6->sin6_scope_id = sk->sk_bound_dev_if;
}

EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

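/*
 * Cache a route in the socket together with the addresses it was
 * resolved for.  With CONFIG_XFRM the current flow cache generation is
 * recorded as well, so the entry can be invalidated when the security
 * policies change.
 */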
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
                           struct in6_addr *daddr, struct in6_addr *saddr)
{
        __ip6_dst_store(sk, dst, daddr, saddr);

#ifdef CONFIG_XFRM
        {
                struct rt6_info *rt = (struct rt6_info *)dst;
                rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
        }
#endif
}

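/*
 * Revalidate the cached route against @cookie.  Under CONFIG_XFRM the
 * route is also dropped when the flow cache generation has moved on
 * since it was stored, forcing a fresh lookup.
 */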
static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
        struct dst_entry *dst;

        dst = __sk_dst_check(sk, cookie);

#ifdef CONFIG_XFRM
        if (dst) {
                struct rt6_info *rt = (struct rt6_info *)dst;
                if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
                        __sk_dst_reset(sk);
                        dst = NULL;
                }
        }
#endif

        return dst;
}

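/*
 * Transmit one queued segment for a connected socket: build the flow
 * from the socket state, honour a type 0 routing header by temporarily
 * routing towards its first hop, reuse the cached dst when it is still
 * valid (otherwise resolve and store a new one), then hand the skb to
 * ip6_xmit() with the final destination restored.
 */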
int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct flowi fl;
        struct dst_entry *dst;
        struct in6_addr *final_p = NULL, final;

        memset(&fl, 0, sizeof(fl));
        fl.proto = sk->sk_protocol;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
        fl.fl6_flowlabel = np->flow_label;
        IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
        fl.oif = sk->sk_bound_dev_if;
        fl.mark = sk->sk_mark;
        fl.fl_ip_sport = inet->inet_sport;
        fl.fl_ip_dport = inet->inet_dport;
        security_sk_classify_flow(sk, &fl);

        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        dst = __inet6_csk_dst_check(sk, np->dst_cookie);

        if (dst == NULL) {
                int err = ip6_dst_lookup(sk, &dst, &fl);

                if (err) {
                        sk->sk_err_soft = -err;
                        kfree_skb(skb);
                        return err;
                }

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
                        sk->sk_route_caps = 0;
                        kfree_skb(skb);
                        return err;
                }

                __inet6_csk_dst_store(sk, dst, NULL, NULL);
        }

        skb_dst_set(skb, dst_clone(dst));

        /* Restore final destination back after routing done */
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);

        return ip6_xmit(sk, skb, &fl, np->opt, 0);
}

EXPORT_SYMBOL_GPL(inet6_csk_xmit);