[IPSEC]: Add async resume support on input
net/xfrm/xfrm_input.c
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>

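/* Slab cache backing struct sec_path allocations; created in xfrm_input_init(). */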
static struct kmem_cache *secpath_cachep __read_mostly;

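/* Drop the path's reference on each xfrm_state, then free the path itself. */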
void __secpath_destroy(struct sec_path *sp)
{
	int i;
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);

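/*
 * Duplicate a secpath, taking a reference on each state in it; the copy
 * starts with a refcount of one.
 */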
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

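	/* Start empty; copying from @src below overwrites len and the
	 * state vector in one go. */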
	sp->len = 0;
	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	atomic_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);

/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
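	/* IPComp carries a 16-bit CPI rather than a full SPI; widen it. */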
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

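/*
 * Pull the af-specific fields out of the outer header and hand the
 * packet to the inner mode's input processing.
 */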
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	err = x->outer_mode->afinfo->extract_input(x, skb);
	if (err)
		return err;

	skb->protocol = x->inner_mode->afinfo->eth_proto;
	return x->inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);

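/*
 * Main IPsec input path.  A negative @encap_type marks re-entry after an
 * asynchronous transform has completed; see xfrm_input_resume() below.
 */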
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	int err;
	__be32 seq;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = skb->sp->xvec[skb->sp->len - 1];
		seq = XFRM_SKB_CB(skb)->seq;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

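	/* daddroff is the offset of the outer destination address within
	 * the network header; the caller recorded it in the skb cb. */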
	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
		goto drop;

	do {
		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup(daddr, spi, nexthdr, AF_INET);
		if (x == NULL)
			goto drop;

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if ((x->encap ? x->encap->encap_type : 0) != encap_type)
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		spin_unlock(&x->lock);

		XFRM_SKB_CB(skb)->seq = seq;

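		/* The transform may go asynchronous (e.g. hardware crypto);
		 * -EINPROGRESS means we will be re-entered later through
		 * xfrm_input_resume(). */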
		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

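/* Async completion re-enters here, with x and seq restored above. */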
resume:
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG)
				x->stats.integrity_failed++;
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		if (x->inner_mode->input(x, skb))
			goto drop;

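		/* A tunnel mode SA leaves a complete inner packet; stop
		 * walking the transform chain and reinject it below. */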
		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0)
			goto drop;
	} while (!err);

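	/* Forget netfilter state collected on the outer packet before it
	 * re-enters the stack. */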
	nf_reset(skb);

	if (decaps) {
		dst_release(skb->dst);
		skb->dst = NULL;
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

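/*
 * Entry point for async transforms: restart input processing where
 * x->type->input() left off.  The negative encap_type steers
 * xfrm_input() onto the resume path.
 */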
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

void __init xfrm_input_init(void)
{
	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
}
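
For context on the resume path above: a transform's ->input handler may hand
the packet to an asynchronous crypto engine and return -EINPROGRESS, in which
case xfrm_input() returns 0 and the engine's completion callback later
re-enters the stack through xfrm_input_resume(). A minimal sketch of that
pattern follows; the my_esp_* names and my_esp_decrypt_async() are
hypothetical stand-ins, not functions from this file (compare
esp_input()/esp_input_done() in net/ipv4/esp4.c).

/* Sketch only: my_esp_* are hypothetical stand-ins for a transform's
 * handlers; only xfrm_input_resume() comes from this file. */

static int my_esp_postprocess(struct sk_buff *skb, int err)
{
	/* Verify the ICV, strip padding/trailer, and return the inner
	 * protocol number on success or a negative errno on failure. */
	return err;
}

/* Crypto completion callback: re-enter the input path asynchronously. */
static void my_esp_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, my_esp_postprocess(skb, err));
}

/* The transform's ->input handler, as called from xfrm_input() above. */
static int my_esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	/* Hypothetical helper that queues the decryption and arranges
	 * for my_esp_done() to run when the request completes. */
	err = my_esp_decrypt_async(x, skb, my_esp_done);
	if (err == -EINPROGRESS)
		return err;	/* xfrm_input() sees this and returns 0. */

	return my_esp_postprocess(skb, err);
}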