net: Add batman-adv meshing protocol
net/batman-adv/unicast.c
/*
 * Copyright (C) 2010 B.A.T.M.A.N. contributors:
 *
 * Andreas Langer
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "unicast.h"
#include "send.h"
#include "soft-interface.h"
#include "gateway_client.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "hard-interface.h"

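/* frag_merge_packet() - merge two matching fragments back into one packet.
 *
 * Pulls the fragment header off the second half, expands the first half's
 * buffer, appends the second half's payload and shrinks the remaining
 * fragment header back to a regular BAT_UNICAST header. Returns the merged
 * skb, or NULL if the buffer could not be expanded (the buffered fragment
 * is freed in that case, the passed skb is freed by the caller).
 */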
static struct sk_buff *frag_merge_packet(struct list_head *head,
					 struct frag_packet_list_entry *tfp,
					 struct sk_buff *skb)
{
	struct unicast_frag_packet *up =
		(struct unicast_frag_packet *)skb->data;
	struct sk_buff *tmp_skb;
	struct unicast_packet *unicast_packet;
	int hdr_len = sizeof(struct unicast_packet),
	    uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;

	/* set skb to the first part and tmp_skb to the second part */
	if (up->flags & UNI_FRAG_HEAD) {
		tmp_skb = tfp->skb;
	} else {
		tmp_skb = skb;
		skb = tfp->skb;
	}

	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
		/* free buffered skb, skb will be freed later */
		kfree_skb(tfp->skb);
		/* don't leave a stale pointer in the fragment buffer */
		tfp->skb = NULL;
		return NULL;
	}

	/* move free entry to end */
	tfp->skb = NULL;
	tfp->seqno = 0;
	list_move_tail(&tfp->list, head);

	memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
	kfree_skb(tmp_skb);

	memmove(skb->data + uni_diff, skb->data, hdr_len);
	unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff);
	unicast_packet->packet_type = BAT_UNICAST;

	return skb;
}

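/* frag_create_entry() - buffer a fragment until its counterpart arrives.
 *
 * Recycles the slot at the tail of the per-originator list (free slots and
 * the oldest packets sit there), then stores the fragment's sequence number
 * and skb and moves the entry to the head of the list.
 */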
static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
{
	struct frag_packet_list_entry *tfp;
	struct unicast_frag_packet *up =
		(struct unicast_frag_packet *)skb->data;

	/* free slots and the oldest packets stand at the end of the list */
	tfp = list_entry((head)->prev, typeof(*tfp), list);
	kfree_skb(tfp->skb);

	tfp->seqno = ntohs(up->seqno);
	tfp->skb = skb;
	list_move(&tfp->list, head);
	return;
}

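/* frag_create_buffer() - allocate the per-originator fragment buffer.
 *
 * Fills the list with FRAG_BUFFER_SIZE empty slots. Returns 0 on success or
 * -ENOMEM if an allocation fails (already allocated slots are freed again).
 */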
static int frag_create_buffer(struct list_head *head)
{
	int i;
	struct frag_packet_list_entry *tfp;

	for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
		tfp = kmalloc(sizeof(struct frag_packet_list_entry),
			      GFP_ATOMIC);
		if (!tfp) {
			frag_list_free(head);
			return -ENOMEM;
		}
		tfp->skb = NULL;
		tfp->seqno = 0;
		INIT_LIST_HEAD(&tfp->list);
		list_add(&tfp->list, head);
	}

	return 0;
}

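/* frag_search_packet() - look for the counterpart of a fragment.
 *
 * A buffered entry matches if it carries the neighbouring sequence number
 * and the opposite UNI_FRAG_HEAD flag. Returns the matching entry, or NULL
 * if only a duplicate or no counterpart was found (duplicates are moved to
 * the tail so they are recycled soon).
 */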
static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
					struct unicast_frag_packet *up)
{
	struct frag_packet_list_entry *tfp;
	struct unicast_frag_packet *tmp_up = NULL;
	uint16_t search_seqno;

	if (up->flags & UNI_FRAG_HEAD)
		search_seqno = ntohs(up->seqno) + 1;
	else
		search_seqno = ntohs(up->seqno) - 1;

	list_for_each_entry(tfp, head, list) {

		if (!tfp->skb)
			continue;

		if (tfp->seqno == ntohs(up->seqno))
			goto mov_tail;

		tmp_up = (struct unicast_frag_packet *)tfp->skb->data;

		if (tfp->seqno == search_seqno) {

			if ((tmp_up->flags & UNI_FRAG_HEAD) !=
			    (up->flags & UNI_FRAG_HEAD))
				return tfp;
			else
				goto mov_tail;
		}
	}
	return NULL;

mov_tail:
	list_move_tail(&tfp->list, head);
	return NULL;
}

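/* frag_list_free() - release all buffered fragments and their list slots. */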
void frag_list_free(struct list_head *head)
{
	struct frag_packet_list_entry *pf, *tmp_pf;

	if (!list_empty(head)) {

		list_for_each_entry_safe(pf, tmp_pf, head, list) {
			kfree_skb(pf->skb);
			list_del(&pf->list);
			kfree(pf);
		}
	}
	return;
}

/* frag_reassemble_skb():
 * returns NET_RX_DROP if the operation failed - skb is left intact
 * returns NET_RX_SUCCESS if the fragment was buffered (*new_skb will be NULL)
 * or the skb could be reassembled (*new_skb will point to the new packet and
 * the passed skb was freed)
 */
int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
			struct sk_buff **new_skb)
{
	struct orig_node *orig_node;
	struct frag_packet_list_entry *tmp_frag_entry;
	int ret = NET_RX_DROP;
	struct unicast_frag_packet *unicast_packet =
		(struct unicast_frag_packet *)skb->data;

	*new_skb = NULL;
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->orig));

	if (!orig_node) {
		pr_debug("couldn't find originator in orig_hash\n");
		goto out;
	}

	orig_node->last_frag_packet = jiffies;

	if (list_empty(&orig_node->frag_list) &&
	    frag_create_buffer(&orig_node->frag_list)) {
		pr_debug("couldn't create frag buffer\n");
		goto out;
	}

	tmp_frag_entry = frag_search_packet(&orig_node->frag_list,
					    unicast_packet);

	if (!tmp_frag_entry) {
		frag_create_entry(&orig_node->frag_list, skb);
		ret = NET_RX_SUCCESS;
		goto out;
	}

	*new_skb = frag_merge_packet(&orig_node->frag_list, tmp_frag_entry,
				     skb);
	/* *new_skb == NULL means the merge failed */
	if (*new_skb)
		ret = NET_RX_SUCCESS;
out:
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}

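/* frag_send_skb() - split a unicast packet that exceeds the interface MTU
 * into two BAT_UNICAST_FRAG packets and send both halves.
 *
 * The unicast header is replaced by a fragment header carrying the sender's
 * originator address, the UNI_FRAG_HEAD flag on the first half and
 * consecutive sequence numbers, so the receiver can match the halves again.
 */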
int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
		  struct batman_if *batman_if, uint8_t dstaddr[])
{
	struct unicast_packet tmp_uc, *unicast_packet;
	struct sk_buff *frag_skb;
	struct unicast_frag_packet *frag1, *frag2;
	int uc_hdr_len = sizeof(struct unicast_packet);
	int ucf_hdr_len = sizeof(struct unicast_frag_packet);
	int data_len = skb->len;

	if (!bat_priv->primary_if)
		goto dropped;

	unicast_packet = (struct unicast_packet *)skb->data;

	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
	if (!frag_skb)
		goto dropped;
	skb_split(skb, frag_skb, data_len / 2);

	if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
	    my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
		goto drop_frag;

	frag1 = (struct unicast_frag_packet *)skb->data;
	frag2 = (struct unicast_frag_packet *)frag_skb->data;

	memcpy(frag1, &tmp_uc, sizeof(struct unicast_packet));

	frag1->ttl--;
	frag1->version = COMPAT_VERSION;
	frag1->packet_type = BAT_UNICAST_FRAG;

	memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));

	frag1->flags |= UNI_FRAG_HEAD;
	frag2->flags &= ~UNI_FRAG_HEAD;

	frag1->seqno = htons((uint16_t)atomic_inc_return(
					&batman_if->frag_seqno));
	frag2->seqno = htons((uint16_t)atomic_inc_return(
					&batman_if->frag_seqno));

	send_skb_packet(skb, batman_if, dstaddr);
	send_skb_packet(frag_skb, batman_if, dstaddr);
	return NET_RX_SUCCESS;

drop_frag:
	kfree_skb(frag_skb);
dropped:
	kfree_skb(skb);
	return NET_RX_DROP;
}

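/* unicast_send_skb() - encapsulate a payload coming from the soft interface
 * in a BAT_UNICAST packet and hand it to the chosen next hop.
 *
 * Multicast/broadcast destinations are sent towards the currently selected
 * gateway; destinations not in the originator table are looked up in the
 * translation (HNA) table. If fragmentation is enabled and the packet does
 * not fit the outgoing MTU, it is handed to frag_send_skb() instead.
 */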
int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct unicast_packet *unicast_packet;
	struct orig_node *orig_node;
	struct batman_if *batman_if;
	struct neigh_node *router;
	int data_len = skb->len;
	uint8_t dstaddr[ETH_ALEN];

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* get routing information */
	if (is_multicast_ether_addr(ethhdr->h_dest))
		orig_node = (struct orig_node *)gw_get_selected(bat_priv);
	else
		orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
							   compare_orig,
							   choose_orig,
							   ethhdr->h_dest));

	/* check for hna host */
	if (!orig_node)
		orig_node = transtable_search(bat_priv, ethhdr->h_dest);

	router = find_router(bat_priv, orig_node, NULL);

	if (!router)
		goto unlock;

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */

	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	if (batman_if->if_status != IF_ACTIVE)
		goto dropped;

	if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
		goto dropped;

	unicast_packet = (struct unicast_packet *)skb->data;

	unicast_packet->version = COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BAT_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);

	if (atomic_read(&bat_priv->fragmentation) &&
	    data_len + sizeof(struct unicast_packet) >
	    batman_if->net_dev->mtu) {
		/* frag_send_skb() decreases the ttl again, so compensate */
		unicast_packet->ttl++;
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);
	}
	send_skb_packet(skb, batman_if, dstaddr);
	return 0;

unlock:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
dropped:
	kfree_skb(skb);
	return 1;
}