/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

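/*
 * Worked example (illustrative, not part of the original file): with
 * INIT_PATHS_SIZE_ORDER == 2 a table starts with 1 << 2 == 4 buckets, and
 * a grow is requested once the entry count reaches MEAN_CHAIN_LEN * 4 == 8,
 * i.e. when the mean hash chain would otherwise exceed two nodes per
 * bucket.
 */
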
static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)

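/*
 * Usage sketch (illustrative, not part of the original file): dereference
 * the RCU pointer once into a local variable before iterating, since "tbl"
 * is expanded twice by the macro above:
 *
 *	struct mesh_table *tbl;
 *	struct mpath_node *node;
 *	int i;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, node, i)
 *		use_mpath(node->mpath);	// use_mpath() is a placeholder
 *	rcu_read_unlock();
 */
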
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			 sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}

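/*
 * Worked example (illustrative, not part of the original file): for
 * addr == 00:11:22:33:44:55, *(u32 *)(addr + 2) reads the last four bytes
 * 22:33:44:55 as a single word, which jhash_2words() mixes with the
 * interface index and the per-table random seed; masking with hash_mask
 * keeps the result within [0, number of buckets - 1].
 */
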
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

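/*
 * Usage sketch (illustrative, not part of the original file): lookups must
 * run inside an RCU read-side section, and the returned pointer must not be
 * used after rcu_read_unlock():
 *
 *	struct mesh_path *mpath;
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		use_mpath(mpath);	// use_mpath() is a placeholder
 *	rcu_read_unlock();
 */
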
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - record the given mpath as a mesh gate in our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new (or pre-existing) mesh path on success, or an ERR_PTR()
 * on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}

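/*
 * Caller sketch (illustrative, not part of the original file): the return
 * value is either a valid mpath or an ERR_PTR(), never NULL, so callers
 * check it with IS_ERR():
 *
 *	struct mesh_path *mpath;
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 *	// mpath may also be a pre-existing entry for this dst
 */
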
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

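/*
 * Usage sketch (illustrative, not part of the original file): the RX path
 * can record that an external address is reachable through a mesh proxy;
 * eth_dst and proxy_addr are placeholders here, and a racing -EEXIST result
 * is harmless:
 *
 *	rcu_read_lock();
 *	mppath = mpp_path_lookup(sdata, eth_dst);
 *	rcu_read_unlock();
 *	if (!mppath)
 *		mpp_path_add(sdata, eth_dst, proxy_addr);
 */
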
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
					   sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast);
		}
	}
	rcu_read_unlock();
}

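/*
 * Caller sketch (illustrative, not part of the original file): a rate
 * control algorithm that sees repeated delivery failures toward a mesh peer
 * could report the link as broken; fail_cnt and FAIL_THRESHOLD are
 * placeholders:
 *
 *	if (fail_cnt > FAIL_THRESHOLD)
 *		mesh_plink_broken(sta);
 */
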
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames are
 * copied from each gate to the next. After frames are copied, the mpath
 * queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

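/*
 * Worked example (illustrative, not part of the original file): with active
 * gates A, B and C on this interface, the failed mpath's queue is first
 * moved to A (copy == false), then copied from A to B and from B to C
 * (copy == true), so every gate ends up with the full set of frames;
 * mesh_path_tx_pending() then flushes each gate's queue onto the
 * transmission queue.
 */
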
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must be
 * called without that lock held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

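/*
 * Usage sketch (illustrative, not part of the original file): pinning a
 * static next hop for a destination, with next_hop resolved elsewhere:
 *
 *	struct mesh_path *mpath;
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath)
 *		mesh_path_fix_nexthop(mpath, next_hop);
 *	rcu_read_unlock();
 */
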
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
		       &newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

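/*
 * Pairing sketch (illustrative, not part of the original file): the tables
 * are global, so initialization and teardown are paired once per module:
 *
 *	ret = mesh_pathtbl_init();
 *	if (ret)
 *		return ret;
 *	...
 *	mesh_pathtbl_unregister();
 */
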
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}