mac80211: Report allocation failure from mesh_path_node_copy.
net/mac80211/mesh_hwmp.c
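The function named in the commit title, mesh_path_node_copy(), lives in net/mac80211/mesh_pathtbl.c rather than in this file. As a rough sketch only — the helper name comes from the title, but its exact signature and body are assumptions, not taken from this file — the change the title describes would make the copy helper report a failed allocation instead of silently ignoring it, e.g.:

	static int mesh_path_node_copy(struct hlist_node *p,
				       struct mesh_table *newtbl)
	{
		struct mpath_node *node, *new_node;

		new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
		if (new_node == NULL)
			return -ENOMEM;	/* assumed: propagate the failure */
		/* ... copy *p into new_node and hash it into newtbl ... */
		return 0;
	}

The caller (presumably the table-grow path) would then check the return value and abort the copy on error.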
/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "mesh.h"

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8

/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64

/* Destination only */
#define MP_F_DO	0x1
/* Reply and forward */
#define MP_F_RF	0x2

static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}
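
/*
 * When the Address Extension (AE) flag is set, the element carries one
 * additional 6-byte address that shifts the remaining fields by six
 * bytes; that is what the "offset += 6" above and the AE_F_SET() checks
 * in the macros below account for.
 */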

/* HWMP IE processing macros */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_DSN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x)	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_DST_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_DST_DSN(x)	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(x + 3)
#define PREP_IE_ORIG_DSN(x)	u32_field_get(x, 9, 0)
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_DST_DSN(x)	u32_field_get(x, 27, AE_F_SET(x))

#define PERR_IE_DST_ADDR(x)	(x + 2)
#define PERR_IE_DST_DSN(x)	u32_field_get(x, 8, 0)

#define TU_TO_EXP_TIME(x) (jiffies + msecs_to_jiffies(x * 1024 / 1000))
#define MSEC_TO_TU(x) (x*1000/1024)
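/*
 * Destination sequence numbers are compared with serial-number
 * arithmetic (same idea as time_after()): DSN_GT(x, y) is true when x
 * is newer than y, even after the 32-bit counter wraps around.
 */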
#define DSN_GT(x, y) ((s32)((y) - (x)) < 0)
#define DSN_LT(x, y) ((s32)((x) - (y)) < 0)

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(sdata->u.sta.mshcfg.min_discovery_timeout)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR
};

static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
		u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
		__le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
		__le32 metric, __le32 preq_id, struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
					   IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = action;

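	/*
	 * The fixed IE lengths used below assume a single destination and
	 * no Address Extension, matching the length checks done on the
	 * receive side in mesh_rx_path_sel_frame().
	 */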
	switch (action) {
	case MPATH_PREQ:
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
		break;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREQ) {
		memcpy(pos, &preq_id, 4);
		pos += 4;
	}
	memcpy(pos, orig_addr, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &orig_dsn, 4);
	pos += 4;
	memcpy(pos, &lifetime, 4);
	pos += 4;
	memcpy(pos, &metric, 4);
	pos += 4;
	if (action == MPATH_PREQ) {
		/* destination count */
		*pos++ = 1;
		*pos++ = dst_flags;
	}
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_sta_tx(dev, skb, 0);
	return 0;
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @dst: broken destination
 * @dst_dsn: dsn of the broken destination
 * @ra: node this frame is addressed to
 * @dev: local mesh interface
 */
int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
		struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
					   IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
	ie_len = 12;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* mode flags, reserved */
	*pos++ = 0;
	/* number of destinations */
	*pos++ = 1;
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_sta_tx(dev, skb, 0);
	return 0;
}

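/*
 * airtime_link_metric_get - compute the airtime link metric to a peer
 *
 * This appears to follow the 802.11s draft's airtime cost
 *
 *	ca = (O + Bt/r) * 1 / (1 - ef)
 *
 * where O is a (device dependent) channel access overhead, Bt the test
 * frame length in bits (TEST_FRAME_LEN), r the current tx bitrate and
 * ef the estimated frame error rate derived from fail_avg. All terms are
 * kept in <<ARITH_SHIFT fixed point and shifted back down at the end.
 */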
static u32 airtime_link_metric_get(struct ieee80211_local *local,
				   struct sta_info *sta)
{
	struct ieee80211_supported_band *sband;
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

	if (sta->fail_avg >= 100)
		return MAX_METRIC;
	err = (sta->fail_avg << ARITH_SHIFT) / 100;

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
	rate = sband->bitrates[sta->txrate_idx].bitrate;
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
	return (u32)result;
}

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @dev: local mesh interface
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct net_device *dev,
			       struct ieee80211_mgmt *mgmt,
			       u8 *hwmp_ie)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_dsn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;
	u8 action = mgmt->u.action.u.mesh_action.action_code;

	rcu_read_lock();
	sta = sta_info_get(local, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the destination in
		 * the Path Request. The draft refers to that MP as the
		 * destination address, even though usually it is the origin of
		 * the PREP frame. We divert from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, dev);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_DSN_VALID)) {
				if (DSN_GT(mpath->dsn, orig_dsn) ||
				    (mpath->dsn == orig_dsn &&
				     action == MPATH_PREQ &&
				     new_metric > mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, dev);
			mpath = mesh_path_lookup(orig_addr, dev);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_DSN_VALID;
			mpath->metric = new_metric;
			mpath->dsn = orig_dsn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* draft says preq_id should be saved too, but there
			 * does not seem to be any use for it, skipping for now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, dev);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
				(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, dev);
			mpath = mesh_path_lookup(ta, dev);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags &= ~MESH_PATH_DSN_VALID;
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}

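/*
 * hwmp_preq_frame_process - handle a received PREQ
 *
 * Reply with a PREP when this MP is the PREQ's destination, or when it
 * already holds a fresh enough active path and the Destination Only flag
 * is not set. Unless the PREQ was addressed to us (or an intermediate
 * reply suppresses forwarding), also re-broadcast it with the metric and
 * hop count updated and the TTL decremented.
 */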
static void hwmp_preq_frame_process(struct net_device *dev,
				    struct ieee80211_mgmt *mgmt,
				    u8 *preq_elem, u32 metric) {
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct mesh_path *mpath;
	u8 *dst_addr, *orig_addr;
	u8 dst_flags, ttl;
	u32 orig_dsn, dst_dsn, lifetime;
	bool reply = false;
	bool forward = true;

	/* Update destination DSN, if present */
	dst_addr = PREQ_IE_DST_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	dst_dsn = PREQ_IE_DST_DSN(preq_elem);
	orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
	dst_flags = PREQ_IE_DST_F(preq_elem);

	if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) {
		forward = false;
		reply = true;
		metric = 0;
		if (time_after(jiffies, ifsta->last_dsn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifsta->last_dsn_update)) {
			dst_dsn = ++ifsta->dsn;
			ifsta->last_dsn_update = jiffies;
		}
	} else {
		rcu_read_lock();
		mpath = mesh_path_lookup(dst_addr, dev);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
					DSN_LT(mpath->dsn, dst_dsn)) {
				mpath->dsn = dst_dsn;
				mpath->flags |= MESH_PATH_DSN_VALID;
			} else if ((!(dst_flags & MP_F_DO)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				metric = mpath->metric;
				dst_dsn = mpath->dsn;
				if (dst_flags & MP_F_RF)
					dst_flags |= MP_F_DO;
				else
					forward = false;
			}
		}
		rcu_read_unlock();
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifsta->mshcfg.dot11MeshTTL;
		if (ttl != 0)
			mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
				cpu_to_le32(dst_dsn), 0, orig_addr,
				cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
				cpu_to_le32(lifetime), cpu_to_le32(metric),
				0, dev);
		else
			ifsta->mshstats.dropped_frames_ttl++;
	}

	if (forward) {
		u32 preq_id;
		u8 hopcount, flags;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifsta->mshstats.dropped_frames_ttl++;
			return;
		}
		--ttl;
		flags = PREQ_IE_FLAGS(preq_elem);
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				cpu_to_le32(orig_dsn), dst_flags, dst_addr,
				cpu_to_le32(dst_dsn), dev->broadcast,
				hopcount, ttl, cpu_to_le32(lifetime),
				cpu_to_le32(metric), cpu_to_le32(preq_id),
				dev);
		ifsta->mshstats.fwded_frames++;
	}
}


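/*
 * hwmp_prep_frame_process - forward a received PREP
 *
 * Forwards the PREP one hop further towards the node that originated the
 * corresponding PREQ, using the reverse path learnt while the PREQ
 * travelled through this MP.
 */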
static void hwmp_prep_frame_process(struct net_device *dev,
				    struct ieee80211_mgmt *mgmt,
				    u8 *prep_elem, u32 metric)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath;
	u8 *dst_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 dst_dsn, orig_dsn, lifetime;

	/* Note that we divert from the draft nomenclature and denominate
	 * destination to what the draft refers to as originator. So in this
	 * function destination refers to the final destination of the PREP,
	 * which corresponds with the originator of the PREQ to which this
	 * PREP replies
	 */
	dst_addr = PREP_IE_DST_ADDR(prep_elem);
	if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0)
		/* destination, no forwarding required */
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.sta.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, dev);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	dst_dsn = PREP_IE_DST_DSN(prep_elem);
	orig_dsn = PREP_IE_ORIG_DSN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
		cpu_to_le32(orig_dsn), 0, dst_addr,
		cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl,
		cpu_to_le32(lifetime), cpu_to_le32(metric),
		0, dev);
	rcu_read_unlock();
	sdata->u.sta.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.sta.mshstats.dropped_frames_no_route++;
	return;
}

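/*
 * hwmp_perr_frame_process - handle a received PERR
 *
 * If the PERR comes from the next hop of an active path towards the
 * reported destination (and carries a newer DSN), deactivate that path
 * and propagate the PERR to the broadcast address.
 */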
static void hwmp_perr_frame_process(struct net_device *dev,
				    struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct mesh_path *mpath;
	u8 *ta, *dst_addr;
	u32 dst_dsn;

	ta = mgmt->sa;
	dst_addr = PERR_IE_DST_ADDR(perr_elem);
	dst_dsn = PERR_IE_DST_DSN(perr_elem);
	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, dev);
	if (mpath) {
		spin_lock_bh(&mpath->state_lock);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    memcmp(ta, mpath->next_hop->addr, ETH_ALEN) == 0 &&
		    (!(mpath->flags & MESH_PATH_DSN_VALID) ||
		     DSN_GT(dst_dsn, mpath->dsn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->dsn = dst_dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
					   dev->broadcast, dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}



void mesh_rx_path_sel_frame(struct net_device *dev,
			    struct ieee80211_mgmt *mgmt,
			    size_t len)
{
	struct ieee802_11_elems elems;
	size_t baselen;
	u32 last_hop_metric;

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
			       len - baselen, &elems);

	switch (mgmt->u.action.u.mesh_action.action_code) {
	case MPATH_PREQ:
		if (!elems.preq || elems.preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			return;
		last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq);
		if (!last_hop_metric)
			return;
		hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric);
		break;
	case MPATH_PREP:
		if (!elems.prep || elems.prep_len != 31)
			/* Right now we support no AE */
			return;
		last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep);
		if (!last_hop_metric)
			return;
		hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric);
		break;
	case MPATH_PERR:
		if (!elems.perr || elems.perr_len != 12)
			/* Right now we support only one destination per PERR */
			return;
		hwmp_perr_frame_process(dev, mgmt, elems.perr);
		break;
	default:
		return;
	}

}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 *
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(mpath->dev);
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
	if (!preq_node) {
		printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
		return;
	}

	spin_lock(&ifsta->mesh_preq_queue_lock);
	if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock(&ifsta->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	list_add_tail(&preq_node->list, &ifsta->preq_queue.list);
	++ifsta->preq_queue_len;
	spin_unlock(&ifsta->mesh_preq_queue_lock);

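	/*
	 * Schedule transmission of the queued PREQ: run the work right away
	 * if the minimum PREQ interval has already passed (handling a
	 * possible jiffies wraparound), otherwise let the mesh path timer
	 * pick it up once the interval expires.
	 */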
	if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata)))
		queue_work(sdata->local->hw.workqueue, &ifsta->work);

	else if (time_before(jiffies, ifsta->last_preq)) {
		/* avoid long wait if did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		queue_work(sdata->local->hw.workqueue, &ifsta->work);
	} else
		mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq +
						   min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @dev: local mesh interface
 */
void mesh_path_start_discovery(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, dst_flags;
	u32 lifetime;

	spin_lock(&ifsta->mesh_preq_queue_lock);
	if (!ifsta->preq_queue_len ||
		time_before(jiffies, ifsta->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock(&ifsta->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifsta->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifsta->preq_queue_len;
	spin_unlock(&ifsta->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, dev);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifsta->last_preq = jiffies;

	if (time_after(jiffies, ifsta->last_dsn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifsta->last_dsn_update)) {
		++ifsta->dsn;
		sdata->u.sta.last_dsn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.sta.mshcfg.dot11MeshTTL;
	if (ttl == 0) {
		sdata->u.sta.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

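	/*
	 * PREQ_Q_F_REFRESH means we are refreshing a path that is still in
	 * use, so ask the destination itself to reply (Destination Only
	 * flag); otherwise let intermediate MPs reply and forward.
	 */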
	if (preq_node->flags & PREQ_Q_F_REFRESH)
		dst_flags = MP_F_DO;
	else
		dst_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr,
			cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst,
			cpu_to_le32(mpath->dsn), dev->broadcast, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifsta->preq_id++), dev);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
 *
 * @next_hop: output argument for next hop address
 * @skb: frame to be sent
 * @dev: network device the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
 * found, the function will start a path discovery and queue the frame so it is
 * sent when the path is resolved. This means the caller must not free the skb
 * in this case.
 */
int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb,
			struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sk_buff *skb_to_free = NULL;
	struct mesh_path *mpath;
	int err = 0;

	rcu_read_lock();
	mpath = mesh_path_lookup(skb->data, dev);

	if (!mpath) {
		mesh_path_add(skb->data, dev);
		mpath = mesh_path_lookup(skb->data, dev);
		if (!mpath) {
			dev_kfree_skb(skb);
			sdata->u.sta.mshstats.dropped_frames_no_route++;
			err = -ENOSPC;
			goto endlookup;
		}
	}

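	/*
	 * If there is an active path, hand back its next hop; when the path
	 * is close to expiring, also queue a background refresh so traffic
	 * is not interrupted. Otherwise buffer the frame (dropping the
	 * oldest one if the queue is full) and start path discovery.
	 */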
	if (mpath->flags & MESH_PATH_ACTIVE) {
		if (time_after(jiffies, mpath->exp_time -
			msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time))
				&& skb->pkt_type != PACKET_OTHERHOST
				&& !(mpath->flags & MESH_PATH_RESOLVING)
				&& !(mpath->flags & MESH_PATH_FIXED)) {
			mesh_queue_preq(mpath,
					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		}
		memcpy(next_hop, mpath->next_hop->addr,
				ETH_ALEN);
	} else {
		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
			/* Start discovery only if it is not running yet */
			mesh_queue_preq(mpath, PREQ_Q_F_START);
		}

		if (skb_queue_len(&mpath->frame_queue) >=
				MESH_FRAME_QUEUE_LEN) {
			skb_to_free = mpath->frame_queue.next;
			skb_unlink(skb_to_free, &mpath->frame_queue);
		}

		skb_queue_tail(&mpath->frame_queue, skb);
		if (skb_to_free)
			mesh_path_discard_frame(skb_to_free, dev);
		err = -ENOENT;
	}

endlookup:
	rcu_read_unlock();
	return err;
}

void mesh_path_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata;
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = (struct mesh_path *) data;
	mpath = rcu_dereference(mpath);
	if (!mpath)
		goto endmpathtimer;
	spin_lock_bh(&mpath->state_lock);
	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
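	/*
	 * If the path was not resolved before the discovery timeout, retry
	 * with binary exponential backoff until dot11MeshHWMPmaxPREQretries
	 * is reached, then give up and flush the pending frames.
	 */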
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING)))
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
	else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		mesh_path_flush_pending(mpath);
	}

	spin_unlock_bh(&mpath->state_lock);
endmpathtimer:
	rcu_read_unlock();
}