1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "led.h"
23 #include "mesh.h"
24 #include "wep.h"
25 #include "wpa.h"
26 #include "tkip.h"
27 #include "wme.h"
28
29 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
30 struct tid_ampdu_rx *tid_agg_rx,
31 struct sk_buff *skb,
32 struct ieee80211_rx_status *status,
33 u16 mpdu_seq_num,
34 int bar_req);
35 /*
36 * monitor mode reception
37 *
38 * This function cleans up the SKB, i.e. it removes all the stuff
39 * only useful for monitoring.
40 */
41 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
42 struct sk_buff *skb,
43 int rtap_len)
44 {
45 skb_pull(skb, rtap_len);
46
47 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
48 if (likely(skb->len > FCS_LEN))
49 skb_trim(skb, skb->len - FCS_LEN);
50 else {
51 /* driver bug */
52 WARN_ON(1);
53 dev_kfree_skb(skb);
54 skb = NULL;
55 }
56 }
57
58 return skb;
59 }
60
61 static inline int should_drop_frame(struct ieee80211_rx_status *status,
62 struct sk_buff *skb,
63 int present_fcs_len,
64 int radiotap_len)
65 {
66 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
67
68 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
69 return 1;
70 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
71 return 1;
72 if (ieee80211_is_ctl(hdr->frame_control) &&
73 !ieee80211_is_pspoll(hdr->frame_control) &&
74 !ieee80211_is_back_req(hdr->frame_control))
75 return 1;
76 return 0;
77 }
78
79 static int
80 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
81 struct ieee80211_rx_status *status)
82 {
83 int len;
84
85 /* always present fields */
86 len = sizeof(struct ieee80211_radiotap_header) + 9;
87
88 if (status->flag & RX_FLAG_TSFT)
89 len += 8;
90 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
91 len += 1;
92 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
93 len += 1;
94
95 if (len & 1) /* padding for RX_FLAGS if necessary */
96 len++;
97
98 /* make sure radiotap starts at a naturally aligned address */
99 if (len % 8)
100 len = roundup(len, 8);
101
102 return len;
103 }
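/*
 * Worked example (illustrative arithmetic only): the always-present part
 * is the 8-byte radiotap header plus 9 bytes for FLAGS(1) + RATE(1) +
 * CHANNEL(4) + ANTENNA(1) + RX_FLAGS(2) = 17 bytes.  A driver that also
 * reports TSFT and signal/noise in dBm ends up with 17 + 8 + 1 + 1 = 27,
 * padded to 28 so the 16-bit RX_FLAGS field sits on an even offset, and
 * finally rounded up to 32 by the rule above.
 */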
104
105 /*
106 * ieee80211_add_rx_radiotap_header - add radiotap header
107 *
108 * add a radiotap header containing all the fields which the hardware provided.
109 */
110 static void
111 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
112 struct sk_buff *skb,
113 struct ieee80211_rx_status *status,
114 struct ieee80211_rate *rate,
115 int rtap_len)
116 {
117 struct ieee80211_radiotap_header *rthdr;
118 unsigned char *pos;
119
120 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
121 memset(rthdr, 0, rtap_len);
122
123 /* radiotap header, set always present flags */
124 rthdr->it_present =
125 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
126 (1 << IEEE80211_RADIOTAP_CHANNEL) |
127 (1 << IEEE80211_RADIOTAP_ANTENNA) |
128 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
129 rthdr->it_len = cpu_to_le16(rtap_len);
130
131 pos = (unsigned char *)(rthdr+1);
132
133 /* the order of the following fields is important */
134
135 /* IEEE80211_RADIOTAP_TSFT */
136 if (status->flag & RX_FLAG_TSFT) {
137 *(__le64 *)pos = cpu_to_le64(status->mactime);
138 rthdr->it_present |=
139 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
140 pos += 8;
141 }
142
143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
147 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
148 if (status->flag & RX_FLAG_SHORTPRE)
149 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
150 pos++;
151
152 /* IEEE80211_RADIOTAP_RATE */
153 if (status->flag & RX_FLAG_HT) {
154 /*
155 * TODO: add following information into radiotap header once
156 * suitable fields are defined for it:
157 * - MCS index (status->rate_idx)
158 * - HT40 (status->flag & RX_FLAG_40MHZ)
159 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
160 */
161 *pos = 0;
162 } else {
163 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
164 *pos = rate->bitrate / 5;
165 }
166 pos++;
167
168 /* IEEE80211_RADIOTAP_CHANNEL */
169 *(__le16 *)pos = cpu_to_le16(status->freq);
170 pos += 2;
171 if (status->band == IEEE80211_BAND_5GHZ)
172 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
173 IEEE80211_CHAN_5GHZ);
174 else if (rate->flags & IEEE80211_RATE_ERP_G)
175 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
176 IEEE80211_CHAN_2GHZ);
177 else
178 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
179 IEEE80211_CHAN_2GHZ);
180 pos += 2;
181
182 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
183 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
184 *pos = status->signal;
185 rthdr->it_present |=
186 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
187 pos++;
188 }
189
190 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
191 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
192 *pos = status->noise;
193 rthdr->it_present |=
194 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
195 pos++;
196 }
197
198 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
199
200 /* IEEE80211_RADIOTAP_ANTENNA */
201 *pos = status->antenna;
202 pos++;
203
204 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
205
206 /* IEEE80211_RADIOTAP_RX_FLAGS */
207 /* ensure 2 byte alignment for the 2 byte field as required */
208 if ((pos - (unsigned char *)rthdr) & 1)
209 pos++;
210 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
211 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
212 pos += 2;
213 }
214
215 /*
216 * This function copies a received frame to all monitor interfaces and
217 * returns a cleaned-up SKB that no longer includes the FCS nor the
218 * radiotap header the driver might have added.
219 */
220 static struct sk_buff *
221 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
222 struct ieee80211_rx_status *status,
223 struct ieee80211_rate *rate)
224 {
225 struct ieee80211_sub_if_data *sdata;
226 int needed_headroom = 0;
227 struct sk_buff *skb, *skb2;
228 struct net_device *prev_dev = NULL;
229 int present_fcs_len = 0;
230 int rtap_len = 0;
231
232 /*
233 * First, we may need to make a copy of the skb because
234 * (1) we need to modify it for radiotap (if not present), and
235 * (2) the other RX handlers will modify the skb we got.
236 *
237 * We don't need to, of course, if we aren't going to return
238 * the SKB because it has a bad FCS/PLCP checksum.
239 */
240 if (status->flag & RX_FLAG_RADIOTAP)
241 rtap_len = ieee80211_get_radiotap_len(origskb->data);
242 else
243 /* room for the radiotap header based on driver features */
244 needed_headroom = ieee80211_rx_radiotap_len(local, status);
245
246 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
247 present_fcs_len = FCS_LEN;
248
249 if (!local->monitors) {
250 if (should_drop_frame(status, origskb, present_fcs_len,
251 rtap_len)) {
252 dev_kfree_skb(origskb);
253 return NULL;
254 }
255
256 return remove_monitor_info(local, origskb, rtap_len);
257 }
258
259 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
260 /* only need to expand headroom if necessary */
261 skb = origskb;
262 origskb = NULL;
263
264 /*
265 * This shouldn't trigger often because most devices have an
266 * RX header they pull before we get here, and that should
267 * be big enough for our radiotap information. We should
268 * probably export the length to drivers so that we can have
269 * them allocate enough headroom to start with.
270 */
271 if (skb_headroom(skb) < needed_headroom &&
272 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
273 dev_kfree_skb(skb);
274 return NULL;
275 }
276 } else {
277 /*
278 * Need to make a copy and possibly remove radiotap header
279 * and FCS from the original.
280 */
281 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
282
283 origskb = remove_monitor_info(local, origskb, rtap_len);
284
285 if (!skb)
286 return origskb;
287 }
288
289 /* if necessary, prepend radiotap information */
290 if (!(status->flag & RX_FLAG_RADIOTAP))
291 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
292 needed_headroom);
293
294 skb_reset_mac_header(skb);
295 skb->ip_summed = CHECKSUM_UNNECESSARY;
296 skb->pkt_type = PACKET_OTHERHOST;
297 skb->protocol = htons(ETH_P_802_2);
298
299 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
300 if (!netif_running(sdata->dev))
301 continue;
302
303 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
304 continue;
305
306 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
307 continue;
308
309 if (prev_dev) {
310 skb2 = skb_clone(skb, GFP_ATOMIC);
311 if (skb2) {
312 skb2->dev = prev_dev;
313 netif_rx(skb2);
314 }
315 }
316
317 prev_dev = sdata->dev;
318 sdata->dev->stats.rx_packets++;
319 sdata->dev->stats.rx_bytes += skb->len;
320 }
321
322 if (prev_dev) {
323 skb->dev = prev_dev;
324 netif_rx(skb);
325 } else
326 dev_kfree_skb(skb);
327
328 return origskb;
329 }
330
331
332 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
333 {
334 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
335 int tid;
336
337 /* does the frame have a qos control field? */
338 if (ieee80211_is_data_qos(hdr->frame_control)) {
339 u8 *qc = ieee80211_get_qos_ctl(hdr);
340 /* frame has qos control */
341 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
342 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
343 rx->flags |= IEEE80211_RX_AMSDU;
344 else
345 rx->flags &= ~IEEE80211_RX_AMSDU;
346 } else {
347 /*
348 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
349 *
350 * Sequence numbers for management frames, QoS data
351 * frames with a broadcast/multicast address in the
352 * Address 1 field, and all non-QoS data frames sent
353 * by QoS STAs are assigned using an additional single
354 * modulo-4096 counter, [...]
355 *
356 * We also use that counter for non-QoS STAs.
357 */
358 tid = NUM_RX_DATA_QUEUES - 1;
359 }
360
361 rx->queue = tid;
362 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
363 * For now, set skb->priority to 0 for other cases. */
364 rx->skb->priority = (tid > 7) ? 0 : tid;
365 }
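/*
 * Example: a QoS data frame whose QoS Control field carries TID 5 ends up
 * with rx->queue = 5 and skb->priority = 5, and its A-MSDU-present bit
 * sets or clears IEEE80211_RX_AMSDU; management frames and non-QoS data
 * frames all share the single "non-QoS" queue chosen above
 * (NUM_RX_DATA_QUEUES - 1).
 */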
366
367 /**
368 * DOC: Packet alignment
369 *
370 * Drivers always need to pass packets that are aligned to two-byte boundaries
371 * to the stack.
372 *
373 * Additionally, drivers should, if possible, align the payload data in a way that
374 * guarantees that the contained IP header is aligned to a four-byte
375 * boundary. In the case of regular frames, this simply means aligning the
376 * payload to a four-byte boundary (because either the IP header is directly
377 * contained, or IV/RFC1042 headers that have a length divisible by four are
378 * in front of it).
379 *
380 * With A-MSDU frames, however, the payload data address must be two modulo
381 * four, because the 14-byte 802.3 headers inside the A-MSDU subframes push
382 * the IP header back to a four-byte boundary again. Thankfully, the
383 * specs were sane enough this time around to require padding each A-MSDU
384 * subframe to a length that is a multiple of four.
385 *
386 * Padding such as that added by Atheros hardware between the 802.11 header and
387 * the payload is not supported; the driver is required to move the 802.11
388 * header so that it sits directly in front of the payload in that case.
389 */
390 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
391 {
392 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
393 int hdrlen;
394
395 #ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
396 return;
397 #endif
398
399 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
400 "unaligned packet at 0x%p\n", rx->skb->data))
401 return;
402
403 if (!ieee80211_is_data_present(hdr->frame_control))
404 return;
405
406 hdrlen = ieee80211_hdrlen(hdr->frame_control);
407 if (rx->flags & IEEE80211_RX_AMSDU)
408 hdrlen += ETH_HLEN;
409 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
410 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
411 }
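/*
 * Illustrative driver-side sketch (not a mac80211 API; "pad" stands for a
 * hypothetical, hardware-specific padding length): a driver whose hardware
 * inserts padding between the 802.11 header and the payload could close
 * the gap before handing the frame to mac80211, e.g.
 *
 *	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 *	if (pad) {
 *		memmove(skb->data + pad, skb->data, hdrlen);
 *		skb_pull(skb, pad);
 *	}
 *
 * which satisfies the "header directly in front of the payload"
 * requirement documented above.
 */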
412
413
414 /* rx handlers */
415
416 static ieee80211_rx_result debug_noinline
417 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
418 {
419 struct ieee80211_local *local = rx->local;
420 struct sk_buff *skb = rx->skb;
421
422 if (unlikely(local->hw_scanning))
423 return ieee80211_scan_rx(rx->sdata, skb, rx->status);
424
425 if (unlikely(local->sw_scanning)) {
426 /* drop all the other packets during a software scan anyway */
427 if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
428 != RX_QUEUED)
429 dev_kfree_skb(skb);
430 return RX_QUEUED;
431 }
432
433 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
434 /* scanning finished while the handlers were being invoked */
435 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
436 return RX_DROP_UNUSABLE;
437 }
438
439 return RX_CONTINUE;
440 }
441
442
443 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
444 {
445 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
446
447 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
448 return 0;
449
450 return ieee80211_is_robust_mgmt_frame(hdr);
451 }
452
453
454 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
455 {
456 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
457
458 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
459 return 0;
460
461 return ieee80211_is_robust_mgmt_frame(hdr);
462 }
463
464
465 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
466 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
467 {
468 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
469 struct ieee80211_mmie *mmie;
470
471 if (skb->len < 24 + sizeof(*mmie) ||
472 !is_multicast_ether_addr(hdr->da))
473 return -1;
474
475 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
476 return -1; /* not a robust management frame */
477
478 mmie = (struct ieee80211_mmie *)
479 (skb->data + skb->len - sizeof(*mmie));
480 if (mmie->element_id != WLAN_EID_MMIE ||
481 mmie->length != sizeof(*mmie) - 2)
482 return -1;
483
484 return le16_to_cpu(mmie->key_id);
485 }
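/*
 * For orientation (assuming the struct ieee80211_mmie layout from
 * linux/ieee80211.h): the MMIE occupies the last 18 bytes of a protected
 * broadcast/multicast management frame -- element id, length (16), 2-byte
 * key id, 6-byte IPN and 8-byte MIC -- which is why it is read from
 * skb->len - sizeof(*mmie) above.
 */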
486
487
488 static ieee80211_rx_result
489 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
490 {
491 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
492 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
493
494 if (ieee80211_is_data(hdr->frame_control)) {
495 if (!ieee80211_has_a4(hdr->frame_control))
496 return RX_DROP_MONITOR;
497 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
498 return RX_DROP_MONITOR;
499 }
500
501 /* If there is not an established peer link and this is not a peer link
502 * establishment frame, beacon or probe, drop the frame.
503 */
504
505 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
506 struct ieee80211_mgmt *mgmt;
507
508 if (!ieee80211_is_mgmt(hdr->frame_control))
509 return RX_DROP_MONITOR;
510
511 if (ieee80211_is_action(hdr->frame_control)) {
512 mgmt = (struct ieee80211_mgmt *)hdr;
513 if (mgmt->u.action.category != PLINK_CATEGORY)
514 return RX_DROP_MONITOR;
515 return RX_CONTINUE;
516 }
517
518 if (ieee80211_is_probe_req(hdr->frame_control) ||
519 ieee80211_is_probe_resp(hdr->frame_control) ||
520 ieee80211_is_beacon(hdr->frame_control))
521 return RX_CONTINUE;
522
523 return RX_DROP_MONITOR;
524
525 }
526
527 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
528
529 if (ieee80211_is_data(hdr->frame_control) &&
530 is_multicast_ether_addr(hdr->addr1) &&
531 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
532 return RX_DROP_MONITOR;
533 #undef msh_h_get
534
535 return RX_CONTINUE;
536 }
537
538
539 static ieee80211_rx_result debug_noinline
540 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
541 {
542 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
543
544 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
545 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
546 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
547 rx->sta->last_seq_ctrl[rx->queue] ==
548 hdr->seq_ctrl)) {
549 if (rx->flags & IEEE80211_RX_RA_MATCH) {
550 rx->local->dot11FrameDuplicateCount++;
551 rx->sta->num_duplicates++;
552 }
553 return RX_DROP_MONITOR;
554 } else
555 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
556 }
557
558 if (unlikely(rx->skb->len < 16)) {
559 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
560 return RX_DROP_MONITOR;
561 }
562
563 /* Drop disallowed frame classes based on STA auth/assoc state;
564 * IEEE 802.11, Chap 5.5.
565 *
566 * mac80211 filters only based on association state, i.e. it drops
567 * Class 3 frames from not associated stations. hostapd sends
568 * deauth/disassoc frames when needed. In addition, hostapd is
569 * responsible for filtering on both auth and assoc states.
570 */
571
572 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
573 return ieee80211_rx_mesh_check(rx);
574
575 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
576 ieee80211_is_pspoll(hdr->frame_control)) &&
577 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
578 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
579 if ((!ieee80211_has_fromds(hdr->frame_control) &&
580 !ieee80211_has_tods(hdr->frame_control) &&
581 ieee80211_is_data(hdr->frame_control)) ||
582 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
583 /* Drop IBSS frames and frames for other hosts
584 * silently. */
585 return RX_DROP_MONITOR;
586 }
587
588 return RX_DROP_MONITOR;
589 }
590
591 return RX_CONTINUE;
592 }
593
594
595 static ieee80211_rx_result debug_noinline
596 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
597 {
598 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
599 int keyidx;
600 int hdrlen;
601 ieee80211_rx_result result = RX_DROP_UNUSABLE;
602 struct ieee80211_key *stakey = NULL;
603 int mmie_keyidx = -1;
604
605 /*
606 * Key selection 101
607 *
608 * There are four types of keys:
609 * - GTK (group keys)
610 * - IGTK (group keys for management frames)
611 * - PTK (pairwise keys)
612 * - STK (station-to-station pairwise keys)
613 *
614 * When selecting a key, we have to distinguish between multicast
615 * (including broadcast) and unicast frames, the latter can only
616 * use PTKs and STKs while the former always use GTKs and IGTKs.
617 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
618 * unicast frames can also use key indices like GTKs. Hence, if we
619 * don't have a PTK/STK we check the key index for a WEP key.
620 *
621 * Note that in a regular BSS, multicast frames are sent by the
622 * AP only, associated stations unicast the frame to the AP first
623 * which then multicasts it on their behalf.
624 *
625 * There is also a slight problem in IBSS mode: GTKs are negotiated
626 * with each station, that is something we don't currently handle.
627 * The spec seems to expect that one negotiates the same key with
628 * every station but there's no such requirement; VLANs could be
629 * possible.
630 */
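/*
 * Condensed view of the selection below (informal summary only):
 *
 *	unicast frame + station has a PTK/STK -> use the station key
 *	robust mgmt frame carrying an MMIE    -> use the IGTK indexed by
 *	                                         the MMIE key id
 *	anything else with a usable IV        -> look up the GTK/WEP key
 *	                                         by the key index in the IV
 */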
631
632 if (!ieee80211_has_protected(hdr->frame_control)) {
633 if (!ieee80211_is_mgmt(hdr->frame_control) ||
634 rx->sta == NULL || !test_sta_flags(rx->sta, WLAN_STA_MFP))
635 return RX_CONTINUE;
636 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
637 if (mmie_keyidx < 0)
638 return RX_CONTINUE;
639 }
640
641 /*
642 * No point in finding a key and decrypting if the frame is neither
643 * addressed to us nor a multicast frame.
644 */
645 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
646 return RX_CONTINUE;
647
648 if (rx->sta)
649 stakey = rcu_dereference(rx->sta->key);
650
651 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
652 rx->key = stakey;
653 } else if (mmie_keyidx >= 0) {
654 /* Broadcast/multicast robust management frame / BIP */
655 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
656 (rx->status->flag & RX_FLAG_IV_STRIPPED))
657 return RX_CONTINUE;
658
659 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
660 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
661 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
662 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
663 } else {
664 /*
665 * The device doesn't give us the IV so we won't be
666 * able to look up the key. That's ok though, we
667 * don't need to decrypt the frame, we just won't
668 * be able to keep statistics accurate.
669 * Except for key threshold notifications, should
670 * we somehow allow the driver to tell us which key
671 * the hardware used if this flag is set?
672 */
673 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
674 (rx->status->flag & RX_FLAG_IV_STRIPPED))
675 return RX_CONTINUE;
676
677 hdrlen = ieee80211_hdrlen(hdr->frame_control);
678
679 if (rx->skb->len < 8 + hdrlen)
680 return RX_DROP_UNUSABLE; /* TODO: count this? */
681
682 /*
683 * no need to call ieee80211_wep_get_keyidx,
684 * it verifies a bunch of things we've done already
685 */
686 keyidx = rx->skb->data[hdrlen + 3] >> 6;
687
688 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
689
690 /*
691 * RSNA-protected unicast frames should always be sent with
692 * pairwise or station-to-station keys, but for WEP we allow
693 * using a key index as well.
694 */
695 if (rx->key && rx->key->conf.alg != ALG_WEP &&
696 !is_multicast_ether_addr(hdr->addr1))
697 rx->key = NULL;
698 }
699
700 if (rx->key) {
701 rx->key->tx_rx_count++;
702 /* TODO: add threshold stuff again */
703 } else {
704 return RX_DROP_MONITOR;
705 }
706
707 /* Check for weak IVs if possible */
708 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
709 ieee80211_is_data(hdr->frame_control) &&
710 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
711 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
712 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
713 rx->sta->wep_weak_iv_count++;
714
715 switch (rx->key->conf.alg) {
716 case ALG_WEP:
717 result = ieee80211_crypto_wep_decrypt(rx);
718 break;
719 case ALG_TKIP:
720 result = ieee80211_crypto_tkip_decrypt(rx);
721 break;
722 case ALG_CCMP:
723 result = ieee80211_crypto_ccmp_decrypt(rx);
724 break;
725 case ALG_AES_CMAC:
726 result = ieee80211_crypto_aes_cmac_decrypt(rx);
727 break;
728 }
729
730 /* either the frame has been decrypted or will be dropped */
731 rx->status->flag |= RX_FLAG_DECRYPTED;
732
733 return result;
734 }
735
736 static ieee80211_rx_result debug_noinline
737 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
738 {
739 struct ieee80211_local *local;
740 struct ieee80211_hdr *hdr;
741 struct sk_buff *skb;
742
743 local = rx->local;
744 skb = rx->skb;
745 hdr = (struct ieee80211_hdr *) skb->data;
746
747 if (!local->pspolling)
748 return RX_CONTINUE;
749
750 if (!ieee80211_has_fromds(hdr->frame_control))
751 /* this is not from AP */
752 return RX_CONTINUE;
753
754 if (!ieee80211_is_data(hdr->frame_control))
755 return RX_CONTINUE;
756
757 if (!ieee80211_has_moredata(hdr->frame_control)) {
758 /* AP has no more frames buffered for us */
759 local->pspolling = false;
760 return RX_CONTINUE;
761 }
762
763 /* more data bit is set, let's request a new frame from the AP */
764 ieee80211_send_pspoll(local, rx->sdata);
765
766 return RX_CONTINUE;
767 }
768
769 static void ap_sta_ps_start(struct sta_info *sta)
770 {
771 struct ieee80211_sub_if_data *sdata = sta->sdata;
772 struct ieee80211_local *local = sdata->local;
773
774 atomic_inc(&sdata->bss->num_sta_ps);
775 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
776 if (local->ops->sta_notify)
777 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
778 STA_NOTIFY_SLEEP, &sta->sta);
779 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
780 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
781 sdata->dev->name, sta->sta.addr, sta->sta.aid);
782 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
783 }
784
785 static int ap_sta_ps_end(struct sta_info *sta)
786 {
787 struct ieee80211_sub_if_data *sdata = sta->sdata;
788 struct ieee80211_local *local = sdata->local;
789 struct sk_buff *skb;
790 int sent = 0;
791
792 atomic_dec(&sdata->bss->num_sta_ps);
793
794 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
795 if (local->ops->sta_notify)
796 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
797 STA_NOTIFY_AWAKE, &sta->sta);
798
799 if (!skb_queue_empty(&sta->ps_tx_buf))
800 sta_info_clear_tim_bit(sta);
801
802 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
803 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
804 sdata->dev->name, sta->sta.addr, sta->sta.aid);
805 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
806
807 /* Send all buffered frames to the station */
808 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
809 sent++;
810 skb->requeue = 1;
811 dev_queue_xmit(skb);
812 }
813 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
814 local->total_ps_buffered--;
815 sent++;
816 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
817 printk(KERN_DEBUG "%s: STA %pM aid %d sending PS frame "
818 "since STA not sleeping anymore\n", sdata->dev->name,
819 sta->sta.addr, sta->sta.aid);
820 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
821 skb->requeue = 1;
822 dev_queue_xmit(skb);
823 }
824
825 return sent;
826 }
827
828 static ieee80211_rx_result debug_noinline
829 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
830 {
831 struct sta_info *sta = rx->sta;
832 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
833
834 if (!sta)
835 return RX_CONTINUE;
836
837 /* Update last_rx only for IBSS packets which are for the current
838 * BSSID to avoid keeping the current IBSS network alive in cases where
839 * other STAs are using a different BSSID. */
840 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
841 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
842 NL80211_IFTYPE_ADHOC);
843 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
844 sta->last_rx = jiffies;
845 } else
846 if (!is_multicast_ether_addr(hdr->addr1) ||
847 rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
848 /* Update last_rx only for unicast frames in order to prevent
849 * the Probe Request frames (the only broadcast frames from a
850 * STA in infrastructure mode) from keeping a connection alive.
851 * Mesh beacons will update last_rx if they are found to
852 * match the current local configuration when processed.
853 */
854 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
855 ieee80211_is_beacon(hdr->frame_control)) {
856 rx->sdata->u.mgd.last_beacon = jiffies;
857 } else
858 sta->last_rx = jiffies;
859 }
860
861 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
862 return RX_CONTINUE;
863
864 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
865 ieee80211_sta_rx_notify(rx->sdata, hdr);
866
867 sta->rx_fragments++;
868 sta->rx_bytes += rx->skb->len;
869 sta->last_signal = rx->status->signal;
870 sta->last_qual = rx->status->qual;
871 sta->last_noise = rx->status->noise;
872
873 /*
874 * Change STA power saving mode only at the end of a frame
875 * exchange sequence.
876 */
877 if (!ieee80211_has_morefrags(hdr->frame_control) &&
878 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
879 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
880 if (test_sta_flags(sta, WLAN_STA_PS)) {
881 /*
882 * Ignore doze->wake transitions that are
883 * indicated by non-data frames, the standard
884 * is unclear here, but for example going to
885 * PS mode and then scanning would cause a
886 * doze->wake transition for the probe request,
887 * and that is clearly undesirable.
888 */
889 if (ieee80211_is_data(hdr->frame_control) &&
890 !ieee80211_has_pm(hdr->frame_control))
891 rx->sent_ps_buffered += ap_sta_ps_end(sta);
892 } else {
893 if (ieee80211_has_pm(hdr->frame_control))
894 ap_sta_ps_start(sta);
895 }
896 }
897
898 /* Drop data::nullfunc frames silently, since they are used only to
899 * control station power saving mode. */
900 if (ieee80211_is_nullfunc(hdr->frame_control)) {
901 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
902 /* Update counter and free packet here to avoid counting this
903 * as a dropped packet. */
904 sta->rx_packets++;
905 dev_kfree_skb(rx->skb);
906 return RX_QUEUED;
907 }
908
909 return RX_CONTINUE;
910 } /* ieee80211_rx_h_sta_process */
911
912 static inline struct ieee80211_fragment_entry *
913 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
914 unsigned int frag, unsigned int seq, int rx_queue,
915 struct sk_buff **skb)
916 {
917 struct ieee80211_fragment_entry *entry;
918 int idx;
919
920 idx = sdata->fragment_next;
921 entry = &sdata->fragments[sdata->fragment_next++];
922 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
923 sdata->fragment_next = 0;
924
925 if (!skb_queue_empty(&entry->skb_list)) {
926 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
927 struct ieee80211_hdr *hdr =
928 (struct ieee80211_hdr *) entry->skb_list.next->data;
929 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
930 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
931 "addr1=%pM addr2=%pM\n",
932 sdata->dev->name, idx,
933 jiffies - entry->first_frag_time, entry->seq,
934 entry->last_frag, hdr->addr1, hdr->addr2);
935 #endif
936 __skb_queue_purge(&entry->skb_list);
937 }
938
939 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
940 *skb = NULL;
941 entry->first_frag_time = jiffies;
942 entry->seq = seq;
943 entry->rx_queue = rx_queue;
944 entry->last_frag = frag;
945 entry->ccmp = 0;
946 entry->extra_len = 0;
947
948 return entry;
949 }
950
951 static inline struct ieee80211_fragment_entry *
952 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
953 unsigned int frag, unsigned int seq,
954 int rx_queue, struct ieee80211_hdr *hdr)
955 {
956 struct ieee80211_fragment_entry *entry;
957 int i, idx;
958
959 idx = sdata->fragment_next;
960 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
961 struct ieee80211_hdr *f_hdr;
962
963 idx--;
964 if (idx < 0)
965 idx = IEEE80211_FRAGMENT_MAX - 1;
966
967 entry = &sdata->fragments[idx];
968 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
969 entry->rx_queue != rx_queue ||
970 entry->last_frag + 1 != frag)
971 continue;
972
973 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
974
975 /*
976 * Check ftype and addresses are equal, else check next fragment
977 */
978 if (((hdr->frame_control ^ f_hdr->frame_control) &
979 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
980 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
981 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
982 continue;
983
984 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
985 __skb_queue_purge(&entry->skb_list);
986 continue;
987 }
988 return entry;
989 }
990
991 return NULL;
992 }
993
994 static ieee80211_rx_result debug_noinline
995 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
996 {
997 struct ieee80211_hdr *hdr;
998 u16 sc;
999 __le16 fc;
1000 unsigned int frag, seq;
1001 struct ieee80211_fragment_entry *entry;
1002 struct sk_buff *skb;
1003
1004 hdr = (struct ieee80211_hdr *)rx->skb->data;
1005 fc = hdr->frame_control;
1006 sc = le16_to_cpu(hdr->seq_ctrl);
1007 frag = sc & IEEE80211_SCTL_FRAG;
1008
1009 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1010 (rx->skb)->len < 24 ||
1011 is_multicast_ether_addr(hdr->addr1))) {
1012 /* not fragmented */
1013 goto out;
1014 }
1015 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1016
1017 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1018
1019 if (frag == 0) {
1020 /* This is the first fragment of a new frame. */
1021 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1022 rx->queue, &(rx->skb));
1023 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1024 ieee80211_has_protected(fc)) {
1025 /* Store CCMP PN so that we can verify that the next
1026 * fragment has a sequential PN value. */
1027 entry->ccmp = 1;
1028 memcpy(entry->last_pn,
1029 rx->key->u.ccmp.rx_pn[rx->queue],
1030 CCMP_PN_LEN);
1031 }
1032 return RX_QUEUED;
1033 }
1034
1035 /* This is a fragment for a frame that should already be pending in
1036 * fragment cache. Add this fragment to the end of the pending entry.
1037 */
1038 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1039 if (!entry) {
1040 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1041 return RX_DROP_MONITOR;
1042 }
1043
1044 /* Verify that MPDUs within one MSDU have sequential PN values.
1045 * (IEEE 802.11i, 8.3.3.4.5) */
1046 if (entry->ccmp) {
1047 int i;
1048 u8 pn[CCMP_PN_LEN], *rpn;
1049 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1050 return RX_DROP_UNUSABLE;
1051 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1052 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1053 pn[i]++;
1054 if (pn[i])
1055 break;
1056 }
1057 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
1058 if (memcmp(pn, rpn, CCMP_PN_LEN))
1059 return RX_DROP_UNUSABLE;
1060 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1061 }
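/*
 * Example (illustrative): if the previous fragment carried PN
 * 00:00:00:00:00:ff, the only PN this fragment may use is
 * 00:00:00:00:01:00 -- the loop above performs exactly that byte-wise
 * increment (pn[] is stored most-significant byte first) before the
 * memcmp() against the PN recorded by the decryption handler.
 */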
1062
1063 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1064 __skb_queue_tail(&entry->skb_list, rx->skb);
1065 entry->last_frag = frag;
1066 entry->extra_len += rx->skb->len;
1067 if (ieee80211_has_morefrags(fc)) {
1068 rx->skb = NULL;
1069 return RX_QUEUED;
1070 }
1071
1072 rx->skb = __skb_dequeue(&entry->skb_list);
1073 if (skb_tailroom(rx->skb) < entry->extra_len) {
1074 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1075 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1076 GFP_ATOMIC))) {
1077 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1078 __skb_queue_purge(&entry->skb_list);
1079 return RX_DROP_UNUSABLE;
1080 }
1081 }
1082 while ((skb = __skb_dequeue(&entry->skb_list))) {
1083 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1084 dev_kfree_skb(skb);
1085 }
1086
1087 /* Complete frame has been reassembled - process it now */
1088 rx->flags |= IEEE80211_RX_FRAGMENTED;
1089
1090 out:
1091 if (rx->sta)
1092 rx->sta->rx_packets++;
1093 if (is_multicast_ether_addr(hdr->addr1))
1094 rx->local->dot11MulticastReceivedFrameCount++;
1095 else
1096 ieee80211_led_rx(rx->local);
1097 return RX_CONTINUE;
1098 }
1099
1100 static ieee80211_rx_result debug_noinline
1101 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1102 {
1103 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1104 struct sk_buff *skb;
1105 int no_pending_pkts;
1106 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1107
1108 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1109 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1110 return RX_CONTINUE;
1111
1112 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1113 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1114 return RX_DROP_UNUSABLE;
1115
1116 skb = skb_dequeue(&rx->sta->tx_filtered);
1117 if (!skb) {
1118 skb = skb_dequeue(&rx->sta->ps_tx_buf);
1119 if (skb)
1120 rx->local->total_ps_buffered--;
1121 }
1122 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
1123 skb_queue_empty(&rx->sta->ps_tx_buf);
1124
1125 if (skb) {
1126 struct ieee80211_hdr *hdr =
1127 (struct ieee80211_hdr *) skb->data;
1128
1129 /*
1130 * Tell TX path to send one frame even though the STA may
1131 * still remain in PS mode after this frame exchange.
1132 */
1133 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1134
1135 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1136 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
1137 rx->sta->sta.addr, rx->sta->sta.aid,
1138 skb_queue_len(&rx->sta->ps_tx_buf));
1139 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1140
1141 /* Use MoreData flag to indicate whether there are more
1142 * buffered frames for this STA */
1143 if (no_pending_pkts)
1144 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1145 else
1146 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1147
1148 dev_queue_xmit(skb);
1149
1150 if (no_pending_pkts)
1151 sta_info_clear_tim_bit(rx->sta);
1152 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1153 } else if (!rx->sent_ps_buffered) {
1154 /*
1155 * FIXME: This can be the result of a race condition between
1156 * us expiring a frame and the station polling for it.
1157 * Should we send it a null-func frame indicating we
1158 * have nothing buffered for it?
1159 */
1160 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
1161 "though there are no buffered frames for it\n",
1162 rx->dev->name, rx->sta->sta.addr);
1163 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1164 }
1165
1166 /* Free PS Poll skb here instead of returning RX_DROP that would
1167 * count as a dropped frame. */
1168 dev_kfree_skb(rx->skb);
1169
1170 return RX_QUEUED;
1171 }
1172
1173 static ieee80211_rx_result debug_noinline
1174 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1175 {
1176 u8 *data = rx->skb->data;
1177 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1178
1179 if (!ieee80211_is_data_qos(hdr->frame_control))
1180 return RX_CONTINUE;
1181
1182 /* remove the qos control field, update frame type and meta-data */
1183 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1184 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1185 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1186 /* change frame type to non-QoS */
1187 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1188
1189 return RX_CONTINUE;
1190 }
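/*
 * Net effect (for example): a 26-byte QoS data header shrinks to the
 * plain 24-byte data layout -- everything in front of the QoS Control
 * field is shifted forward by IEEE80211_QOS_CTL_LEN (2) bytes and the
 * subtype loses its QOS_DATA bit, so later handlers see an ordinary
 * data frame.
 */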
1191
1192 static int
1193 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1194 {
1195 if (unlikely(!rx->sta ||
1196 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1197 return -EACCES;
1198
1199 return 0;
1200 }
1201
1202 static int
1203 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1204 {
1205 /*
1206 * Pass through unencrypted frames if the hardware has
1207 * decrypted them already.
1208 */
1209 if (rx->status->flag & RX_FLAG_DECRYPTED)
1210 return 0;
1211
1212 /* Drop unencrypted frames if key is set. */
1213 if (unlikely(!ieee80211_has_protected(fc) &&
1214 !ieee80211_is_nullfunc(fc) &&
1215 (!ieee80211_is_mgmt(fc) ||
1216 (ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1217 rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP))) &&
1218 (rx->key || rx->sdata->drop_unencrypted)))
1219 return -EACCES;
1220 /* BIP does not use Protected field, so need to check MMIE */
1221 if (unlikely(rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP) &&
1222 ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1223 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1224 (rx->key || rx->sdata->drop_unencrypted)))
1225 return -EACCES;
1226
1227 return 0;
1228 }
1229
1230 static int
1231 ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1232 {
1233 struct net_device *dev = rx->dev;
1234 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1235 u16 hdrlen, ethertype;
1236 u8 *payload;
1237 u8 dst[ETH_ALEN];
1238 u8 src[ETH_ALEN] __aligned(2);
1239 struct sk_buff *skb = rx->skb;
1240 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1241
1242 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1243 return -1;
1244
1245 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1246
1247 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1248 * header
1249 * IEEE 802.11 address fields:
1250 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1251 * 0 0 DA SA BSSID n/a
1252 * 0 1 DA BSSID SA n/a
1253 * 1 0 BSSID SA DA n/a
1254 * 1 1 RA TA DA SA
1255 */
1256 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1257 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1258
1259 switch (hdr->frame_control &
1260 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1261 case cpu_to_le16(IEEE80211_FCTL_TODS):
1262 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1263 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1264 return -1;
1265 break;
1266 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1267 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1268 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1269 return -1;
1270 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1271 struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
1272 (skb->data + hdrlen);
1273 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
1274 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
1275 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
1276 memcpy(src, meshdr->eaddr2, ETH_ALEN);
1277 }
1278 }
1279 break;
1280 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
1281 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1282 (is_multicast_ether_addr(dst) &&
1283 !compare_ether_addr(src, dev->dev_addr)))
1284 return -1;
1285 break;
1286 case cpu_to_le16(0):
1287 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1288 return -1;
1289 break;
1290 }
1291
1292 if (unlikely(skb->len - hdrlen < 8))
1293 return -1;
1294
1295 payload = skb->data + hdrlen;
1296 ethertype = (payload[6] << 8) | payload[7];
1297
1298 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1299 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1300 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1301 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1302 * replace EtherType */
1303 skb_pull(skb, hdrlen + 6);
1304 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1305 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1306 } else {
1307 struct ethhdr *ehdr;
1308 __be16 len;
1309
1310 skb_pull(skb, hdrlen);
1311 len = htons(skb->len);
1312 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1313 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1314 memcpy(ehdr->h_source, src, ETH_ALEN);
1315 ehdr->h_proto = len;
1316 }
1317 return 0;
1318 }
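/*
 * Worked example (illustrative): for a ToDS unicast frame (addr1 = BSSID,
 * addr2 = SA, addr3 = DA) carrying an RFC 1042 SNAP header, the code above
 * strips the 802.11 header plus the 6-byte LLC/SNAP prefix and prepends
 * addr2 as the Ethernet source and addr3 as the destination, leaving the
 * original EtherType in place.
 */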
1319
1320 /*
1321 * requires that rx->skb is a frame with ethernet header
1322 */
1323 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1324 {
1325 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1326 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1327 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1328
1329 /*
1330 * Allow EAPOL frames to us/the PAE group address regardless
1331 * of whether the frame was encrypted or not.
1332 */
1333 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1334 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1335 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1336 return true;
1337
1338 if (ieee80211_802_1x_port_control(rx) ||
1339 ieee80211_drop_unencrypted(rx, fc))
1340 return false;
1341
1342 return true;
1343 }
1344
1345 /*
1346 * requires that rx->skb is a frame with ethernet header
1347 */
1348 static void
1349 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1350 {
1351 struct net_device *dev = rx->dev;
1352 struct ieee80211_local *local = rx->local;
1353 struct sk_buff *skb, *xmit_skb;
1354 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1355 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1356 struct sta_info *dsta;
1357
1358 skb = rx->skb;
1359 xmit_skb = NULL;
1360
1361 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1362 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1363 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1364 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1365 if (is_multicast_ether_addr(ehdr->h_dest)) {
1366 /*
1367 * send multicast frames both to higher layers in
1368 * local net stack and back to the wireless medium
1369 */
1370 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1371 if (!xmit_skb && net_ratelimit())
1372 printk(KERN_DEBUG "%s: failed to clone "
1373 "multicast frame\n", dev->name);
1374 } else {
1375 dsta = sta_info_get(local, skb->data);
1376 if (dsta && dsta->sdata->dev == dev) {
1377 /*
1378 * The destination station is associated to
1379 * this AP (in this VLAN), so send the frame
1380 * directly to it and do not pass it to local
1381 * net stack.
1382 */
1383 xmit_skb = skb;
1384 skb = NULL;
1385 }
1386 }
1387 }
1388
1389 if (skb) {
1390 int align __maybe_unused;
1391
1392 #if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1393 /*
1394 * 'align' will only take the values 0 or 2 here
1395 * since all frames are required to be aligned
1396 * to 2-byte boundaries when being passed to
1397 * mac80211. That also explains the __skb_push()
1398 * below.
1399 */
1400 align = (unsigned long)skb->data & 3;
1401 if (align) {
1402 if (WARN_ON(skb_headroom(skb) < 3)) {
1403 dev_kfree_skb(skb);
1404 skb = NULL;
1405 } else {
1406 u8 *data = skb->data;
1407 size_t len = skb->len;
1408 u8 *new = __skb_push(skb, align);
1409 memmove(new, data, len);
1410 __skb_trim(skb, len);
1411 }
1412 }
1413 #endif
1414
1415 if (skb) {
1416 /* deliver to local stack */
1417 skb->protocol = eth_type_trans(skb, dev);
1418 memset(skb->cb, 0, sizeof(skb->cb));
1419 netif_rx(skb);
1420 }
1421 }
1422
1423 if (xmit_skb) {
1424 /* send to wireless media */
1425 xmit_skb->protocol = htons(ETH_P_802_3);
1426 skb_reset_network_header(xmit_skb);
1427 skb_reset_mac_header(xmit_skb);
1428 dev_queue_xmit(xmit_skb);
1429 }
1430 }
1431
1432 static ieee80211_rx_result debug_noinline
1433 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1434 {
1435 struct net_device *dev = rx->dev;
1436 struct ieee80211_local *local = rx->local;
1437 u16 ethertype;
1438 u8 *payload;
1439 struct sk_buff *skb = rx->skb, *frame = NULL;
1440 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1441 __le16 fc = hdr->frame_control;
1442 const struct ethhdr *eth;
1443 int remaining, err;
1444 u8 dst[ETH_ALEN];
1445 u8 src[ETH_ALEN];
1446
1447 if (unlikely(!ieee80211_is_data(fc)))
1448 return RX_CONTINUE;
1449
1450 if (unlikely(!ieee80211_is_data_present(fc)))
1451 return RX_DROP_MONITOR;
1452
1453 if (!(rx->flags & IEEE80211_RX_AMSDU))
1454 return RX_CONTINUE;
1455
1456 err = ieee80211_data_to_8023(rx);
1457 if (unlikely(err))
1458 return RX_DROP_UNUSABLE;
1459
1460 skb->dev = dev;
1461
1462 dev->stats.rx_packets++;
1463 dev->stats.rx_bytes += skb->len;
1464
1465 /* skip the wrapping header */
1466 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1467 if (!eth)
1468 return RX_DROP_UNUSABLE;
1469
1470 while (skb != frame) {
1471 u8 padding;
1472 __be16 len = eth->h_proto;
1473 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1474
1475 remaining = skb->len;
1476 memcpy(dst, eth->h_dest, ETH_ALEN);
1477 memcpy(src, eth->h_source, ETH_ALEN);
1478
1479 padding = ((4 - subframe_len) & 0x3);
1480 /* the last MSDU has no padding */
1481 if (subframe_len > remaining)
1482 return RX_DROP_UNUSABLE;
1483
1484 skb_pull(skb, sizeof(struct ethhdr));
1485 /* if last subframe reuse skb */
1486 if (remaining <= subframe_len + padding)
1487 frame = skb;
1488 else {
1489 /*
1490 * Allocate and reserve two bytes more for payload
1491 * alignment since sizeof(struct ethhdr) is 14.
1492 */
1493 frame = dev_alloc_skb(
1494 ALIGN(local->hw.extra_tx_headroom, 4) +
1495 subframe_len + 2);
1496
1497 if (frame == NULL)
1498 return RX_DROP_UNUSABLE;
1499
1500 skb_reserve(frame,
1501 ALIGN(local->hw.extra_tx_headroom, 4) +
1502 sizeof(struct ethhdr) + 2);
1503 memcpy(skb_put(frame, ntohs(len)), skb->data,
1504 ntohs(len));
1505
1506 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1507 padding);
1508 if (!eth) {
1509 dev_kfree_skb(frame);
1510 return RX_DROP_UNUSABLE;
1511 }
1512 }
1513
1514 skb_reset_network_header(frame);
1515 frame->dev = dev;
1516 frame->priority = skb->priority;
1517 rx->skb = frame;
1518
1519 payload = frame->data;
1520 ethertype = (payload[6] << 8) | payload[7];
1521
1522 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1523 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1524 compare_ether_addr(payload,
1525 bridge_tunnel_header) == 0)) {
1526 /* remove RFC1042 or Bridge-Tunnel
1527 * encapsulation and replace EtherType */
1528 skb_pull(frame, 6);
1529 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1530 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1531 } else {
1532 memcpy(skb_push(frame, sizeof(__be16)),
1533 &len, sizeof(__be16));
1534 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1535 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1536 }
1537
1538 if (!ieee80211_frame_allowed(rx, fc)) {
1539 if (skb == frame) /* last frame */
1540 return RX_DROP_UNUSABLE;
1541 dev_kfree_skb(frame);
1542 continue;
1543 }
1544
1545 ieee80211_deliver_skb(rx);
1546 }
1547
1548 return RX_QUEUED;
1549 }
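/*
 * A-MSDU subframe layout handled above (noted here for orientation): each
 * subframe is DA(6) | SA(6) | length(2) | MSDU, padded to a multiple of
 * four bytes except for the last one -- hence the "(4 - subframe_len) & 3"
 * padding and the "reuse the skb for the last subframe" shortcut.
 */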
1550
1551 #ifdef CONFIG_MAC80211_MESH
1552 static ieee80211_rx_result
1553 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1554 {
1555 struct ieee80211_hdr *hdr;
1556 struct ieee80211s_hdr *mesh_hdr;
1557 unsigned int hdrlen;
1558 struct sk_buff *skb = rx->skb, *fwd_skb;
1559
1560 hdr = (struct ieee80211_hdr *) skb->data;
1561 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1562 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1563
1564 if (!ieee80211_is_data(hdr->frame_control))
1565 return RX_CONTINUE;
1566
1567 if (!mesh_hdr->ttl)
1568 /* illegal frame */
1569 return RX_DROP_MONITOR;
1570
1571 if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
1572 struct ieee80211_sub_if_data *sdata;
1573 struct mesh_path *mppath;
1574
1575 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1576 rcu_read_lock();
1577 mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
1578 if (!mppath) {
1579 mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
1580 } else {
1581 spin_lock_bh(&mppath->state_lock);
1582 mppath->exp_time = jiffies;
1583 if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
1584 memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
1585 spin_unlock_bh(&mppath->state_lock);
1586 }
1587 rcu_read_unlock();
1588 }
1589
1590 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1591 return RX_CONTINUE;
1592
1593 mesh_hdr->ttl--;
1594
1595 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1596 if (!mesh_hdr->ttl)
1597 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1598 dropped_frames_ttl);
1599 else {
1600 struct ieee80211_hdr *fwd_hdr;
1601 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1602
1603 if (!fwd_skb && net_ratelimit())
1604 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1605 rx->dev->name);
1606
1607 if (fwd_skb) { /* only forward if the copy succeeded */
1608 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1609 /* Save TA to addr1 to send TA a path error if a
1610 * suitable next hop is not found */
1611 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1612 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1613 fwd_skb->dev = rx->local->mdev;
1614 fwd_skb->iif = rx->dev->ifindex;
1615 dev_queue_xmit(fwd_skb);
1616 }
1617 }
1618 }
1619
1620 if (is_multicast_ether_addr(hdr->addr3) ||
1621 rx->dev->flags & IFF_PROMISC)
1622 return RX_CONTINUE;
1623 else
1624 return RX_DROP_MONITOR;
1625 }
1626 #endif
1627
1628 static ieee80211_rx_result debug_noinline
1629 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1630 {
1631 struct net_device *dev = rx->dev;
1632 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1633 __le16 fc = hdr->frame_control;
1634 int err;
1635
1636 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1637 return RX_CONTINUE;
1638
1639 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1640 return RX_DROP_MONITOR;
1641
1642 err = ieee80211_data_to_8023(rx);
1643 if (unlikely(err))
1644 return RX_DROP_UNUSABLE;
1645
1646 if (!ieee80211_frame_allowed(rx, fc))
1647 return RX_DROP_MONITOR;
1648
1649 rx->skb->dev = dev;
1650
1651 dev->stats.rx_packets++;
1652 dev->stats.rx_bytes += rx->skb->len;
1653
1654 ieee80211_deliver_skb(rx);
1655
1656 return RX_QUEUED;
1657 }
1658
1659 static ieee80211_rx_result debug_noinline
1660 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1661 {
1662 struct ieee80211_local *local = rx->local;
1663 struct ieee80211_hw *hw = &local->hw;
1664 struct sk_buff *skb = rx->skb;
1665 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1666 struct tid_ampdu_rx *tid_agg_rx;
1667 u16 start_seq_num;
1668 u16 tid;
1669
1670 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1671 return RX_CONTINUE;
1672
1673 if (ieee80211_is_back_req(bar->frame_control)) {
1674 if (!rx->sta)
1675 return RX_CONTINUE;
1676 tid = le16_to_cpu(bar->control) >> 12;
1677 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1678 != HT_AGG_STATE_OPERATIONAL)
1679 return RX_CONTINUE;
1680 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1681
1682 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1683
1684 /* reset session timer */
1685 if (tid_agg_rx->timeout)
1686 mod_timer(&tid_agg_rx->session_timer,
1687 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1688
1689 /* manage the reordering buffer according to the requested
1690 * sequence number */
1691 rcu_read_lock();
1692 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
1693 start_seq_num, 1);
1694 rcu_read_unlock();
1695 return RX_DROP_UNUSABLE;
1696 }
1697
1698 return RX_CONTINUE;
1699 }
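/*
 * Field extraction note (illustrative): in the BAR control field the TID
 * occupies the top four bits, hence the ">> 12"; the starting sequence
 * number field keeps its low four bits for the fragment number, hence the
 * ">> 4" before start_seq_num is handed to the reordering code.
 */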
1700
1701 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1702 struct ieee80211_mgmt *mgmt,
1703 size_t len)
1704 {
1705 struct ieee80211_local *local = sdata->local;
1706 struct sk_buff *skb;
1707 struct ieee80211_mgmt *resp;
1708
1709 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1710 /* Not to own unicast address */
1711 return;
1712 }
1713
1714 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1715 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1716 /* Not from the current AP. */
1717 return;
1718 }
1719
1720 if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATE) {
1721 /* Association in progress; ignore SA Query */
1722 return;
1723 }
1724
1725 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1726 /* Too short SA Query request frame */
1727 return;
1728 }
1729
1730 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1731 if (skb == NULL)
1732 return;
1733
1734 skb_reserve(skb, local->hw.extra_tx_headroom);
1735 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1736 memset(resp, 0, 24);
1737 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1738 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1739 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1740 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1741 IEEE80211_STYPE_ACTION);
1742 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1743 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1744 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1745 memcpy(resp->u.action.u.sa_query.trans_id,
1746 mgmt->u.action.u.sa_query.trans_id,
1747 WLAN_SA_QUERY_TR_ID_LEN);
1748
1749 ieee80211_tx_skb(sdata, skb, 1);
1750 }
1751
1752 static ieee80211_rx_result debug_noinline
1753 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1754 {
1755 struct ieee80211_local *local = rx->local;
1756 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1757 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1758 struct ieee80211_bss *bss;
1759 int len = rx->skb->len;
1760
1761 if (!ieee80211_is_action(mgmt->frame_control))
1762 return RX_CONTINUE;
1763
1764 if (!rx->sta)
1765 return RX_DROP_MONITOR;
1766
1767 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1768 return RX_DROP_MONITOR;
1769
1770 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1771 return RX_DROP_MONITOR;
1772
1773 /* all categories we currently handle have action_code */
1774 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1775 return RX_DROP_MONITOR;
1776
1777 switch (mgmt->u.action.category) {
1778 case WLAN_CATEGORY_BACK:
1779 /*
1780 * The aggregation code is not prepared to handle
1781 * anything but STA/AP due to the BSSID handling;
1782 * IBSS could work in the code but isn't supported
1783 * by drivers or the standard.
1784 */
1785 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1786 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1787 sdata->vif.type != NL80211_IFTYPE_AP)
1788 return RX_DROP_MONITOR;
1789
1790 switch (mgmt->u.action.u.addba_req.action_code) {
1791 case WLAN_ACTION_ADDBA_REQ:
1792 if (len < (IEEE80211_MIN_ACTION_SIZE +
1793 sizeof(mgmt->u.action.u.addba_req)))
1794 return RX_DROP_MONITOR;
1795 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1796 break;
1797 case WLAN_ACTION_ADDBA_RESP:
1798 if (len < (IEEE80211_MIN_ACTION_SIZE +
1799 sizeof(mgmt->u.action.u.addba_resp)))
1800 return RX_DROP_MONITOR;
1801 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1802 break;
1803 case WLAN_ACTION_DELBA:
1804 if (len < (IEEE80211_MIN_ACTION_SIZE +
1805 sizeof(mgmt->u.action.u.delba)))
1806 return RX_DROP_MONITOR;
1807 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1808 break;
1809 }
1810 break;
1811 case WLAN_CATEGORY_SPECTRUM_MGMT:
1812 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1813 return RX_DROP_MONITOR;
1814
1815 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1816 return RX_DROP_MONITOR;
1817
1818 switch (mgmt->u.action.u.measurement.action_code) {
1819 case WLAN_ACTION_SPCT_MSR_REQ:
1820 if (len < (IEEE80211_MIN_ACTION_SIZE +
1821 sizeof(mgmt->u.action.u.measurement)))
1822 return RX_DROP_MONITOR;
1823 ieee80211_process_measurement_req(sdata, mgmt, len);
1824 break;
1825 case WLAN_ACTION_SPCT_CHL_SWITCH:
1826 if (len < (IEEE80211_MIN_ACTION_SIZE +
1827 sizeof(mgmt->u.action.u.chan_switch)))
1828 return RX_DROP_MONITOR;
1829
1830 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1831 return RX_DROP_MONITOR;
1832
1833 bss = ieee80211_rx_bss_get(local, sdata->u.mgd.bssid,
1834 local->hw.conf.channel->center_freq,
1835 sdata->u.mgd.ssid,
1836 sdata->u.mgd.ssid_len);
1837 if (!bss)
1838 return RX_DROP_MONITOR;
1839
1840 ieee80211_process_chanswitch(sdata,
1841 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1842 ieee80211_rx_bss_put(local, bss);
1843 break;
1844 }
1845 break;
1846 case WLAN_CATEGORY_SA_QUERY:
1847 if (len < (IEEE80211_MIN_ACTION_SIZE +
1848 sizeof(mgmt->u.action.u.sa_query)))
1849 return RX_DROP_MONITOR;
1850 switch (mgmt->u.action.u.sa_query.action) {
1851 case WLAN_ACTION_SA_QUERY_REQUEST:
1852 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1853 return RX_DROP_MONITOR;
1854 ieee80211_process_sa_query_req(sdata, mgmt, len);
1855 break;
1856 case WLAN_ACTION_SA_QUERY_RESPONSE:
1857 /*
1858 * SA Query response is currently only used in AP mode
1859 * and it is processed in user space.
1860 */
1861 return RX_CONTINUE;
1862 }
1863 break;
1864 default:
1865 return RX_CONTINUE;
1866 }
1867
1868 rx->sta->rx_packets++;
1869 dev_kfree_skb(rx->skb);
1870 return RX_QUEUED;
1871 }
1872
1873 static ieee80211_rx_result debug_noinline
1874 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1875 {
1876 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1877 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1878
1879 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1880 return RX_DROP_MONITOR;
1881
1882 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1883 return RX_DROP_MONITOR;
1884
1885 if (ieee80211_vif_is_mesh(&sdata->vif))
1886 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1887
1888 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1889 return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
1890
1891 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1892 return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1893
1894 return RX_DROP_MONITOR;
1895 }
1896
1897 static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1898 struct ieee80211_hdr *hdr,
1899 struct ieee80211_rx_data *rx)
1900 {
1901 int keyidx;
1902 unsigned int hdrlen;
1903
1904 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1905 if (rx->skb->len >= hdrlen + 4)
1906 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1907 else
1908 keyidx = -1;
1909
1910 if (!rx->sta) {
1911 /*
1912 * Some hardware seems to generate incorrect Michael MIC
1913 * reports; ignore them to avoid triggering countermeasures.
1914 */
1915 goto ignore;
1916 }
1917
1918 if (!ieee80211_has_protected(hdr->frame_control))
1919 goto ignore;
1920
1921 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1922 /*
1923 * APs with pairwise keys should never receive Michael MIC
1924 * errors for non-zero keyidx because these are reserved for
1925 * group keys and only the AP is sending real multicast
1926 * frames in the BSS.
1927 */
1928 goto ignore;
1929 }
1930
1931 if (!ieee80211_is_data(hdr->frame_control) &&
1932 !ieee80211_is_auth(hdr->frame_control))
1933 goto ignore;
1934
1935 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
1936 ignore:
1937 dev_kfree_skb(rx->skb);
1938 rx->skb = NULL;
1939 }
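/*
 * Note on the keyidx extraction above: in the IV/ExtIV that follows the
 * 802.11 header, byte 3 carries the key ID in its two most significant
 * bits, which is why data[hdrlen + 3] is shifted right by 6.
 */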
1940
1941 /* TODO: use IEEE80211_RX_FRAGMENTED */
1942 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1943 {
1944 struct ieee80211_sub_if_data *sdata;
1945 struct ieee80211_local *local = rx->local;
1946 struct ieee80211_rtap_hdr {
1947 struct ieee80211_radiotap_header hdr;
1948 u8 flags;
1949 u8 rate;
1950 __le16 chan_freq;
1951 __le16 chan_flags;
1952 } __attribute__ ((packed)) *rthdr;
1953 struct sk_buff *skb = rx->skb, *skb2;
1954 struct net_device *prev_dev = NULL;
1955 struct ieee80211_rx_status *status = rx->status;
1956
1957 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1958 goto out_free_skb;
1959
1960 if (skb_headroom(skb) < sizeof(*rthdr) &&
1961 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1962 goto out_free_skb;
1963
1964 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1965 memset(rthdr, 0, sizeof(*rthdr));
1966 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1967 rthdr->hdr.it_present =
1968 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1969 (1 << IEEE80211_RADIOTAP_RATE) |
1970 (1 << IEEE80211_RADIOTAP_CHANNEL));
1971
1972 rthdr->rate = rx->rate->bitrate / 5;
1973 rthdr->chan_freq = cpu_to_le16(status->freq);
1974
1975 if (status->band == IEEE80211_BAND_5GHZ)
1976 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1977 IEEE80211_CHAN_5GHZ);
1978 else
1979 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1980 IEEE80211_CHAN_2GHZ);
1981
1982 skb_set_mac_header(skb, 0);
1983 skb->ip_summed = CHECKSUM_UNNECESSARY;
1984 skb->pkt_type = PACKET_OTHERHOST;
1985 skb->protocol = htons(ETH_P_802_2);
1986
1987 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1988 if (!netif_running(sdata->dev))
1989 continue;
1990
1991 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1992 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1993 continue;
1994
1995 if (prev_dev) {
1996 skb2 = skb_clone(skb, GFP_ATOMIC);
1997 if (skb2) {
1998 skb2->dev = prev_dev;
1999 netif_rx(skb2);
2000 }
2001 }
2002
2003 prev_dev = sdata->dev;
2004 sdata->dev->stats.rx_packets++;
2005 sdata->dev->stats.rx_bytes += skb->len;
2006 }
2007
2008 if (prev_dev) {
2009 skb->dev = prev_dev;
2010 netif_rx(skb);
2011 skb = NULL;
2012 } else
2013 goto out_free_skb;
2014
2015 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
2016 return;
2017
2018 out_free_skb:
2019 dev_kfree_skb(skb);
2020 }
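/*
 * For orientation, the cooked-monitor radiotap header built above is a
 * fixed 14 bytes (assuming the usual structure sizes; illustrative only):
 * the 8 byte radiotap header, then flags (1), rate (1), channel frequency
 * (2) and channel flags (2), which is also the value written to it_len.
 */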
2021
2022
2023 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2024 struct ieee80211_rx_data *rx,
2025 struct sk_buff *skb)
2026 {
2027 ieee80211_rx_result res = RX_DROP_MONITOR;
2028
2029 rx->skb = skb;
2030 rx->sdata = sdata;
2031 rx->dev = sdata->dev;
2032
2033 #define CALL_RXH(rxh) \
2034 do { \
2035 res = rxh(rx); \
2036 if (res != RX_CONTINUE) \
2037 goto rxh_done; \
2038 } while (0);
2039
2040 CALL_RXH(ieee80211_rx_h_passive_scan)
2041 CALL_RXH(ieee80211_rx_h_check)
2042 CALL_RXH(ieee80211_rx_h_decrypt)
2043 CALL_RXH(ieee80211_rx_h_check_more_data)
2044 CALL_RXH(ieee80211_rx_h_sta_process)
2045 CALL_RXH(ieee80211_rx_h_defragment)
2046 CALL_RXH(ieee80211_rx_h_ps_poll)
2047 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2048 /* must be after MMIC verify so header is counted in MPDU mic */
2049 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2050 CALL_RXH(ieee80211_rx_h_amsdu)
2051 #ifdef CONFIG_MAC80211_MESH
2052 if (ieee80211_vif_is_mesh(&sdata->vif))
2053 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2054 #endif
2055 CALL_RXH(ieee80211_rx_h_data)
2056 CALL_RXH(ieee80211_rx_h_ctrl)
2057 CALL_RXH(ieee80211_rx_h_action)
2058 CALL_RXH(ieee80211_rx_h_mgmt)
2059
2060 #undef CALL_RXH
2061
2062 rxh_done:
2063 switch (res) {
2064 case RX_DROP_MONITOR:
2065 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2066 if (rx->sta)
2067 rx->sta->rx_dropped++;
2068 /* fall through */
2069 case RX_CONTINUE:
2070 ieee80211_rx_cooked_monitor(rx);
2071 break;
2072 case RX_DROP_UNUSABLE:
2073 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2074 if (rx->sta)
2075 rx->sta->rx_dropped++;
2076 dev_kfree_skb(rx->skb);
2077 break;
2078 case RX_QUEUED:
2079 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
2080 break;
2081 }
2082 }
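/*
 * A minimal sketch of what a single CALL_RXH() line above expands to, to
 * make the handler chaining explicit (illustrative only):
 *
 *	res = ieee80211_rx_h_decrypt(rx);
 *	if (res != RX_CONTINUE)
 *		goto rxh_done;
 */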
2083
2084 /* main receive path */
2085
2086 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2087 struct ieee80211_rx_data *rx,
2088 struct ieee80211_hdr *hdr)
2089 {
2090 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type);
2091 int multicast = is_multicast_ether_addr(hdr->addr1);
2092
2093 switch (sdata->vif.type) {
2094 case NL80211_IFTYPE_STATION:
2095 if (!bssid)
2096 return 0;
2097 if (!ieee80211_bssid_match(bssid, sdata->u.mgd.bssid)) {
2098 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2099 return 0;
2100 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2101 } else if (!multicast &&
2102 compare_ether_addr(sdata->dev->dev_addr,
2103 hdr->addr1) != 0) {
2104 if (!(sdata->dev->flags & IFF_PROMISC))
2105 return 0;
2106 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2107 }
2108 break;
2109 case NL80211_IFTYPE_ADHOC:
2110 if (!bssid)
2111 return 0;
2112 if (ieee80211_is_beacon(hdr->frame_control)) {
2113 return 1;
2114 }
2115 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2116 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2117 return 0;
2118 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2119 } else if (!multicast &&
2120 compare_ether_addr(sdata->dev->dev_addr,
2121 hdr->addr1) != 0) {
2122 if (!(sdata->dev->flags & IFF_PROMISC))
2123 return 0;
2124 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2125 } else if (!rx->sta) {
2126 int rate_idx;
2127 if (rx->status->flag & RX_FLAG_HT)
2128 rate_idx = 0; /* TODO: HT rates */
2129 else
2130 rate_idx = rx->status->rate_idx;
2131 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
2132 BIT(rate_idx));
2133 }
2134 break;
2135 case NL80211_IFTYPE_MESH_POINT:
2136 if (!multicast &&
2137 compare_ether_addr(sdata->dev->dev_addr,
2138 hdr->addr1) != 0) {
2139 if (!(sdata->dev->flags & IFF_PROMISC))
2140 return 0;
2141
2142 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2143 }
2144 break;
2145 case NL80211_IFTYPE_AP_VLAN:
2146 case NL80211_IFTYPE_AP:
2147 if (!bssid) {
2148 if (compare_ether_addr(sdata->dev->dev_addr,
2149 hdr->addr1))
2150 return 0;
2151 } else if (!ieee80211_bssid_match(bssid,
2152 sdata->dev->dev_addr)) {
2153 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2154 return 0;
2155 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2156 }
2157 break;
2158 case NL80211_IFTYPE_WDS:
2159 if (bssid || !ieee80211_is_data(hdr->frame_control))
2160 return 0;
2161 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2162 return 0;
2163 break;
2164 case NL80211_IFTYPE_MONITOR:
2165 /* take everything */
2166 break;
2167 case NL80211_IFTYPE_UNSPECIFIED:
2168 case __NL80211_IFTYPE_AFTER_LAST:
2169 /* should never get here */
2170 WARN_ON(1);
2171 break;
2172 }
2173
2174 return 1;
2175 }
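/*
 * Summary of the contract above (descriptive only): returning 0 means the
 * frame is not for this interface at all, while returning 1 with
 * IEEE80211_RX_RA_MATCH cleared means the frame is accepted (e.g. while
 * scanning or in promiscuous mode) but not directly addressed to us, so
 * the RX handlers only do passive processing on it.
 */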
2176
2177 /*
2178 * This is the actual Rx frames handler. As it belongs to the Rx path it
2179 * must be called with rcu_read_lock protection.
2180 */
2181 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2182 struct sk_buff *skb,
2183 struct ieee80211_rx_status *status,
2184 struct ieee80211_rate *rate)
2185 {
2186 struct ieee80211_local *local = hw_to_local(hw);
2187 struct ieee80211_sub_if_data *sdata;
2188 struct ieee80211_hdr *hdr;
2189 struct ieee80211_rx_data rx;
2190 int prepares;
2191 struct ieee80211_sub_if_data *prev = NULL;
2192 struct sk_buff *skb_new;
2193
2194 hdr = (struct ieee80211_hdr *)skb->data;
2195 memset(&rx, 0, sizeof(rx));
2196 rx.skb = skb;
2197 rx.local = local;
2198
2199 rx.status = status;
2200 rx.rate = rate;
2201
2202 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2203 local->dot11ReceivedFragmentCount++;
2204
2205 rx.sta = sta_info_get(local, hdr->addr2);
2206 if (rx.sta) {
2207 rx.sdata = rx.sta->sdata;
2208 rx.dev = rx.sta->sdata->dev;
2209 }
2210
2211 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
2212 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
2213 return;
2214 }
2215
2216 if (unlikely(local->sw_scanning || local->hw_scanning))
2217 rx.flags |= IEEE80211_RX_IN_SCAN;
2218
2219 ieee80211_parse_qos(&rx);
2220 ieee80211_verify_alignment(&rx);
2221
2222 skb = rx.skb;
2223
2224 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2225 if (!netif_running(sdata->dev))
2226 continue;
2227
2228 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
2229 continue;
2230
2231 rx.flags |= IEEE80211_RX_RA_MATCH;
2232 prepares = prepare_for_handlers(sdata, &rx, hdr);
2233
2234 if (!prepares)
2235 continue;
2236
2237 /*
2238 * frame is destined for this interface; handling for the last
2239 * matching interface is deferred until after the loop so the
2240 * original SKB can be used for it and one copy is saved
2241 */
2242
2243 if (!prev) {
2244 prev = sdata;
2245 continue;
2246 }
2247
2248 /*
2249 * frame was destined for the previous interface
2250 * so invoke RX handlers for it
2251 */
2252
2253 skb_new = skb_copy(skb, GFP_ATOMIC);
2254 if (!skb_new) {
2255 if (net_ratelimit())
2256 printk(KERN_DEBUG "%s: failed to copy "
2257 "multicast frame for %s\n",
2258 wiphy_name(local->hw.wiphy),
2259 prev->dev->name);
2260 continue;
2261 }
2262 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2263 prev = sdata;
2264 }
2265 if (prev)
2266 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2267 else
2268 dev_kfree_skb(skb);
2269 }
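/*
 * Example of the copy avoidance in the interface loop above: with three
 * matching interfaces A, B and C, the RX handlers run on skb copies for A
 * and B (each made when the next match is found) and on the original skb
 * for C after the loop, so two copies are made instead of three.
 */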
2270
2271 #define SEQ_MODULO 0x1000
2272 #define SEQ_MASK 0xfff
2273
2274 static inline int seq_less(u16 sq1, u16 sq2)
2275 {
2276 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
2277 }
2278
2279 static inline u16 seq_inc(u16 sq)
2280 {
2281 return (sq + 1) & SEQ_MASK;
2282 }
2283
2284 static inline u16 seq_sub(u16 sq1, u16 sq2)
2285 {
2286 return (sq1 - sq2) & SEQ_MASK;
2287 }
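/*
 * Worked example for the modulo-4096 sequence helpers above (values are
 * illustrative):
 *
 *	seq_inc(0xfff)         == 0x000   (wraps within SEQ_MASK)
 *	seq_sub(0x002, 0xffe)  == 0x004   (distance across the wrap)
 *	seq_less(0xffe, 0x002) is true, since
 *	    ((0xffe - 0x002) & SEQ_MASK) == 0xffc > SEQ_MODULO / 2
 */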
2288
2289
2290 /*
2291 * As this function belongs to the Rx path it must be called with
2292 * the proper rcu_read_lock protection for its flow.
2293 */
2294 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2295 struct tid_ampdu_rx *tid_agg_rx,
2296 struct sk_buff *skb,
2297 struct ieee80211_rx_status *rxstatus,
2298 u16 mpdu_seq_num,
2299 int bar_req)
2300 {
2301 struct ieee80211_local *local = hw_to_local(hw);
2302 struct ieee80211_rx_status status;
2303 u16 head_seq_num, buf_size;
2304 int index;
2305 struct ieee80211_supported_band *sband;
2306 struct ieee80211_rate *rate;
2307
2308 buf_size = tid_agg_rx->buf_size;
2309 head_seq_num = tid_agg_rx->head_seq_num;
2310
2311 /* frame with out of date sequence number */
2312 if (seq_less(mpdu_seq_num, head_seq_num)) {
2313 dev_kfree_skb(skb);
2314 return 1;
2315 }
2316
2317 /* if the frame sequence number exceeds our buffering window size or
2318 * a Block Ack Request arrived - release stored frames */
2319 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
2320 /* new head to the ordering buffer */
2321 if (bar_req)
2322 head_seq_num = mpdu_seq_num;
2323 else
2324 head_seq_num =
2325 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2326 /* release stored frames up to new head to stack */
2327 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2328 index = seq_sub(tid_agg_rx->head_seq_num,
2329 tid_agg_rx->ssn)
2330 % tid_agg_rx->buf_size;
2331
2332 if (tid_agg_rx->reorder_buf[index]) {
2333 /* release the reordered frames to stack */
2334 memcpy(&status,
2335 tid_agg_rx->reorder_buf[index]->cb,
2336 sizeof(status));
2337 sband = local->hw.wiphy->bands[status.band];
2338 if (status.flag & RX_FLAG_HT) {
2339 /* TODO: HT rates */
2340 rate = sband->bitrates;
2341 } else {
2342 rate = &sband->bitrates
2343 [status.rate_idx];
2344 }
2345 __ieee80211_rx_handle_packet(hw,
2346 tid_agg_rx->reorder_buf[index],
2347 &status, rate);
2348 tid_agg_rx->stored_mpdu_num--;
2349 tid_agg_rx->reorder_buf[index] = NULL;
2350 }
2351 tid_agg_rx->head_seq_num =
2352 seq_inc(tid_agg_rx->head_seq_num);
2353 }
2354 if (bar_req)
2355 return 1;
2356 }
2357
2358 /* now the new frame is always within the range of the
2359 * reordering buffer window */
2360 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
2361 % tid_agg_rx->buf_size;
2362 /* check if we already stored this frame */
2363 if (tid_agg_rx->reorder_buf[index]) {
2364 dev_kfree_skb(skb);
2365 return 1;
2366 }
2367
2368 /* if the arriving MPDU is in the right order and nothing else is
2369 * stored, release it immediately */
2370 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2371 tid_agg_rx->stored_mpdu_num == 0) {
2372 tid_agg_rx->head_seq_num =
2373 seq_inc(tid_agg_rx->head_seq_num);
2374 return 0;
2375 }
2376
2377 /* put the frame in the reordering buffer */
2378 tid_agg_rx->reorder_buf[index] = skb;
2379 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
2380 sizeof(*rxstatus));
2381 tid_agg_rx->stored_mpdu_num++;
2382 /* release buffered frames up to the next missing frame */
2383 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2384 % tid_agg_rx->buf_size;
2385 while (tid_agg_rx->reorder_buf[index]) {
2386 /* release the reordered frame back to stack */
2387 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
2388 sizeof(status));
2389 sband = local->hw.wiphy->bands[status.band];
2390 if (status.flag & RX_FLAG_HT)
2391 rate = sband->bitrates; /* TODO: HT rates */
2392 else
2393 rate = &sband->bitrates[status.rate_idx];
2394 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2395 &status, rate);
2396 tid_agg_rx->stored_mpdu_num--;
2397 tid_agg_rx->reorder_buf[index] = NULL;
2398 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2399 index = seq_sub(tid_agg_rx->head_seq_num,
2400 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2401 }
2402 return 1;
2403 }
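/*
 * A small worked example of the reordering above, assuming ssn = 0x0ff0,
 * buf_size = 8 and an initial head_seq_num of 0x0ff0 (numbers illustrative):
 *
 *	MPDU 0x0ff0 arrives in order with nothing stored -> released
 *	    immediately, head_seq_num becomes 0x0ff1.
 *	MPDU 0x0ff2 arrives early -> stored at index
 *	    seq_sub(0x0ff2, 0x0ff0) % 8 == 2.
 *	MPDU 0x0ff1 arrives -> stored at index 1, and the final while loop
 *	    releases 0x0ff1 and 0x0ff2, advancing head_seq_num to 0x0ff3.
 */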
2404
2405 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2406 struct sk_buff *skb,
2407 struct ieee80211_rx_status *status)
2408 {
2409 struct ieee80211_hw *hw = &local->hw;
2410 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2411 struct sta_info *sta;
2412 struct tid_ampdu_rx *tid_agg_rx;
2413 u16 sc;
2414 u16 mpdu_seq_num;
2415 u8 ret = 0;
2416 int tid;
2417
2418 sta = sta_info_get(local, hdr->addr2);
2419 if (!sta)
2420 return ret;
2421
2422 /* filter the QoS data rx stream according to
2423 * STA/TID and check whether this STA/TID has an aggregation session */
2424 if (!ieee80211_is_data_qos(hdr->frame_control))
2425 goto end_reorder;
2426
2427 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2428
2429 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2430 goto end_reorder;
2431
2432 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2433
2434 /* qos null data frames are excluded */
2435 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2436 goto end_reorder;
2437
2438 /* new un-ordered ampdu frame - process it */
2439
2440 /* reset session timer */
2441 if (tid_agg_rx->timeout)
2442 mod_timer(&tid_agg_rx->session_timer,
2443 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2444
2445 /* if this mpdu is fragmented - terminate rx aggregation session */
2446 sc = le16_to_cpu(hdr->seq_ctrl);
2447 if (sc & IEEE80211_SCTL_FRAG) {
2448 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2449 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2450 ret = 1;
2451 goto end_reorder;
2452 }
2453
2454 /* deal with the reordering buffer according to the MPDU sequence number */
2455 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2456 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
2457 mpdu_seq_num, 0);
2458 end_reorder:
2459 return ret;
2460 }
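/*
 * seq_ctrl decomposition used above, illustrated for sc == 0x1234: the
 * fragment number is sc & IEEE80211_SCTL_FRAG == 0x4 and the sequence
 * number is (sc & IEEE80211_SCTL_SEQ) >> 4 == 0x123.
 */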
2461
2462 /*
2463 * This is the receive path handler. It is called by a low level driver when an
2464 * 802.11 MPDU is received from the hardware.
2465 */
2466 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2467 struct ieee80211_rx_status *status)
2468 {
2469 struct ieee80211_local *local = hw_to_local(hw);
2470 struct ieee80211_rate *rate = NULL;
2471 struct ieee80211_supported_band *sband;
2472
2473 if (status->band < 0 ||
2474 status->band >= IEEE80211_NUM_BANDS) {
2475 WARN_ON(1);
2476 return;
2477 }
2478
2479 sband = local->hw.wiphy->bands[status->band];
2480 if (!sband) {
2481 WARN_ON(1);
2482 return;
2483 }
2484
2485 if (status->flag & RX_FLAG_HT) {
2486 /* rate_idx is MCS index */
2487 if (WARN_ON(status->rate_idx < 0 ||
2488 status->rate_idx >= 76))
2489 return;
2490 /* HT rates are not in the table - use the highest legacy rate
2491 * for now since other parts of mac80211 may not yet be fully
2492 * MCS aware. */
2493 rate = &sband->bitrates[sband->n_bitrates - 1];
2494 } else {
2495 if (WARN_ON(status->rate_idx < 0 ||
2496 status->rate_idx >= sband->n_bitrates))
2497 return;
2498 rate = &sband->bitrates[status->rate_idx];
2499 }
2500
2501 /*
2502 * key references and virtual interfaces are protected using RCU
2503 * and this requires that we are in a read-side RCU section during
2504 * receive processing
2505 */
2506 rcu_read_lock();
2507
2508 /*
2509 * Frames with a failed FCS/PLCP checksum are not returned;
2510 * all other frames are returned with the radiotap header
2511 * removed if one was previously present.
2512 * Also, frames shorter than 16 bytes are dropped.
2513 */
2514 skb = ieee80211_rx_monitor(local, skb, status, rate);
2515 if (!skb) {
2516 rcu_read_unlock();
2517 return;
2518 }
2519
2520 if (!ieee80211_rx_reorder_ampdu(local, skb, status))
2521 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2522
2523 rcu_read_unlock();
2524 }
2525 EXPORT_SYMBOL(__ieee80211_rx);
2526
2527 /* This is a version of the rx handler that can be called from hard irq
2528 * context. Post the skb on the queue and schedule the tasklet */
2529 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
2530 struct ieee80211_rx_status *status)
2531 {
2532 struct ieee80211_local *local = hw_to_local(hw);
2533
2534 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2535
2536 skb->dev = local->mdev;
2537 /* copy status into skb->cb for use by tasklet */
2538 memcpy(skb->cb, status, sizeof(*status));
2539 skb->pkt_type = IEEE80211_RX_MSG;
2540 skb_queue_tail(&local->skb_queue, skb);
2541 tasklet_schedule(&local->tasklet);
2542 }
2543 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
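/*
 * A minimal sketch of how a low level driver might hand a received MPDU to
 * mac80211 from its interrupt handler via the irqsafe entry point above.
 * The function name and the descriptor values are hypothetical and purely
 * illustrative; a real driver fills the status from its RX descriptor.
 */
#if 0	/* illustrative only, not built as part of this file */
static void example_driver_rx_irq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_rx_status status;

	memset(&status, 0, sizeof(status));
	status.band = IEEE80211_BAND_2GHZ;	/* band the frame was received on */
	status.freq = 2412;			/* channel center frequency in MHz */
	status.rate_idx = 0;			/* index into that band's bitrate table */

	/* safe from hard irq context: the skb is queued and a tasklet scheduled */
	ieee80211_rx_irqsafe(hw, skb, &status);
}
#endif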