net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "driver-ops.h"
23 #include "led.h"
24 #include "mesh.h"
25 #include "wep.h"
26 #include "wpa.h"
27 #include "tkip.h"
28 #include "wme.h"
29
30 /*
31 * monitor mode reception
32 *
33 * This function cleans up the SKB, i.e. it removes all the stuff
34 * only useful for monitoring.
35 */
36 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
37 struct sk_buff *skb)
38 {
39 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
40 if (likely(skb->len > FCS_LEN))
41 skb_trim(skb, skb->len - FCS_LEN);
42 else {
43 /* driver bug */
44 WARN_ON(1);
45 dev_kfree_skb(skb);
46 skb = NULL;
47 }
48 }
49
50 return skb;
51 }
52
53 static inline int should_drop_frame(struct sk_buff *skb,
54 int present_fcs_len)
55 {
56 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
57 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
58
59 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
60 return 1;
61 if (unlikely(skb->len < 16 + present_fcs_len))
62 return 1;
63 if (ieee80211_is_ctl(hdr->frame_control) &&
64 !ieee80211_is_pspoll(hdr->frame_control) &&
65 !ieee80211_is_back_req(hdr->frame_control))
66 return 1;
67 return 0;
68 }
69
70 static int
71 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
72 struct ieee80211_rx_status *status)
73 {
74 int len;
75
76 /* always present fields */
77 len = sizeof(struct ieee80211_radiotap_header) + 9;
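/* the 9 bytes account for the fields that are always emitted below:
 * flags (1) + rate byte (1, reserved even for HT) + channel freq/flags (2+2) +
 * antenna (1) + RX flags (2) */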
78
79 if (status->flag & RX_FLAG_TSFT)
80 len += 8;
81 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
82 len += 1;
83 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
84 len += 1;
85
86 if (len & 1) /* padding for RX_FLAGS if necessary */
87 len++;
88
89 return len;
90 }
91
92 /*
93 * ieee80211_add_rx_radiotap_header - add radiotap header
94 *
95 * add a radiotap header containing all the fields which the hardware provided.
96 */
97 static void
98 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
99 struct sk_buff *skb,
100 struct ieee80211_rate *rate,
101 int rtap_len)
102 {
103 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
104 struct ieee80211_radiotap_header *rthdr;
105 unsigned char *pos;
106 u16 rx_flags = 0;
107
108 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
109 memset(rthdr, 0, rtap_len);
110
111 /* radiotap header, set always present flags */
112 rthdr->it_present =
113 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
114 (1 << IEEE80211_RADIOTAP_CHANNEL) |
115 (1 << IEEE80211_RADIOTAP_ANTENNA) |
116 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
117 rthdr->it_len = cpu_to_le16(rtap_len);
118
119 pos = (unsigned char *)(rthdr+1);
120
121 /* the order of the following fields is important */
122
123 /* IEEE80211_RADIOTAP_TSFT */
124 if (status->flag & RX_FLAG_TSFT) {
125 put_unaligned_le64(status->mactime, pos);
126 rthdr->it_present |=
127 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
128 pos += 8;
129 }
130
131 /* IEEE80211_RADIOTAP_FLAGS */
132 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
133 *pos |= IEEE80211_RADIOTAP_F_FCS;
134 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
135 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
136 if (status->flag & RX_FLAG_SHORTPRE)
137 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
138 pos++;
139
140 /* IEEE80211_RADIOTAP_RATE */
141 if (status->flag & RX_FLAG_HT) {
142 /*
143 * TODO: add the following information to the radiotap header once
144 * suitable fields are defined for it:
145 * - MCS index (status->rate_idx)
146 * - HT40 (status->flag & RX_FLAG_40MHZ)
147 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
148 */
149 *pos = 0;
150 } else {
151 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
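/* rate->bitrate is in units of 100 kbps, radiotap expects 500 kbps units */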
152 *pos = rate->bitrate / 5;
153 }
154 pos++;
155
156 /* IEEE80211_RADIOTAP_CHANNEL */
157 put_unaligned_le16(status->freq, pos);
158 pos += 2;
159 if (status->band == IEEE80211_BAND_5GHZ)
160 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
161 pos);
162 else if (status->flag & RX_FLAG_HT)
163 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
164 pos);
165 else if (rate->flags & IEEE80211_RATE_ERP_G)
166 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
167 pos);
168 else
169 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
170 pos);
171 pos += 2;
172
173 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
174 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
175 *pos = status->signal;
176 rthdr->it_present |=
177 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
178 pos++;
179 }
180
181 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
182 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
183 *pos = status->noise;
184 rthdr->it_present |=
185 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
186 pos++;
187 }
188
189 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
190
191 /* IEEE80211_RADIOTAP_ANTENNA */
192 *pos = status->antenna;
193 pos++;
194
195 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
196
197 /* IEEE80211_RADIOTAP_RX_FLAGS */
198 /* ensure 2 byte alignment for the 2 byte field as required */
199 if ((pos - (u8 *)rthdr) & 1)
200 pos++;
201 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
202 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
203 put_unaligned_le16(rx_flags, pos);
204 pos += 2;
205 }
206
207 /*
208 * This function copies a received frame to all monitor interfaces and
209 * returns a cleaned-up SKB that no longer includes the FCS nor the
210 * radiotap header the driver might have added.
211 */
212 static struct sk_buff *
213 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
214 struct ieee80211_rate *rate)
215 {
216 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
217 struct ieee80211_sub_if_data *sdata;
218 int needed_headroom = 0;
219 struct sk_buff *skb, *skb2;
220 struct net_device *prev_dev = NULL;
221 int present_fcs_len = 0;
222
223 /*
224 * First, we may need to make a copy of the skb because
225 * (1) we need to modify it for radiotap (if not present), and
226 * (2) the other RX handlers will modify the skb we got.
227 *
228 * We don't need to, of course, if we aren't going to return
229 * the SKB because it has a bad FCS/PLCP checksum.
230 */
231
232 /* room for the radiotap header based on driver features */
233 needed_headroom = ieee80211_rx_radiotap_len(local, status);
234
235 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
236 present_fcs_len = FCS_LEN;
237
238 if (!local->monitors) {
239 if (should_drop_frame(origskb, present_fcs_len)) {
240 dev_kfree_skb(origskb);
241 return NULL;
242 }
243
244 return remove_monitor_info(local, origskb);
245 }
246
247 if (should_drop_frame(origskb, present_fcs_len)) {
248 /* only need to expand headroom if necessary */
249 skb = origskb;
250 origskb = NULL;
251
252 /*
253 * This shouldn't trigger often because most devices have an
254 * RX header they pull before we get here, and that should
255 * be big enough for our radiotap information. We should
256 * probably export the length to drivers so that we can have
257 * them allocate enough headroom to start with.
258 */
259 if (skb_headroom(skb) < needed_headroom &&
260 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
261 dev_kfree_skb(skb);
262 return NULL;
263 }
264 } else {
265 /*
266 * Need to make a copy and possibly remove radiotap header
267 * and FCS from the original.
268 */
269 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
270
271 origskb = remove_monitor_info(local, origskb);
272
273 if (!skb)
274 return origskb;
275 }
276
277 /* prepend radiotap information */
278 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
279
280 skb_reset_mac_header(skb);
281 skb->ip_summed = CHECKSUM_UNNECESSARY;
282 skb->pkt_type = PACKET_OTHERHOST;
283 skb->protocol = htons(ETH_P_802_2);
284
285 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
286 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
287 continue;
288
289 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
290 continue;
291
292 if (!ieee80211_sdata_running(sdata))
293 continue;
294
295 if (prev_dev) {
296 skb2 = skb_clone(skb, GFP_ATOMIC);
297 if (skb2) {
298 skb2->dev = prev_dev;
299 netif_rx(skb2);
300 }
301 }
302
303 prev_dev = sdata->dev;
304 sdata->dev->stats.rx_packets++;
305 sdata->dev->stats.rx_bytes += skb->len;
306 }
307
308 if (prev_dev) {
309 skb->dev = prev_dev;
310 netif_rx(skb);
311 } else
312 dev_kfree_skb(skb);
313
314 return origskb;
315 }
316
317
318 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
319 {
320 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
321 int tid;
322
323 /* does the frame have a qos control field? */
324 if (ieee80211_is_data_qos(hdr->frame_control)) {
325 u8 *qc = ieee80211_get_qos_ctl(hdr);
326 /* frame has qos control */
327 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
328 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
329 rx->flags |= IEEE80211_RX_AMSDU;
330 else
331 rx->flags &= ~IEEE80211_RX_AMSDU;
332 } else {
333 /*
334 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
335 *
336 * Sequence numbers for management frames, QoS data
337 * frames with a broadcast/multicast address in the
338 * Address 1 field, and all non-QoS data frames sent
339 * by QoS STAs are assigned using an additional single
340 * modulo-4096 counter, [...]
341 *
342 * We also use that counter for non-QoS STAs.
343 */
344 tid = NUM_RX_DATA_QUEUES - 1;
345 }
346
347 rx->queue = tid;
348 /* Set skb->priority to the 802.1d tag if the highest order bit of TID is not set.
349 * For now, set skb->priority to 0 for other cases. */
350 rx->skb->priority = (tid > 7) ? 0 : tid;
351 }
352
353 /**
354 * DOC: Packet alignment
355 *
356 * Drivers always need to pass packets that are aligned to two-byte boundaries
357 * to the stack.
358 *
359 * Additionally, drivers should, if possible, align the payload data in a way that
360 * guarantees that the contained IP header is aligned to a four-byte
361 * boundary. In the case of regular frames, this simply means aligning the
362 * payload to a four-byte boundary (because either the IP header is directly
363 * contained, or IV/RFC1042 headers that have a length divisible by four are
364 * in front of it). If the payload data is not properly aligned and the
365 * architecture doesn't support efficient unaligned operations, mac80211
366 * will align the data.
367 *
368 * With A-MSDU frames, however, the payload data address must be two modulo
369 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
370 * push the IP header further back to a multiple of four again. Thankfully, the
371 * specs were sane enough this time around to require padding each A-MSDU
372 * subframe to a length that is a multiple of four.
373 *
374 * Padding like that added by Atheros hardware, in between the 802.11 header and
375 * the payload, is not supported; the driver is required to move the 802.11
376 * header so that it is directly in front of the payload in that case.
377 */
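/*
 * Example: if an A-MSDU payload starts at an address that is 2 modulo 4,
 * each 14-byte 802.3 subframe header ends on a 4-byte boundary (2 + 14 = 16),
 * so the encapsulated IP header is again 4-byte aligned.
 */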
378 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
379 {
380 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
381 WARN_ONCE((unsigned long)rx->skb->data & 1,
382 "unaligned packet at 0x%p\n", rx->skb->data);
383 #endif
384 }
385
386
387 /* rx handlers */
388
389 static ieee80211_rx_result debug_noinline
390 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
391 {
392 struct ieee80211_local *local = rx->local;
393 struct sk_buff *skb = rx->skb;
394
395 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning)))
396 return ieee80211_scan_rx(rx->sdata, skb);
397
398 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) &&
399 (rx->flags & IEEE80211_RX_IN_SCAN))) {
400 /* drop all the other packets during a software scan anyway */
401 if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
402 dev_kfree_skb(skb);
403 return RX_QUEUED;
404 }
405
406 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
407 /* scanning finished while the rx handlers were being invoked */
408 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
409 return RX_DROP_UNUSABLE;
410 }
411
412 return RX_CONTINUE;
413 }
414
415
416 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
417 {
418 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
419
420 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
421 return 0;
422
423 return ieee80211_is_robust_mgmt_frame(hdr);
424 }
425
426
427 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
428 {
429 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
430
431 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
432 return 0;
433
434 return ieee80211_is_robust_mgmt_frame(hdr);
435 }
436
437
438 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
439 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
440 {
441 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
442 struct ieee80211_mmie *mmie;
443
444 if (skb->len < 24 + sizeof(*mmie) ||
445 !is_multicast_ether_addr(hdr->da))
446 return -1;
447
448 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
449 return -1; /* not a robust management frame */
450
451 mmie = (struct ieee80211_mmie *)
452 (skb->data + skb->len - sizeof(*mmie));
453 if (mmie->element_id != WLAN_EID_MMIE ||
454 mmie->length != sizeof(*mmie) - 2)
455 return -1;
456
457 return le16_to_cpu(mmie->key_id);
458 }
459
460
461 static ieee80211_rx_result
462 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
463 {
464 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
465 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
466 char *dev_addr = rx->sdata->vif.addr;
467
468 if (ieee80211_is_data(hdr->frame_control)) {
469 if (is_multicast_ether_addr(hdr->addr1)) {
470 if (ieee80211_has_tods(hdr->frame_control) ||
471 !ieee80211_has_fromds(hdr->frame_control))
472 return RX_DROP_MONITOR;
473 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
474 return RX_DROP_MONITOR;
475 } else {
476 if (!ieee80211_has_a4(hdr->frame_control))
477 return RX_DROP_MONITOR;
478 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
479 return RX_DROP_MONITOR;
480 }
481 }
482
483 /* If there is not an established peer link and this is not a peer link
484 * establishment frame, beacon or probe, drop the frame.
485 */
486
487 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
488 struct ieee80211_mgmt *mgmt;
489
490 if (!ieee80211_is_mgmt(hdr->frame_control))
491 return RX_DROP_MONITOR;
492
493 if (ieee80211_is_action(hdr->frame_control)) {
494 mgmt = (struct ieee80211_mgmt *)hdr;
495 if (mgmt->u.action.category != MESH_PLINK_CATEGORY)
496 return RX_DROP_MONITOR;
497 return RX_CONTINUE;
498 }
499
500 if (ieee80211_is_probe_req(hdr->frame_control) ||
501 ieee80211_is_probe_resp(hdr->frame_control) ||
502 ieee80211_is_beacon(hdr->frame_control))
503 return RX_CONTINUE;
504
505 return RX_DROP_MONITOR;
506
507 }
508
509 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
510
511 if (ieee80211_is_data(hdr->frame_control) &&
512 is_multicast_ether_addr(hdr->addr1) &&
513 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
514 return RX_DROP_MONITOR;
515 #undef msh_h_get
516
517 return RX_CONTINUE;
518 }
519
520 #define SEQ_MODULO 0x1000
521 #define SEQ_MASK 0xfff
522
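/*
 * 802.11 sequence numbers are 12 bits and wrap modulo 4096. "Less" here
 * means sq1 precedes sq2 within half the sequence space, so e.g.
 * seq_less(4090, 5) is true: (4090 - 5) & 0xfff == 4085 > 2048.
 */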
523 static inline int seq_less(u16 sq1, u16 sq2)
524 {
525 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
526 }
527
528 static inline u16 seq_inc(u16 sq)
529 {
530 return (sq + 1) & SEQ_MASK;
531 }
532
533 static inline u16 seq_sub(u16 sq1, u16 sq2)
534 {
535 return (sq1 - sq2) & SEQ_MASK;
536 }
537
538
539 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
540 struct tid_ampdu_rx *tid_agg_rx,
541 int index,
542 struct sk_buff_head *frames)
543 {
544 struct ieee80211_supported_band *sband;
545 struct ieee80211_rate *rate = NULL;
546 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
547 struct ieee80211_rx_status *status;
548
549 if (!skb)
550 goto no_frame;
551
552 status = IEEE80211_SKB_RXCB(skb);
553
554 /* release the reordered frames to stack */
555 sband = hw->wiphy->bands[status->band];
556 if (!(status->flag & RX_FLAG_HT))
557 rate = &sband->bitrates[status->rate_idx];
558 tid_agg_rx->stored_mpdu_num--;
559 tid_agg_rx->reorder_buf[index] = NULL;
560 __skb_queue_tail(frames, skb);
561
562 no_frame:
563 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
564 }
565
566 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
567 struct tid_ampdu_rx *tid_agg_rx,
568 u16 head_seq_num,
569 struct sk_buff_head *frames)
570 {
571 int index;
572
573 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
574 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
575 tid_agg_rx->buf_size;
576 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
577 }
578 }
579
580 /*
581 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
582 * the skb was added to the buffer longer than this time ago, the earlier
583 * frames that have not yet been received are assumed to be lost and the skb
584 * can be released for processing. This may also release other skb's from the
585 * reorder buffer if there are no additional gaps between the frames.
586 */
587 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
588
589 /*
590 * As this function belongs to the RX path it must be under
591 * rcu_read_lock protection. It returns false if the frame
592 * can be processed immediately, true if it was consumed.
593 */
594 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
595 struct tid_ampdu_rx *tid_agg_rx,
596 struct sk_buff *skb,
597 struct sk_buff_head *frames)
598 {
599 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
600 u16 sc = le16_to_cpu(hdr->seq_ctrl);
601 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
602 u16 head_seq_num, buf_size;
603 int index;
604
605 buf_size = tid_agg_rx->buf_size;
606 head_seq_num = tid_agg_rx->head_seq_num;
607
608 /* frame with out of date sequence number */
609 if (seq_less(mpdu_seq_num, head_seq_num)) {
610 dev_kfree_skb(skb);
611 return true;
612 }
613
614 /*
615 * If the frame sequence number exceeds our buffering window
616 * size, release some previous frames to make room for this one.
617 */
618 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
619 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
620 /* release stored frames up to new head to stack */
621 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
622 frames);
623 }
624
625 /* Now the new frame is always in the range of the reordering buffer */
626
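/* e.g. with ssn 100 and buf_size 64, sequence number 103 maps to slot 3 */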
627 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
628
629 /* check if we already stored this frame */
630 if (tid_agg_rx->reorder_buf[index]) {
631 dev_kfree_skb(skb);
632 return true;
633 }
634
635 /*
636 * If the current MPDU is in the right order and nothing else
637 * is stored we can process it directly, no need to buffer it.
638 */
639 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
640 tid_agg_rx->stored_mpdu_num == 0) {
641 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
642 return false;
643 }
644
645 /* put the frame in the reordering buffer */
646 tid_agg_rx->reorder_buf[index] = skb;
647 tid_agg_rx->reorder_time[index] = jiffies;
648 tid_agg_rx->stored_mpdu_num++;
649 /* release the buffer until next missing frame */
650 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
651 tid_agg_rx->buf_size;
652 if (!tid_agg_rx->reorder_buf[index] &&
653 tid_agg_rx->stored_mpdu_num > 1) {
654 /*
655 * No buffers ready to be released, but check whether any
656 * frames in the reorder buffer have timed out.
657 */
658 int j;
659 int skipped = 1;
660 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
661 j = (j + 1) % tid_agg_rx->buf_size) {
662 if (!tid_agg_rx->reorder_buf[j]) {
663 skipped++;
664 continue;
665 }
666 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
667 HT_RX_REORDER_BUF_TIMEOUT))
668 break;
669
670 #ifdef CONFIG_MAC80211_HT_DEBUG
671 if (net_ratelimit())
672 printk(KERN_DEBUG "%s: release an RX reorder "
673 "frame due to timeout on earlier "
674 "frames\n",
675 wiphy_name(hw->wiphy));
676 #endif
677 ieee80211_release_reorder_frame(hw, tid_agg_rx,
678 j, frames);
679
680 /*
681 * Increment the head seq# also for the skipped slots.
682 */
683 tid_agg_rx->head_seq_num =
684 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
685 skipped = 0;
686 }
687 } else while (tid_agg_rx->reorder_buf[index]) {
688 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
689 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
690 tid_agg_rx->buf_size;
691 }
692
693 return true;
694 }
695
696 /*
697 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that are
698 * ready for processing are appended to the frames queue.
699 */
700 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
701 struct sk_buff_head *frames)
702 {
703 struct sk_buff *skb = rx->skb;
704 struct ieee80211_local *local = rx->local;
705 struct ieee80211_hw *hw = &local->hw;
706 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
707 struct sta_info *sta = rx->sta;
708 struct tid_ampdu_rx *tid_agg_rx;
709 u16 sc;
710 int tid;
711
712 if (!ieee80211_is_data_qos(hdr->frame_control))
713 goto dont_reorder;
714
715 /*
716 * filter the QoS data rx stream according to
717 * STA/TID and check if this STA/TID is on aggregation
718 */
719
720 if (!sta)
721 goto dont_reorder;
722
723 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
724
725 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
726 goto dont_reorder;
727
728 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
729
730 /* qos null data frames are excluded */
731 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
732 goto dont_reorder;
733
734 /* new, potentially un-ordered, ampdu frame - process it */
735
736 /* reset session timer */
737 if (tid_agg_rx->timeout)
738 mod_timer(&tid_agg_rx->session_timer,
739 TU_TO_EXP_TIME(tid_agg_rx->timeout));
740
741 /* if this mpdu is fragmented - terminate rx aggregation session */
742 sc = le16_to_cpu(hdr->seq_ctrl);
743 if (sc & IEEE80211_SCTL_FRAG) {
744 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
745 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
746 dev_kfree_skb(skb);
747 return;
748 }
749
750 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
751 return;
752
753 dont_reorder:
754 __skb_queue_tail(frames, skb);
755 }
756
757 static ieee80211_rx_result debug_noinline
758 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
759 {
760 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
761
762 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
763 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
764 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
765 rx->sta->last_seq_ctrl[rx->queue] ==
766 hdr->seq_ctrl)) {
767 if (rx->flags & IEEE80211_RX_RA_MATCH) {
768 rx->local->dot11FrameDuplicateCount++;
769 rx->sta->num_duplicates++;
770 }
771 return RX_DROP_MONITOR;
772 } else
773 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
774 }
775
776 if (unlikely(rx->skb->len < 16)) {
777 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
778 return RX_DROP_MONITOR;
779 }
780
781 /* Drop disallowed frame classes based on STA auth/assoc state;
782 * IEEE 802.11, Chap 5.5.
783 *
784 * mac80211 filters only based on association state, i.e. it drops
785 * Class 3 frames from not associated stations. hostapd sends
786 * deauth/disassoc frames when needed. In addition, hostapd is
787 * responsible for filtering on both auth and assoc states.
788 */
789
790 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
791 return ieee80211_rx_mesh_check(rx);
792
793 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
794 ieee80211_is_pspoll(hdr->frame_control)) &&
795 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
796 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
797 if ((!ieee80211_has_fromds(hdr->frame_control) &&
798 !ieee80211_has_tods(hdr->frame_control) &&
799 ieee80211_is_data(hdr->frame_control)) ||
800 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
801 /* Drop IBSS frames and frames for other hosts
802 * silently. */
803 return RX_DROP_MONITOR;
804 }
805
806 return RX_DROP_MONITOR;
807 }
808
809 return RX_CONTINUE;
810 }
811
812
813 static ieee80211_rx_result debug_noinline
814 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
815 {
816 struct sk_buff *skb = rx->skb;
817 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
818 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
819 int keyidx;
820 int hdrlen;
821 ieee80211_rx_result result = RX_DROP_UNUSABLE;
822 struct ieee80211_key *stakey = NULL;
823 int mmie_keyidx = -1;
824
825 /*
826 * Key selection 101
827 *
828 * There are four types of keys:
829 * - GTK (group keys)
830 * - IGTK (group keys for management frames)
831 * - PTK (pairwise keys)
832 * - STK (station-to-station pairwise keys)
833 *
834 * When selecting a key, we have to distinguish between multicast
835 * (including broadcast) and unicast frames, the latter can only
836 * use PTKs and STKs while the former always use GTKs and IGTKs.
837 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
838 * unicast frames can also use key indices like GTKs. Hence, if we
839 * don't have a PTK/STK we check the key index for a WEP key.
840 *
841 * Note that in a regular BSS, multicast frames are sent by the
842 * AP only, associated stations unicast the frame to the AP first
843 * which then multicasts it on their behalf.
844 *
845 * There is also a slight problem in IBSS mode: GTKs are negotiated
846 * with each station, that is something we don't currently handle.
847 * The spec seems to expect that one negotiates the same key with
848 * every station but there's no such requirement; VLANs could be
849 * possible.
850 */
851
852 /*
853 * No point in finding a key and decrypting if the frame is neither
854 * addressed to us nor a multicast frame.
855 */
856 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
857 return RX_CONTINUE;
858
859 /* start without a key */
860 rx->key = NULL;
861
862 if (rx->sta)
863 stakey = rcu_dereference(rx->sta->key);
864
865 if (!ieee80211_has_protected(hdr->frame_control))
866 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
867
868 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
869 rx->key = stakey;
870 /* Skip decryption if the frame is not protected. */
871 if (!ieee80211_has_protected(hdr->frame_control))
872 return RX_CONTINUE;
873 } else if (mmie_keyidx >= 0) {
874 /* Broadcast/multicast robust management frame / BIP */
875 if ((status->flag & RX_FLAG_DECRYPTED) &&
876 (status->flag & RX_FLAG_IV_STRIPPED))
877 return RX_CONTINUE;
878
879 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
880 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
881 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
882 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
883 } else if (!ieee80211_has_protected(hdr->frame_control)) {
884 /*
885 * The frame was not protected, so skip decryption. However, we
886 * need to set rx->key if there is a key that could have been
887 * used so that the frame may be dropped if encryption would
888 * have been expected.
889 */
890 struct ieee80211_key *key = NULL;
891 if (ieee80211_is_mgmt(hdr->frame_control) &&
892 is_multicast_ether_addr(hdr->addr1) &&
893 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
894 rx->key = key;
895 else if ((key = rcu_dereference(rx->sdata->default_key)))
896 rx->key = key;
897 return RX_CONTINUE;
898 } else {
899 /*
900 * The device doesn't give us the IV so we won't be
901 * able to look up the key. That's ok though, we
902 * don't need to decrypt the frame, we just won't
903 * be able to keep statistics accurate.
904 * Except for key threshold notifications, should
905 * we somehow allow the driver to tell us which key
906 * the hardware used if this flag is set?
907 */
908 if ((status->flag & RX_FLAG_DECRYPTED) &&
909 (status->flag & RX_FLAG_IV_STRIPPED))
910 return RX_CONTINUE;
911
912 hdrlen = ieee80211_hdrlen(hdr->frame_control);
913
914 if (rx->skb->len < 8 + hdrlen)
915 return RX_DROP_UNUSABLE; /* TODO: count this? */
916
917 /*
918 * no need to call ieee80211_wep_get_keyidx,
919 * it verifies a bunch of things we've done already
920 */
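/* the key index is carried in the top two bits of the byte at offset 3 of the IV */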
921 keyidx = rx->skb->data[hdrlen + 3] >> 6;
922
923 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
924
925 /*
926 * RSNA-protected unicast frames should always be sent with
927 * pairwise or station-to-station keys, but for WEP we allow
928 * using a key index as well.
929 */
930 if (rx->key && rx->key->conf.alg != ALG_WEP &&
931 !is_multicast_ether_addr(hdr->addr1))
932 rx->key = NULL;
933 }
934
935 if (rx->key) {
936 rx->key->tx_rx_count++;
937 /* TODO: add threshold stuff again */
938 } else {
939 return RX_DROP_MONITOR;
940 }
941
942 /* Check for weak IVs if possible */
943 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
944 ieee80211_is_data(hdr->frame_control) &&
945 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
946 !(status->flag & RX_FLAG_DECRYPTED)) &&
947 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
948 rx->sta->wep_weak_iv_count++;
949
950 switch (rx->key->conf.alg) {
951 case ALG_WEP:
952 result = ieee80211_crypto_wep_decrypt(rx);
953 break;
954 case ALG_TKIP:
955 result = ieee80211_crypto_tkip_decrypt(rx);
956 break;
957 case ALG_CCMP:
958 result = ieee80211_crypto_ccmp_decrypt(rx);
959 break;
960 case ALG_AES_CMAC:
961 result = ieee80211_crypto_aes_cmac_decrypt(rx);
962 break;
963 }
964
965 /* either the frame has been decrypted or will be dropped */
966 status->flag |= RX_FLAG_DECRYPTED;
967
968 return result;
969 }
970
971 static ieee80211_rx_result debug_noinline
972 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
973 {
974 struct ieee80211_local *local;
975 struct ieee80211_hdr *hdr;
976 struct sk_buff *skb;
977
978 local = rx->local;
979 skb = rx->skb;
980 hdr = (struct ieee80211_hdr *) skb->data;
981
982 if (!local->pspolling)
983 return RX_CONTINUE;
984
985 if (!ieee80211_has_fromds(hdr->frame_control))
986 /* this is not from AP */
987 return RX_CONTINUE;
988
989 if (!ieee80211_is_data(hdr->frame_control))
990 return RX_CONTINUE;
991
992 if (!ieee80211_has_moredata(hdr->frame_control)) {
993 /* AP has no more frames buffered for us */
994 local->pspolling = false;
995 return RX_CONTINUE;
996 }
997
998 /* more data bit is set, let's request a new frame from the AP */
999 ieee80211_send_pspoll(local, rx->sdata);
1000
1001 return RX_CONTINUE;
1002 }
1003
1004 static void ap_sta_ps_start(struct sta_info *sta)
1005 {
1006 struct ieee80211_sub_if_data *sdata = sta->sdata;
1007 struct ieee80211_local *local = sdata->local;
1008
1009 atomic_inc(&sdata->bss->num_sta_ps);
1010 set_sta_flags(sta, WLAN_STA_PS_STA);
1011 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1012 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1013 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1014 sdata->name, sta->sta.addr, sta->sta.aid);
1015 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1016 }
1017
1018 static void ap_sta_ps_end(struct sta_info *sta)
1019 {
1020 struct ieee80211_sub_if_data *sdata = sta->sdata;
1021
1022 atomic_dec(&sdata->bss->num_sta_ps);
1023
1024 clear_sta_flags(sta, WLAN_STA_PS_STA);
1025
1026 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1027 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1028 sdata->name, sta->sta.addr, sta->sta.aid);
1029 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1030
1031 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1032 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1033 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1034 sdata->name, sta->sta.addr, sta->sta.aid);
1035 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1036 return;
1037 }
1038
1039 ieee80211_sta_ps_deliver_wakeup(sta);
1040 }
1041
1042 static ieee80211_rx_result debug_noinline
1043 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1044 {
1045 struct sta_info *sta = rx->sta;
1046 struct sk_buff *skb = rx->skb;
1047 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1048 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1049
1050 if (!sta)
1051 return RX_CONTINUE;
1052
1053 /*
1054 * Update last_rx only for IBSS packets which are for the current
1055 * BSSID to avoid keeping the current IBSS network alive in cases
1056 * where other STAs start using different BSSID.
1057 */
1058 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1059 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1060 NL80211_IFTYPE_ADHOC);
1061 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
1062 sta->last_rx = jiffies;
1063 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1064 /*
1065 * Mesh beacons will update last_rx if they are found to
1066 * match the current local configuration when processed.
1067 */
1068 sta->last_rx = jiffies;
1069 }
1070
1071 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1072 return RX_CONTINUE;
1073
1074 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1075 ieee80211_sta_rx_notify(rx->sdata, hdr);
1076
1077 sta->rx_fragments++;
1078 sta->rx_bytes += rx->skb->len;
1079 sta->last_signal = status->signal;
1080 sta->last_noise = status->noise;
1081
1082 /*
1083 * Change STA power saving mode only at the end of a frame
1084 * exchange sequence.
1085 */
1086 if (!ieee80211_has_morefrags(hdr->frame_control) &&
1087 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1088 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1089 if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
1090 /*
1091 * Ignore doze->wake transitions that are
1092 * indicated by non-data frames, the standard
1093 * is unclear here, but for example going to
1094 * PS mode and then scanning would cause a
1095 * doze->wake transition for the probe request,
1096 * and that is clearly undesirable.
1097 */
1098 if (ieee80211_is_data(hdr->frame_control) &&
1099 !ieee80211_has_pm(hdr->frame_control))
1100 ap_sta_ps_end(sta);
1101 } else {
1102 if (ieee80211_has_pm(hdr->frame_control))
1103 ap_sta_ps_start(sta);
1104 }
1105 }
1106
1107 /*
1108 * Drop (qos-)data::nullfunc frames silently, since they
1109 * are used only to control station power saving mode.
1110 */
1111 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1112 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1113 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1114
1115 /*
1116 * If we receive a 4-addr nullfunc frame from a STA
1117 * that was not moved to a 4-addr STA vlan yet, drop
1118 * the frame to the monitor interface, to make sure
1119 * that hostapd sees it
1120 */
1121 if (ieee80211_has_a4(hdr->frame_control) &&
1122 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1123 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1124 !rx->sdata->u.vlan.sta)))
1125 return RX_DROP_MONITOR;
1126 /*
1127 * Update counter and free packet here to avoid
1128 * counting this as a dropped packet.
1129 */
1130 sta->rx_packets++;
1131 dev_kfree_skb(rx->skb);
1132 return RX_QUEUED;
1133 }
1134
1135 return RX_CONTINUE;
1136 } /* ieee80211_rx_h_sta_process */
1137
1138 static inline struct ieee80211_fragment_entry *
1139 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1140 unsigned int frag, unsigned int seq, int rx_queue,
1141 struct sk_buff **skb)
1142 {
1143 struct ieee80211_fragment_entry *entry;
1144 int idx;
1145
1146 idx = sdata->fragment_next;
1147 entry = &sdata->fragments[sdata->fragment_next++];
1148 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1149 sdata->fragment_next = 0;
1150
1151 if (!skb_queue_empty(&entry->skb_list)) {
1152 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1153 struct ieee80211_hdr *hdr =
1154 (struct ieee80211_hdr *) entry->skb_list.next->data;
1155 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1156 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1157 "addr1=%pM addr2=%pM\n",
1158 sdata->name, idx,
1159 jiffies - entry->first_frag_time, entry->seq,
1160 entry->last_frag, hdr->addr1, hdr->addr2);
1161 #endif
1162 __skb_queue_purge(&entry->skb_list);
1163 }
1164
1165 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1166 *skb = NULL;
1167 entry->first_frag_time = jiffies;
1168 entry->seq = seq;
1169 entry->rx_queue = rx_queue;
1170 entry->last_frag = frag;
1171 entry->ccmp = 0;
1172 entry->extra_len = 0;
1173
1174 return entry;
1175 }
1176
1177 static inline struct ieee80211_fragment_entry *
1178 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1179 unsigned int frag, unsigned int seq,
1180 int rx_queue, struct ieee80211_hdr *hdr)
1181 {
1182 struct ieee80211_fragment_entry *entry;
1183 int i, idx;
1184
1185 idx = sdata->fragment_next;
1186 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1187 struct ieee80211_hdr *f_hdr;
1188
1189 idx--;
1190 if (idx < 0)
1191 idx = IEEE80211_FRAGMENT_MAX - 1;
1192
1193 entry = &sdata->fragments[idx];
1194 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1195 entry->rx_queue != rx_queue ||
1196 entry->last_frag + 1 != frag)
1197 continue;
1198
1199 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1200
1201 /*
1202 * Check ftype and addresses are equal, else check next fragment
1203 */
1204 if (((hdr->frame_control ^ f_hdr->frame_control) &
1205 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1206 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1207 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1208 continue;
1209
1210 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1211 __skb_queue_purge(&entry->skb_list);
1212 continue;
1213 }
1214 return entry;
1215 }
1216
1217 return NULL;
1218 }
1219
1220 static ieee80211_rx_result debug_noinline
1221 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1222 {
1223 struct ieee80211_hdr *hdr;
1224 u16 sc;
1225 __le16 fc;
1226 unsigned int frag, seq;
1227 struct ieee80211_fragment_entry *entry;
1228 struct sk_buff *skb;
1229
1230 hdr = (struct ieee80211_hdr *)rx->skb->data;
1231 fc = hdr->frame_control;
1232 sc = le16_to_cpu(hdr->seq_ctrl);
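/* the sequence control field: low four bits are the fragment number,
 * upper twelve bits the sequence number */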
1233 frag = sc & IEEE80211_SCTL_FRAG;
1234
1235 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1236 (rx->skb)->len < 24 ||
1237 is_multicast_ether_addr(hdr->addr1))) {
1238 /* not fragmented */
1239 goto out;
1240 }
1241 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1242
1243 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1244
1245 if (frag == 0) {
1246 /* This is the first fragment of a new frame. */
1247 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1248 rx->queue, &(rx->skb));
1249 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1250 ieee80211_has_protected(fc)) {
1251 /* Store CCMP PN so that we can verify that the next
1252 * fragment has a sequential PN value. */
1253 entry->ccmp = 1;
1254 memcpy(entry->last_pn,
1255 rx->key->u.ccmp.rx_pn[rx->queue],
1256 CCMP_PN_LEN);
1257 }
1258 return RX_QUEUED;
1259 }
1260
1261 /* This is a fragment for a frame that should already be pending in the
1262 * fragment cache. Add this fragment to the end of the pending entry.
1263 */
1264 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1265 if (!entry) {
1266 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1267 return RX_DROP_MONITOR;
1268 }
1269
1270 /* Verify that MPDUs within one MSDU have sequential PN values.
1271 * (IEEE 802.11i, 8.3.3.4.5) */
1272 if (entry->ccmp) {
1273 int i;
1274 u8 pn[CCMP_PN_LEN], *rpn;
1275 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1276 return RX_DROP_UNUSABLE;
1277 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
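/* increment the stored PN by one; it is kept most-significant byte first */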
1278 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1279 pn[i]++;
1280 if (pn[i])
1281 break;
1282 }
1283 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
1284 if (memcmp(pn, rpn, CCMP_PN_LEN))
1285 return RX_DROP_UNUSABLE;
1286 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1287 }
1288
1289 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1290 __skb_queue_tail(&entry->skb_list, rx->skb);
1291 entry->last_frag = frag;
1292 entry->extra_len += rx->skb->len;
1293 if (ieee80211_has_morefrags(fc)) {
1294 rx->skb = NULL;
1295 return RX_QUEUED;
1296 }
1297
1298 rx->skb = __skb_dequeue(&entry->skb_list);
1299 if (skb_tailroom(rx->skb) < entry->extra_len) {
1300 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1301 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1302 GFP_ATOMIC))) {
1303 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1304 __skb_queue_purge(&entry->skb_list);
1305 return RX_DROP_UNUSABLE;
1306 }
1307 }
1308 while ((skb = __skb_dequeue(&entry->skb_list))) {
1309 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1310 dev_kfree_skb(skb);
1311 }
1312
1313 /* Complete frame has been reassembled - process it now */
1314 rx->flags |= IEEE80211_RX_FRAGMENTED;
1315
1316 out:
1317 if (rx->sta)
1318 rx->sta->rx_packets++;
1319 if (is_multicast_ether_addr(hdr->addr1))
1320 rx->local->dot11MulticastReceivedFrameCount++;
1321 else
1322 ieee80211_led_rx(rx->local);
1323 return RX_CONTINUE;
1324 }
1325
1326 static ieee80211_rx_result debug_noinline
1327 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1328 {
1329 struct ieee80211_sub_if_data *sdata = rx->sdata;
1330 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1331
1332 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1333 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1334 return RX_CONTINUE;
1335
1336 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1337 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1338 return RX_DROP_UNUSABLE;
1339
1340 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1341 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1342 else
1343 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1344
1345 /* Free PS Poll skb here instead of returning RX_DROP that would
1346 * count as a dropped frame. */
1347 dev_kfree_skb(rx->skb);
1348
1349 return RX_QUEUED;
1350 }
1351
1352 static ieee80211_rx_result debug_noinline
1353 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1354 {
1355 u8 *data = rx->skb->data;
1356 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1357
1358 if (!ieee80211_is_data_qos(hdr->frame_control))
1359 return RX_CONTINUE;
1360
1361 /* remove the qos control field, update frame type and meta-data */
1362 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1363 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
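/* the 802.11 header (minus the QoS control field) now starts two bytes
 * further in, so pull those two leading bytes off the skb */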
1364 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1365 /* change frame type to non QOS */
1366 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1367
1368 return RX_CONTINUE;
1369 }
1370
1371 static int
1372 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1373 {
1374 if (unlikely(!rx->sta ||
1375 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1376 return -EACCES;
1377
1378 return 0;
1379 }
1380
1381 static int
1382 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1383 {
1384 struct sk_buff *skb = rx->skb;
1385 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1386
1387 /*
1388 * Pass through unencrypted frames if the hardware has
1389 * decrypted them already.
1390 */
1391 if (status->flag & RX_FLAG_DECRYPTED)
1392 return 0;
1393
1394 /* Drop unencrypted frames if key is set. */
1395 if (unlikely(!ieee80211_has_protected(fc) &&
1396 !ieee80211_is_nullfunc(fc) &&
1397 ieee80211_is_data(fc) &&
1398 (rx->key || rx->sdata->drop_unencrypted)))
1399 return -EACCES;
1400
1401 return 0;
1402 }
1403
1404 static int
1405 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1406 {
1407 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1408 __le16 fc = hdr->frame_control;
1409 int res;
1410
1411 res = ieee80211_drop_unencrypted(rx, fc);
1412 if (unlikely(res))
1413 return res;
1414
1415 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1416 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1417 rx->key))
1418 return -EACCES;
1419 /* BIP does not use the Protected field, so we need to check the MMIE */
1420 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1421 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1422 rx->key))
1423 return -EACCES;
1424 /*
1425 * When using MFP, Action frames are not allowed prior to
1426 * having configured keys.
1427 */
1428 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1429 ieee80211_is_robust_mgmt_frame(
1430 (struct ieee80211_hdr *) rx->skb->data)))
1431 return -EACCES;
1432 }
1433
1434 return 0;
1435 }
1436
1437 static int
1438 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1439 {
1440 struct ieee80211_sub_if_data *sdata = rx->sdata;
1441 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1442
1443 if (ieee80211_has_a4(hdr->frame_control) &&
1444 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1445 return -1;
1446
1447 if (is_multicast_ether_addr(hdr->addr1) &&
1448 ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
1449 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1450 return -1;
1451
1452 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1453 }
1454
1455 /*
1456 * requires that rx->skb is a frame with an ethernet header
1457 */
1458 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1459 {
1460 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1461 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1462 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1463
1464 /*
1465 * Allow EAPOL frames to us/the PAE group address regardless
1466 * of whether the frame was encrypted or not.
1467 */
1468 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1469 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1470 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1471 return true;
1472
1473 if (ieee80211_802_1x_port_control(rx) ||
1474 ieee80211_drop_unencrypted(rx, fc))
1475 return false;
1476
1477 return true;
1478 }
1479
1480 /*
1481 * requires that rx->skb is a frame with an ethernet header
1482 */
1483 static void
1484 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1485 {
1486 struct ieee80211_sub_if_data *sdata = rx->sdata;
1487 struct net_device *dev = sdata->dev;
1488 struct sk_buff *skb, *xmit_skb;
1489 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1490 struct sta_info *dsta;
1491
1492 skb = rx->skb;
1493 xmit_skb = NULL;
1494
1495 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1496 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1497 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1498 (rx->flags & IEEE80211_RX_RA_MATCH) &&
1499 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1500 if (is_multicast_ether_addr(ehdr->h_dest)) {
1501 /*
1502 * send multicast frames both to higher layers in
1503 * local net stack and back to the wireless medium
1504 */
1505 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1506 if (!xmit_skb && net_ratelimit())
1507 printk(KERN_DEBUG "%s: failed to clone "
1508 "multicast frame\n", dev->name);
1509 } else {
1510 dsta = sta_info_get(sdata, skb->data);
1511 if (dsta) {
1512 /*
1513 * The destination station is associated to
1514 * this AP (in this VLAN), so send the frame
1515 * directly to it and do not pass it to local
1516 * net stack.
1517 */
1518 xmit_skb = skb;
1519 skb = NULL;
1520 }
1521 }
1522 }
1523
1524 if (skb) {
1525 int align __maybe_unused;
1526
1527 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1528 /*
1529 * 'align' will only take the values 0 or 2 here
1530 * since all frames are required to be aligned
1531 * to 2-byte boundaries when being passed to
1532 * mac80211. That also explains why the data pointer is
1533 * simply moved back by 'align' bytes below.
1534 */
1535 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1536 if (align) {
1537 if (WARN_ON(skb_headroom(skb) < 3)) {
1538 dev_kfree_skb(skb);
1539 skb = NULL;
1540 } else {
1541 u8 *data = skb->data;
1542 size_t len = skb_headlen(skb);
1543 skb->data -= align;
1544 memmove(skb->data, data, len);
1545 skb_set_tail_pointer(skb, len);
1546 }
1547 }
1548 #endif
1549
1550 if (skb) {
1551 /* deliver to local stack */
1552 skb->protocol = eth_type_trans(skb, dev);
1553 memset(skb->cb, 0, sizeof(skb->cb));
1554 netif_rx(skb);
1555 }
1556 }
1557
1558 if (xmit_skb) {
1559 /* send to wireless media */
1560 xmit_skb->protocol = htons(ETH_P_802_3);
1561 skb_reset_network_header(xmit_skb);
1562 skb_reset_mac_header(xmit_skb);
1563 dev_queue_xmit(xmit_skb);
1564 }
1565 }
1566
1567 static ieee80211_rx_result debug_noinline
1568 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1569 {
1570 struct net_device *dev = rx->sdata->dev;
1571 struct sk_buff *skb = rx->skb;
1572 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1573 __le16 fc = hdr->frame_control;
1574 struct sk_buff_head frame_list;
1575
1576 if (unlikely(!ieee80211_is_data(fc)))
1577 return RX_CONTINUE;
1578
1579 if (unlikely(!ieee80211_is_data_present(fc)))
1580 return RX_DROP_MONITOR;
1581
1582 if (!(rx->flags & IEEE80211_RX_AMSDU))
1583 return RX_CONTINUE;
1584
1585 if (ieee80211_has_a4(hdr->frame_control) &&
1586 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1587 !rx->sdata->u.vlan.sta)
1588 return RX_DROP_UNUSABLE;
1589
1590 if (is_multicast_ether_addr(hdr->addr1) &&
1591 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1592 rx->sdata->u.vlan.sta) ||
1593 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1594 rx->sdata->u.mgd.use_4addr)))
1595 return RX_DROP_UNUSABLE;
1596
1597 skb->dev = dev;
1598 __skb_queue_head_init(&frame_list);
1599
1600 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1601 rx->sdata->vif.type,
1602 rx->local->hw.extra_tx_headroom);
1603
1604 while (!skb_queue_empty(&frame_list)) {
1605 rx->skb = __skb_dequeue(&frame_list);
1606
1607 if (!ieee80211_frame_allowed(rx, fc)) {
1608 dev_kfree_skb(rx->skb);
1609 continue;
1610 }
1611 dev->stats.rx_packets++;
1612 dev->stats.rx_bytes += rx->skb->len;
1613
1614 ieee80211_deliver_skb(rx);
1615 }
1616
1617 return RX_QUEUED;
1618 }
1619
1620 #ifdef CONFIG_MAC80211_MESH
1621 static ieee80211_rx_result
1622 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1623 {
1624 struct ieee80211_hdr *hdr;
1625 struct ieee80211s_hdr *mesh_hdr;
1626 unsigned int hdrlen;
1627 struct sk_buff *skb = rx->skb, *fwd_skb;
1628 struct ieee80211_local *local = rx->local;
1629 struct ieee80211_sub_if_data *sdata = rx->sdata;
1630
1631 hdr = (struct ieee80211_hdr *) skb->data;
1632 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1633 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1634
1635 if (!ieee80211_is_data(hdr->frame_control))
1636 return RX_CONTINUE;
1637
1638 if (!mesh_hdr->ttl)
1639 /* illegal frame */
1640 return RX_DROP_MONITOR;
1641
1642 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1643 struct mesh_path *mppath;
1644 char *proxied_addr;
1645 char *mpp_addr;
1646
1647 if (is_multicast_ether_addr(hdr->addr1)) {
1648 mpp_addr = hdr->addr3;
1649 proxied_addr = mesh_hdr->eaddr1;
1650 } else {
1651 mpp_addr = hdr->addr4;
1652 proxied_addr = mesh_hdr->eaddr2;
1653 }
1654
1655 rcu_read_lock();
1656 mppath = mpp_path_lookup(proxied_addr, sdata);
1657 if (!mppath) {
1658 mpp_path_add(proxied_addr, mpp_addr, sdata);
1659 } else {
1660 spin_lock_bh(&mppath->state_lock);
1661 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1662 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1663 spin_unlock_bh(&mppath->state_lock);
1664 }
1665 rcu_read_unlock();
1666 }
1667
1668 /* Frame has reached destination. Don't forward */
1669 if (!is_multicast_ether_addr(hdr->addr1) &&
1670 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1671 return RX_CONTINUE;
1672
1673 mesh_hdr->ttl--;
1674
1675 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1676 if (!mesh_hdr->ttl)
1677 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1678 dropped_frames_ttl);
1679 else {
1680 struct ieee80211_hdr *fwd_hdr;
1681 struct ieee80211_tx_info *info;
1682
1683 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1684
if (!fwd_skb) {
	if (net_ratelimit())
		printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
		       sdata->name);
	/* copy failed: nothing to forward, still handle the original frame */
	goto out;
}
1688
1689 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1690 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1691 info = IEEE80211_SKB_CB(fwd_skb);
1692 memset(info, 0, sizeof(*info));
1693 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1694 info->control.vif = &rx->sdata->vif;
1695 skb_set_queue_mapping(fwd_skb,
1696 ieee80211_select_queue(rx->sdata, fwd_skb));
1697 ieee80211_set_qos_hdr(local, fwd_skb);
1698 if (is_multicast_ether_addr(fwd_hdr->addr1))
1699 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1700 fwded_mcast);
1701 else {
1702 int err;
1703 /*
1704 * Save TA to addr1 to send TA a path error if a
1705 * suitable next hop is not found
1706 */
1707 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1708 ETH_ALEN);
1709 err = mesh_nexthop_lookup(fwd_skb, sdata);
1710 /* Failed to immediately resolve next hop:
1711 * fwded frame was dropped or will be added
1712 * later to the pending skb queue. */
1713 if (err)
1714 return RX_DROP_MONITOR;
1715
1716 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1717 fwded_unicast);
1718 }
1719 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1720 fwded_frames);
1721 ieee80211_add_pending_skb(local, fwd_skb);
1722 }
1723 }
1724
 out:
1725 if (is_multicast_ether_addr(hdr->addr1) ||
1726 sdata->dev->flags & IFF_PROMISC)
1727 return RX_CONTINUE;
1728 else
1729 return RX_DROP_MONITOR;
1730 }
1731 #endif
1732
1733 static ieee80211_rx_result debug_noinline
1734 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1735 {
1736 struct ieee80211_sub_if_data *sdata = rx->sdata;
1737 struct ieee80211_local *local = rx->local;
1738 struct net_device *dev = sdata->dev;
1739 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1740 __le16 fc = hdr->frame_control;
1741 int err;
1742
1743 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1744 return RX_CONTINUE;
1745
1746 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1747 return RX_DROP_MONITOR;
1748
1749 /*
1750 * Allow the cooked monitor interface of an AP to see 4-addr frames so
1751 * that a 4-addr station can be detected and moved into a separate VLAN
1752 */
1753 if (ieee80211_has_a4(hdr->frame_control) &&
1754 sdata->vif.type == NL80211_IFTYPE_AP)
1755 return RX_DROP_MONITOR;
1756
1757 err = __ieee80211_data_to_8023(rx);
1758 if (unlikely(err))
1759 return RX_DROP_UNUSABLE;
1760
1761 if (!ieee80211_frame_allowed(rx, fc))
1762 return RX_DROP_MONITOR;
1763
1764 rx->skb->dev = dev;
1765
1766 dev->stats.rx_packets++;
1767 dev->stats.rx_bytes += rx->skb->len;
1768
1769 if (ieee80211_is_data(hdr->frame_control) &&
1770 !is_multicast_ether_addr(hdr->addr1) &&
1771 local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
1772 mod_timer(&local->dynamic_ps_timer, jiffies +
1773 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1774 }
1775
1776 ieee80211_deliver_skb(rx);
1777
1778 return RX_QUEUED;
1779 }
1780
1781 static ieee80211_rx_result debug_noinline
1782 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1783 {
1784 struct ieee80211_local *local = rx->local;
1785 struct ieee80211_hw *hw = &local->hw;
1786 struct sk_buff *skb = rx->skb;
1787 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1788 struct tid_ampdu_rx *tid_agg_rx;
1789 u16 start_seq_num;
1790 u16 tid;
1791
1792 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1793 return RX_CONTINUE;
1794
1795 if (ieee80211_is_back_req(bar->frame_control)) {
1796 if (!rx->sta)
1797 return RX_DROP_MONITOR;
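/* the TID is carried in the top four bits of the BAR control field,
 * and the fragment bits are the low four bits of the sequence control */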
1798 tid = le16_to_cpu(bar->control) >> 12;
1799 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1800 != HT_AGG_STATE_OPERATIONAL)
1801 return RX_DROP_MONITOR;
1802 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1803
1804 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1805
1806 /* reset session timer */
1807 if (tid_agg_rx->timeout)
1808 mod_timer(&tid_agg_rx->session_timer,
1809 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1810
1811 /* release stored frames up to start of BAR */
1812 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
1813 frames);
1814 kfree_skb(skb);
1815 return RX_QUEUED;
1816 }
1817
1818 return RX_CONTINUE;
1819 }
1820
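/*
 * Answer an SA Query request (IEEE 802.11w management frame protection)
 * from the current AP with an SA Query response echoing the same
 * transaction ID.
 */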
1821 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1822 struct ieee80211_mgmt *mgmt,
1823 size_t len)
1824 {
1825 struct ieee80211_local *local = sdata->local;
1826 struct sk_buff *skb;
1827 struct ieee80211_mgmt *resp;
1828
1829 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
1830 /* Not to own unicast address */
1831 return;
1832 }
1833
1834 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1835 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1836 /* Not from the current AP or not associated yet. */
1837 return;
1838 }
1839
1840 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1841 /* Too short SA Query request frame */
1842 return;
1843 }
1844
1845 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1846 if (skb == NULL)
1847 return;
1848
1849 skb_reserve(skb, local->hw.extra_tx_headroom);
1850 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1851 memset(resp, 0, 24);
1852 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1853 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
1854 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1855 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1856 IEEE80211_STYPE_ACTION);
1857 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1858 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1859 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1860 memcpy(resp->u.action.u.sa_query.trans_id,
1861 mgmt->u.action.u.sa_query.trans_id,
1862 WLAN_SA_QUERY_TR_ID_LEN);
1863
1864 ieee80211_tx_skb(sdata, skb);
1865 }
1866
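/*
 * Action frame handler: Block Ack, spectrum management and SA Query
 * categories are processed here; unhandled frames are passed up to
 * userspace and/or returned to the sender with the 0x80 bit set, as
 * described in the comments below.
 */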
1867 static ieee80211_rx_result debug_noinline
1868 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1869 {
1870 struct ieee80211_local *local = rx->local;
1871 struct ieee80211_sub_if_data *sdata = rx->sdata;
1872 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1873 struct sk_buff *nskb;
1874 struct ieee80211_rx_status *status;
1875 int len = rx->skb->len;
1876
1877 if (!ieee80211_is_action(mgmt->frame_control))
1878 return RX_CONTINUE;
1879
1880 /* drop too small frames */
1881 if (len < IEEE80211_MIN_ACTION_SIZE)
1882 return RX_DROP_UNUSABLE;
1883
1884 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
1885 return RX_DROP_UNUSABLE;
1886
1887 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1888 return RX_DROP_UNUSABLE;
1889
1890 if (ieee80211_drop_unencrypted_mgmt(rx))
1891 return RX_DROP_UNUSABLE;
1892
1893 switch (mgmt->u.action.category) {
1894 case WLAN_CATEGORY_BACK:
1895 /*
1896 * The aggregation code is not prepared to handle
1897 * anything but STA/AP due to the BSSID handling;
1898 * IBSS could work in the code but isn't supported
1899 * by drivers or the standard.
1900 */
1901 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1902 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1903 sdata->vif.type != NL80211_IFTYPE_AP)
1904 break;
1905
1906 /* verify action_code is present */
1907 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1908 break;
1909
1910 switch (mgmt->u.action.u.addba_req.action_code) {
1911 case WLAN_ACTION_ADDBA_REQ:
1912 if (len < (IEEE80211_MIN_ACTION_SIZE +
1913 sizeof(mgmt->u.action.u.addba_req)))
1914 return RX_DROP_MONITOR;
1915 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1916 goto handled;
1917 case WLAN_ACTION_ADDBA_RESP:
1918 if (len < (IEEE80211_MIN_ACTION_SIZE +
1919 sizeof(mgmt->u.action.u.addba_resp)))
1920 break;
1921 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1922 goto handled;
1923 case WLAN_ACTION_DELBA:
1924 if (len < (IEEE80211_MIN_ACTION_SIZE +
1925 sizeof(mgmt->u.action.u.delba)))
1926 break;
1927 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1928 goto handled;
1929 }
1930 break;
1931 case WLAN_CATEGORY_SPECTRUM_MGMT:
1932 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1933 break;
1934
1935 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1936 break;
1937
1938 /* verify action_code is present */
1939 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1940 break;
1941
1942 switch (mgmt->u.action.u.measurement.action_code) {
1943 case WLAN_ACTION_SPCT_MSR_REQ:
1944 if (len < (IEEE80211_MIN_ACTION_SIZE +
1945 sizeof(mgmt->u.action.u.measurement)))
1946 break;
1947 ieee80211_process_measurement_req(sdata, mgmt, len);
1948 goto handled;
1949 case WLAN_ACTION_SPCT_CHL_SWITCH:
1950 if (len < (IEEE80211_MIN_ACTION_SIZE +
1951 sizeof(mgmt->u.action.u.chan_switch)))
1952 break;
1953
1954 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1955 break;
1956
1957 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1958 break;
1959
1960 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1961 }
1962 break;
1963 case WLAN_CATEGORY_SA_QUERY:
1964 if (len < (IEEE80211_MIN_ACTION_SIZE +
1965 sizeof(mgmt->u.action.u.sa_query)))
1966 break;
1967
1968 switch (mgmt->u.action.u.sa_query.action) {
1969 case WLAN_ACTION_SA_QUERY_REQUEST:
1970 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1971 break;
1972 ieee80211_process_sa_query_req(sdata, mgmt, len);
1973 goto handled;
1974 }
1975 break;
1976 }
1977
1978 /*
1979 * For AP mode, hostapd is responsible for handling any action
1980 * frames that we didn't handle, including returning unknown
1981 * ones. For all other modes we will return them to the sender,
1982 * setting the 0x80 bit in the action category, as required by
1983 * 802.11-2007 7.3.1.11.
1984 */
1985 if (sdata->vif.type == NL80211_IFTYPE_AP ||
1986 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1987 return RX_DROP_MONITOR;
1988
1989 /*
1990 * Getting here means the kernel doesn't know how to handle
1991 * it, but maybe userspace does ... include returned frames
1992 * so userspace can register for those to know whether ones
1993 * it transmitted were processed or returned.
1994 */
1995 status = IEEE80211_SKB_RXCB(rx->skb);
1996
1997 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1998 cfg80211_rx_action(rx->sdata->dev, status->freq,
1999 rx->skb->data, rx->skb->len,
2000 GFP_ATOMIC))
2001 goto handled;
2002
2003 /* do not return rejected action frames */
2004 if (mgmt->u.action.category & 0x80)
2005 return RX_DROP_UNUSABLE;
2006
2007 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2008 GFP_ATOMIC);
2009 if (nskb) {
2010 struct ieee80211_mgmt *mgmt = (void *)nskb->data;
2011
2012 mgmt->u.action.category |= 0x80;
2013 memcpy(mgmt->da, mgmt->sa, ETH_ALEN);
2014 memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2015
2016 memset(nskb->cb, 0, sizeof(nskb->cb));
2017
2018 ieee80211_tx_skb(rx->sdata, nskb);
2019 }
2020
2021 handled:
2022 if (rx->sta)
2023 rx->sta->rx_packets++;
2024 dev_kfree_skb(rx->skb);
2025 return RX_QUEUED;
2026 }
2027
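/*
 * Management frame handler: dispatches the frame to the work queue,
 * mesh, IBSS or station MLME code depending on the interface type.
 */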
2028 static ieee80211_rx_result debug_noinline
2029 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2030 {
2031 struct ieee80211_sub_if_data *sdata = rx->sdata;
2032 ieee80211_rx_result rxs;
2033
2034 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2035 return RX_DROP_MONITOR;
2036
2037 if (ieee80211_drop_unencrypted_mgmt(rx))
2038 return RX_DROP_UNUSABLE;
2039
2040 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2041 if (rxs != RX_CONTINUE)
2042 return rxs;
2043
2044 if (ieee80211_vif_is_mesh(&sdata->vif))
2045 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
2046
2047 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
2048 return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
2049
2050 if (sdata->vif.type == NL80211_IFTYPE_STATION)
2051 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
2052
2053 return RX_DROP_MONITOR;
2054 }
2055
2056 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2057 struct ieee80211_rx_data *rx)
2058 {
2059 int keyidx;
2060 unsigned int hdrlen;
2061
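/* the key index is carried in bits 6-7 of the KeyID octet, the fourth byte after the 802.11 header */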
2062 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2063 if (rx->skb->len >= hdrlen + 4)
2064 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2065 else
2066 keyidx = -1;
2067
2068 if (!rx->sta) {
2069 /*
2070 * Some hardware seems to generate incorrect Michael MIC
2071 * reports; ignore them to avoid triggering countermeasures.
2072 */
2073 return;
2074 }
2075
2076 if (!ieee80211_has_protected(hdr->frame_control))
2077 return;
2078
2079 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2080 /*
2081 * APs with pairwise keys should never receive Michael MIC
2082 * errors for non-zero keyidx because these are reserved for
2083 * group keys and only the AP is sending real multicast
2084 * frames in the BSS.
2085 */
2086 return;
2087 }
2088
2089 if (!ieee80211_is_data(hdr->frame_control) &&
2090 !ieee80211_is_auth(hdr->frame_control))
2091 return;
2092
2093 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2094 GFP_ATOMIC);
2095 }
2096
2097 /* TODO: use IEEE80211_RX_FRAGMENTED */
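/*
 * Build a minimal radiotap header and deliver the frame to every
 * monitor interface that requested "cooked" frames.
 */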
2098 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2099 struct ieee80211_rate *rate)
2100 {
2101 struct ieee80211_sub_if_data *sdata;
2102 struct ieee80211_local *local = rx->local;
2103 struct ieee80211_rtap_hdr {
2104 struct ieee80211_radiotap_header hdr;
2105 u8 flags;
2106 u8 rate_or_pad;
2107 __le16 chan_freq;
2108 __le16 chan_flags;
2109 } __attribute__ ((packed)) *rthdr;
2110 struct sk_buff *skb = rx->skb, *skb2;
2111 struct net_device *prev_dev = NULL;
2112 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2113
2114 if (status->flag & RX_FLAG_INTERNAL_CMTR)
2115 goto out_free_skb;
2116
2117 if (skb_headroom(skb) < sizeof(*rthdr) &&
2118 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2119 goto out_free_skb;
2120
2121 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2122 memset(rthdr, 0, sizeof(*rthdr));
2123 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2124 rthdr->hdr.it_present =
2125 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2126 (1 << IEEE80211_RADIOTAP_CHANNEL));
2127
2128 if (rate) {
2129 rthdr->rate_or_pad = rate->bitrate / 5;
2130 rthdr->hdr.it_present |=
2131 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2132 }
2133 rthdr->chan_freq = cpu_to_le16(status->freq);
2134
2135 if (status->band == IEEE80211_BAND_5GHZ)
2136 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2137 IEEE80211_CHAN_5GHZ);
2138 else
2139 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2140 IEEE80211_CHAN_2GHZ);
2141
2142 skb_set_mac_header(skb, 0);
2143 skb->ip_summed = CHECKSUM_UNNECESSARY;
2144 skb->pkt_type = PACKET_OTHERHOST;
2145 skb->protocol = htons(ETH_P_802_2);
2146
2147 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2148 if (!ieee80211_sdata_running(sdata))
2149 continue;
2150
2151 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2152 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2153 continue;
2154
2155 if (prev_dev) {
2156 skb2 = skb_clone(skb, GFP_ATOMIC);
2157 if (skb2) {
2158 skb2->dev = prev_dev;
2159 netif_rx(skb2);
2160 }
2161 }
2162
2163 prev_dev = sdata->dev;
2164 sdata->dev->stats.rx_packets++;
2165 sdata->dev->stats.rx_bytes += skb->len;
2166 }
2167
2168 if (prev_dev) {
2169 skb->dev = prev_dev;
2170 netif_rx(skb);
2171 skb = NULL;
2172 } else
2173 goto out_free_skb;
2174
2175 status->flag |= RX_FLAG_INTERNAL_CMTR;
2176 return;
2177
2178 out_free_skb:
2179 dev_kfree_skb(skb);
2180 }
2181
2182
2183 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2184 struct ieee80211_rx_data *rx,
2185 struct sk_buff *skb,
2186 struct ieee80211_rate *rate)
2187 {
2188 struct sk_buff_head reorder_release;
2189 ieee80211_rx_result res = RX_DROP_MONITOR;
2190
2191 __skb_queue_head_init(&reorder_release);
2192
2193 rx->skb = skb;
2194 rx->sdata = sdata;
2195
2196 #define CALL_RXH(rxh) \
2197 do { \
2198 res = rxh(rx); \
2199 if (res != RX_CONTINUE) \
2200 goto rxh_next; \
2201 } while (0);
2202
2203 /*
2204 * NB: the rxh_next label works even if we jump
2205 * to it from here because then the list will
2206 * be empty, which is a trivial check
2207 */
2208 CALL_RXH(ieee80211_rx_h_passive_scan)
2209 CALL_RXH(ieee80211_rx_h_check)
2210
2211 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2212
2213 while ((skb = __skb_dequeue(&reorder_release))) {
2214 /*
2215 * all the other fields are valid across frames
2216 * that belong to an A-MPDU since they are on the
2217 * same TID from the same station
2218 */
2219 rx->skb = skb;
2220
2221 CALL_RXH(ieee80211_rx_h_decrypt)
2222 CALL_RXH(ieee80211_rx_h_check_more_data)
2223 CALL_RXH(ieee80211_rx_h_sta_process)
2224 CALL_RXH(ieee80211_rx_h_defragment)
2225 CALL_RXH(ieee80211_rx_h_ps_poll)
2226 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2227 /* must be after MMIC verify so header is counted in MPDU mic */
2228 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2229 CALL_RXH(ieee80211_rx_h_amsdu)
2230 #ifdef CONFIG_MAC80211_MESH
2231 if (ieee80211_vif_is_mesh(&sdata->vif))
2232 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2233 #endif
2234 CALL_RXH(ieee80211_rx_h_data)
2235
2236 /* special treatment -- needs the queue */
2237 res = ieee80211_rx_h_ctrl(rx, &reorder_release);
2238 if (res != RX_CONTINUE)
2239 goto rxh_next;
2240
2241 CALL_RXH(ieee80211_rx_h_action)
2242 CALL_RXH(ieee80211_rx_h_mgmt)
2243
2244 #undef CALL_RXH
2245
2246 rxh_next:
2247 switch (res) {
2248 case RX_DROP_MONITOR:
2249 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2250 if (rx->sta)
2251 rx->sta->rx_dropped++;
2252 /* fall through */
2253 case RX_CONTINUE:
2254 ieee80211_rx_cooked_monitor(rx, rate);
2255 break;
2256 case RX_DROP_UNUSABLE:
2257 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2258 if (rx->sta)
2259 rx->sta->rx_dropped++;
2260 dev_kfree_skb(rx->skb);
2261 break;
2262 case RX_QUEUED:
2263 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
2264 break;
2265 }
2266 }
2267 }
2268
2269 /* main receive path */
2270
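/*
 * Decide whether the frame is of interest to this interface: returns 0
 * if it should be ignored, and clears IEEE80211_RX_RA_MATCH when the
 * frame is accepted but not directly addressed to us.
 */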
2271 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2272 struct ieee80211_rx_data *rx,
2273 struct ieee80211_hdr *hdr)
2274 {
2275 struct sk_buff *skb = rx->skb;
2276 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2277 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2278 int multicast = is_multicast_ether_addr(hdr->addr1);
2279
2280 switch (sdata->vif.type) {
2281 case NL80211_IFTYPE_STATION:
2282 if (!bssid && !sdata->u.mgd.use_4addr)
2283 return 0;
2284 if (!multicast &&
2285 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2286 if (!(sdata->dev->flags & IFF_PROMISC))
2287 return 0;
2288 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2289 }
2290 break;
2291 case NL80211_IFTYPE_ADHOC:
2292 if (!bssid)
2293 return 0;
2294 if (ieee80211_is_beacon(hdr->frame_control)) {
2295 return 1;
2296 }
2297 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2298 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2299 return 0;
2300 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2301 } else if (!multicast &&
2302 compare_ether_addr(sdata->vif.addr,
2303 hdr->addr1) != 0) {
2304 if (!(sdata->dev->flags & IFF_PROMISC))
2305 return 0;
2306 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2307 } else if (!rx->sta) {
2308 int rate_idx;
2309 if (status->flag & RX_FLAG_HT)
2310 rate_idx = 0; /* TODO: HT rates */
2311 else
2312 rate_idx = status->rate_idx;
2313 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2314 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2315 }
2316 break;
2317 case NL80211_IFTYPE_MESH_POINT:
2318 if (!multicast &&
2319 compare_ether_addr(sdata->vif.addr,
2320 hdr->addr1) != 0) {
2321 if (!(sdata->dev->flags & IFF_PROMISC))
2322 return 0;
2323
2324 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2325 }
2326 break;
2327 case NL80211_IFTYPE_AP_VLAN:
2328 case NL80211_IFTYPE_AP:
2329 if (!bssid) {
2330 if (compare_ether_addr(sdata->vif.addr,
2331 hdr->addr1))
2332 return 0;
2333 } else if (!ieee80211_bssid_match(bssid,
2334 sdata->vif.addr)) {
2335 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2336 return 0;
2337 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2338 }
2339 break;
2340 case NL80211_IFTYPE_WDS:
2341 if (bssid || !ieee80211_is_data(hdr->frame_control))
2342 return 0;
2343 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2344 return 0;
2345 break;
2346 case NL80211_IFTYPE_MONITOR:
2347 case NL80211_IFTYPE_UNSPECIFIED:
2348 case __NL80211_IFTYPE_AFTER_LAST:
2349 /* should never get here */
2350 WARN_ON(1);
2351 break;
2352 }
2353
2354 return 1;
2355 }
2356
2357 /*
2358 * This is the actual Rx frame handler. As it belongs to the Rx path it
2359 * must be called with rcu_read_lock protection.
2360 */
2361 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2362 struct sk_buff *skb,
2363 struct ieee80211_rate *rate)
2364 {
2365 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2366 struct ieee80211_local *local = hw_to_local(hw);
2367 struct ieee80211_sub_if_data *sdata;
2368 struct ieee80211_hdr *hdr;
2369 struct ieee80211_rx_data rx;
2370 int prepares;
2371 struct ieee80211_sub_if_data *prev = NULL;
2372 struct sk_buff *skb_new;
2373 struct sta_info *sta, *tmp;
2374 bool found_sta = false;
2375
2376 hdr = (struct ieee80211_hdr *)skb->data;
2377 memset(&rx, 0, sizeof(rx));
2378 rx.skb = skb;
2379 rx.local = local;
2380
2381 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2382 local->dot11ReceivedFragmentCount++;
2383
2384 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2385 test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
2386 rx.flags |= IEEE80211_RX_IN_SCAN;
2387
2388 ieee80211_parse_qos(&rx);
2389 ieee80211_verify_alignment(&rx);
2390
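/*
 * For data frames, look up the transmitter in the station table so the
 * frame is handed to the interface(s) that know this station.
 */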
2391 if (ieee80211_is_data(hdr->frame_control)) {
2392 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2393 rx.sta = sta;
2394 found_sta = true;
2395 rx.sdata = sta->sdata;
2396
2397 rx.flags |= IEEE80211_RX_RA_MATCH;
2398 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2399 if (prepares) {
2400 if (status->flag & RX_FLAG_MMIC_ERROR) {
2401 if (rx.flags & IEEE80211_RX_RA_MATCH)
2402 ieee80211_rx_michael_mic_report(hdr, &rx);
2403 } else
2404 prev = rx.sdata;
2405 }
2406 }
2407 }
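/*
 * No station entry matched: offer the frame to every running
 * interface, copying the skb whenever more than one wants it.
 */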
2408 if (!found_sta) {
2409 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2410 if (!ieee80211_sdata_running(sdata))
2411 continue;
2412
2413 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2414 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2415 continue;
2416
2417 /*
2418 * frame is destined for this interface, but if it's
2419 * not also for the previous one we handle that after
2420 * the loop to avoid copying the SKB once too often
2421 */
2422
2423 if (!prev) {
2424 prev = sdata;
2425 continue;
2426 }
2427
2428 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2429
2430 rx.flags |= IEEE80211_RX_RA_MATCH;
2431 prepares = prepare_for_handlers(prev, &rx, hdr);
2432
2433 if (!prepares)
2434 goto next;
2435
2436 if (status->flag & RX_FLAG_MMIC_ERROR) {
2437 rx.sdata = prev;
2438 if (rx.flags & IEEE80211_RX_RA_MATCH)
2439 ieee80211_rx_michael_mic_report(hdr,
2440 &rx);
2441 goto next;
2442 }
2443
2444 /*
2445 * frame was destined for the previous interface
2446 * so invoke RX handlers for it
2447 */
2448
2449 skb_new = skb_copy(skb, GFP_ATOMIC);
2450 if (!skb_new) {
2451 if (net_ratelimit())
2452 printk(KERN_DEBUG "%s: failed to copy "
2453 "multicast frame for %s\n",
2454 wiphy_name(local->hw.wiphy),
2455 prev->name);
2456 goto next;
2457 }
2458 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2459 next:
2460 prev = sdata;
2461 }
2462
2463 if (prev) {
2464 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2465
2466 rx.flags |= IEEE80211_RX_RA_MATCH;
2467 prepares = prepare_for_handlers(prev, &rx, hdr);
2468
2469 if (!prepares)
2470 prev = NULL;
2471 }
2472 }
2473 if (prev)
2474 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
2475 else
2476 dev_kfree_skb(skb);
2477 }
2478
2479 /*
2480 * This is the receive path handler. It is called by a low level driver when an
2481 * 802.11 MPDU is received from the hardware.
2482 */
2483 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2484 {
2485 struct ieee80211_local *local = hw_to_local(hw);
2486 struct ieee80211_rate *rate = NULL;
2487 struct ieee80211_supported_band *sband;
2488 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2489
2490 WARN_ON_ONCE(softirq_count() == 0);
2491
2492 if (WARN_ON(status->band < 0 ||
2493 status->band >= IEEE80211_NUM_BANDS))
2494 goto drop;
2495
2496 sband = local->hw.wiphy->bands[status->band];
2497 if (WARN_ON(!sband))
2498 goto drop;
2499
2500 /*
2501 * If we're suspending, it is possible although not too likely
2502 * that we'd be receiving frames after having already partially
2503 * quiesced the stack. We can't process such frames then since
2504 * that might, for example, cause stations to be added or other
2505 * driver callbacks to be invoked.
2506 */
2507 if (unlikely(local->quiescing || local->suspended))
2508 goto drop;
2509
2510 /*
2511 * The same happens when we're not even started,
2512 * but that's worth a warning.
2513 */
2514 if (WARN_ON(!local->started))
2515 goto drop;
2516
2517 if (status->flag & RX_FLAG_HT) {
2518 /*
2519 * rate_idx is MCS index, which can be [0-76] as documented on:
2520 *
2521 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2522 *
2523 * Anything else would be some sort of driver or hardware error.
2524 * The driver should catch hardware errors.
2525 */
2526 if (WARN((status->rate_idx < 0 ||
2527 status->rate_idx > 76),
2528 "Rate marked as an HT rate but passed "
2529 "status->rate_idx is not "
2530 "an MCS index [0-76]: %d (0x%02x)\n",
2531 status->rate_idx,
2532 status->rate_idx))
2533 goto drop;
2534 } else {
2535 if (WARN_ON(status->rate_idx < 0 ||
2536 status->rate_idx >= sband->n_bitrates))
2537 goto drop;
2538 rate = &sband->bitrates[status->rate_idx];
2539 }
2540
2541 /*
2542 * key references and virtual interfaces are protected using RCU
2543 * and this requires that we are in a read-side RCU section during
2544 * receive processing
2545 */
2546 rcu_read_lock();
2547
2548 /*
2549 * Frames with a failed FCS/PLCP checksum are not returned;
2550 * all other frames are returned without a radiotap header
2551 * if one was previously present.
2552 * Also, frames with less than 16 bytes are dropped.
2553 */
2554 skb = ieee80211_rx_monitor(local, skb, rate);
2555 if (!skb) {
2556 rcu_read_unlock();
2557 return;
2558 }
2559
2560 __ieee80211_rx_handle_packet(hw, skb, rate);
2561
2562 rcu_read_unlock();
2563
2564 return;
2565 drop:
2566 kfree_skb(skb);
2567 }
2568 EXPORT_SYMBOL(ieee80211_rx);
2569
2570 /* This is a version of the rx handler that can be called from hard irq
2571 * context. Post the skb on the queue and schedule the tasklet */
2572 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
2573 {
2574 struct ieee80211_local *local = hw_to_local(hw);
2575
2576 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2577
2578 skb->pkt_type = IEEE80211_RX_MSG;
2579 skb_queue_tail(&local->skb_queue, skb);
2580 tasklet_schedule(&local->tasklet);
2581 }
2582 EXPORT_SYMBOL(ieee80211_rx_irqsafe);