net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <net/mac80211.h>
21 #include <net/ieee80211_radiotap.h>
22
23 #include "ieee80211_i.h"
24 #include "driver-ops.h"
25 #include "led.h"
26 #include "mesh.h"
27 #include "wep.h"
28 #include "wpa.h"
29 #include "tkip.h"
30 #include "wme.h"
31
32 /*
33 * monitor mode reception
34 *
35 * This function cleans up the SKB, i.e. it removes all the stuff
36 * only useful for monitoring.
37 */
38 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
39 struct sk_buff *skb)
40 {
41 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
42 if (likely(skb->len > FCS_LEN))
43 __pskb_trim(skb, skb->len - FCS_LEN);
44 else {
45 /* driver bug */
46 WARN_ON(1);
47 dev_kfree_skb(skb);
48 skb = NULL;
49 }
50 }
51
52 return skb;
53 }
54
55 static inline int should_drop_frame(struct sk_buff *skb,
56 int present_fcs_len)
57 {
58 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
59 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
60
61 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
62 return 1;
63 if (unlikely(skb->len < 16 + present_fcs_len))
64 return 1;
65 if (ieee80211_is_ctl(hdr->frame_control) &&
66 !ieee80211_is_pspoll(hdr->frame_control) &&
67 !ieee80211_is_back_req(hdr->frame_control))
68 return 1;
69 return 0;
70 }
71
72 static int
73 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
74 struct ieee80211_rx_status *status)
75 {
76 int len;
77
78 /* always present fields */
79 len = sizeof(struct ieee80211_radiotap_header) + 9;
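/*
 * The 9 bytes cover the fields ieee80211_add_rx_radiotap_header() below
 * adds unconditionally: flags (1), rate or its padding byte (1), channel
 * frequency and channel flags (2 + 2), antenna (1) and RX flags (2).
 */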
80
81 if (status->flag & RX_FLAG_MACTIME_MPDU)
82 len += 8;
83 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
84 len += 1;
85
86 if (len & 1) /* padding for RX_FLAGS if necessary */
87 len++;
88
89 if (status->flag & RX_FLAG_HT) /* HT info */
90 len += 3;
91
92 return len;
93 }
94
95 /*
96 * ieee80211_add_rx_radiotap_header - add radiotap header
97 *
98 * add a radiotap header containing all the fields which the hardware provided.
99 */
100 static void
101 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
102 struct sk_buff *skb,
103 struct ieee80211_rate *rate,
104 int rtap_len)
105 {
106 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
107 struct ieee80211_radiotap_header *rthdr;
108 unsigned char *pos;
109 u16 rx_flags = 0;
110
111 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
112 memset(rthdr, 0, rtap_len);
113
114 /* radiotap header, set always present flags */
115 rthdr->it_present =
116 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
117 (1 << IEEE80211_RADIOTAP_CHANNEL) |
118 (1 << IEEE80211_RADIOTAP_ANTENNA) |
119 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
120 rthdr->it_len = cpu_to_le16(rtap_len);
121
122 pos = (unsigned char *)(rthdr+1);
123
124 /* the order of the following fields is important */
125
126 /* IEEE80211_RADIOTAP_TSFT */
127 if (status->flag & RX_FLAG_MACTIME_MPDU) {
128 put_unaligned_le64(status->mactime, pos);
129 rthdr->it_present |=
130 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
131 pos += 8;
132 }
133
134 /* IEEE80211_RADIOTAP_FLAGS */
135 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
136 *pos |= IEEE80211_RADIOTAP_F_FCS;
137 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
138 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
139 if (status->flag & RX_FLAG_SHORTPRE)
140 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
141 pos++;
142
143 /* IEEE80211_RADIOTAP_RATE */
144 if (status->flag & RX_FLAG_HT) {
145 /*
146 * MCS information is a separate field in radiotap,
147 * added below. The byte here is needed as padding
148 * for the channel though, so initialise it to 0.
149 */
150 *pos = 0;
151 } else {
152 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
153 *pos = rate->bitrate / 5;
154 }
155 pos++;
156
157 /* IEEE80211_RADIOTAP_CHANNEL */
158 put_unaligned_le16(status->freq, pos);
159 pos += 2;
160 if (status->band == IEEE80211_BAND_5GHZ)
161 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
162 pos);
163 else if (status->flag & RX_FLAG_HT)
164 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
165 pos);
166 else if (rate->flags & IEEE80211_RATE_ERP_G)
167 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
168 pos);
169 else
170 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
171 pos);
172 pos += 2;
173
174 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
175 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
176 *pos = status->signal;
177 rthdr->it_present |=
178 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
179 pos++;
180 }
181
182 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
183
184 /* IEEE80211_RADIOTAP_ANTENNA */
185 *pos = status->antenna;
186 pos++;
187
188 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
189
190 /* IEEE80211_RADIOTAP_RX_FLAGS */
191 /* ensure 2 byte alignment for the 2 byte field as required */
192 if ((pos - (u8 *)rthdr) & 1)
193 pos++;
194 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
195 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
196 put_unaligned_le16(rx_flags, pos);
197 pos += 2;
198
199 if (status->flag & RX_FLAG_HT) {
200 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
201 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
202 IEEE80211_RADIOTAP_MCS_HAVE_GI |
203 IEEE80211_RADIOTAP_MCS_HAVE_BW;
204 *pos = 0;
205 if (status->flag & RX_FLAG_SHORT_GI)
206 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
207 if (status->flag & RX_FLAG_40MHZ)
208 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
209 pos++;
210 *pos++ = status->rate_idx;
211 }
212 }
213
214 /*
215 * This function copies a received frame to all monitor interfaces and
216 * returns a cleaned-up SKB that no longer includes the FCS nor the
217 * radiotap header the driver might have added.
218 */
219 static struct sk_buff *
220 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
221 struct ieee80211_rate *rate)
222 {
223 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
224 struct ieee80211_sub_if_data *sdata;
225 int needed_headroom = 0;
226 struct sk_buff *skb, *skb2;
227 struct net_device *prev_dev = NULL;
228 int present_fcs_len = 0;
229
230 /*
231 * First, we may need to make a copy of the skb because
232 * (1) we need to modify it for radiotap (if not present), and
233 * (2) the other RX handlers will modify the skb we got.
234 *
235 * We don't need to, of course, if we aren't going to return
236 * the SKB because it has a bad FCS/PLCP checksum.
237 */
238
239 /* room for the radiotap header based on driver features */
240 needed_headroom = ieee80211_rx_radiotap_len(local, status);
241
242 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
243 present_fcs_len = FCS_LEN;
244
245 /* make sure hdr->frame_control is on the linear part */
246 if (!pskb_may_pull(origskb, 2)) {
247 dev_kfree_skb(origskb);
248 return NULL;
249 }
250
251 if (!local->monitors) {
252 if (should_drop_frame(origskb, present_fcs_len)) {
253 dev_kfree_skb(origskb);
254 return NULL;
255 }
256
257 return remove_monitor_info(local, origskb);
258 }
259
260 if (should_drop_frame(origskb, present_fcs_len)) {
261 /* only need to expand headroom if necessary */
262 skb = origskb;
263 origskb = NULL;
264
265 /*
266 * This shouldn't trigger often because most devices have an
267 * RX header they pull before we get here, and that should
268 * be big enough for our radiotap information. We should
269 * probably export the length to drivers so that we can have
270 * them allocate enough headroom to start with.
271 */
272 if (skb_headroom(skb) < needed_headroom &&
273 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
274 dev_kfree_skb(skb);
275 return NULL;
276 }
277 } else {
278 /*
279 * Need to make a copy and possibly remove radiotap header
280 * and FCS from the original.
281 */
282 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
283
284 origskb = remove_monitor_info(local, origskb);
285
286 if (!skb)
287 return origskb;
288 }
289
290 /* prepend radiotap information */
291 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
292
293 skb_reset_mac_header(skb);
294 skb->ip_summed = CHECKSUM_UNNECESSARY;
295 skb->pkt_type = PACKET_OTHERHOST;
296 skb->protocol = htons(ETH_P_802_2);
297
298 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
299 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
300 continue;
301
302 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
303 continue;
304
305 if (!ieee80211_sdata_running(sdata))
306 continue;
307
308 if (prev_dev) {
309 skb2 = skb_clone(skb, GFP_ATOMIC);
310 if (skb2) {
311 skb2->dev = prev_dev;
312 netif_receive_skb(skb2);
313 }
314 }
315
316 prev_dev = sdata->dev;
317 sdata->dev->stats.rx_packets++;
318 sdata->dev->stats.rx_bytes += skb->len;
319 }
320
321 if (prev_dev) {
322 skb->dev = prev_dev;
323 netif_receive_skb(skb);
324 } else
325 dev_kfree_skb(skb);
326
327 return origskb;
328 }
329
330
331 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
332 {
333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
334 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
335 int tid, seqno_idx, security_idx;
336
337 /* does the frame have a qos control field? */
338 if (ieee80211_is_data_qos(hdr->frame_control)) {
339 u8 *qc = ieee80211_get_qos_ctl(hdr);
340 /* frame has qos control */
341 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
342 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
343 status->rx_flags |= IEEE80211_RX_AMSDU;
344
345 seqno_idx = tid;
346 security_idx = tid;
347 } else {
348 /*
349 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
350 *
351 * Sequence numbers for management frames, QoS data
352 * frames with a broadcast/multicast address in the
353 * Address 1 field, and all non-QoS data frames sent
354 * by QoS STAs are assigned using an additional single
355 * modulo-4096 counter, [...]
356 *
357 * We also use that counter for non-QoS STAs.
358 */
359 seqno_idx = NUM_RX_DATA_QUEUES;
360 security_idx = 0;
361 if (ieee80211_is_mgmt(hdr->frame_control))
362 security_idx = NUM_RX_DATA_QUEUES;
363 tid = 0;
364 }
365
366 rx->seqno_idx = seqno_idx;
367 rx->security_idx = security_idx;
368 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
369 * For now, set skb->priority to 0 for other cases. */
370 rx->skb->priority = (tid > 7) ? 0 : tid;
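	/*
	 * For example, a QoS data frame with TID 5 ends up with
	 * skb->priority == 5, while TID values 8-15 (high-order bit set)
	 * map to priority 0 for now.
	 */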
371 }
372
373 /**
374 * DOC: Packet alignment
375 *
376 * Drivers always need to pass packets that are aligned to two-byte boundaries
377 * to the stack.
378 *
379 * Additionally, drivers should, if possible, align the payload data in a way that
380 * guarantees that the contained IP header is aligned to a four-byte
381 * boundary. In the case of regular frames, this simply means aligning the
382 * payload to a four-byte boundary (because either the IP header is directly
383 * contained, or IV/RFC1042 headers that have a length divisible by four are
384 * in front of it). If the payload data is not properly aligned and the
385 * architecture doesn't support efficient unaligned operations, mac80211
386 * will align the data.
387 *
388 * With A-MSDU frames, however, the payload data address must yield two modulo
389 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
390 * push the IP header further back to a multiple of four again. Thankfully, the
391 * specs were sane enough this time around to require padding each A-MSDU
392 * subframe to a length that is a multiple of four.
393 *
394 * Padding such as that added by Atheros hardware between the 802.11 header
395 * and the payload is not supported; the driver is required to move the
396 * 802.11 header so that it is directly in front of the payload in that case.
397 */
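/*
 * Worked example for the A-MSDU case above: if the A-MSDU payload starts at
 * an address that is 2 modulo 4, each 14-byte 802.3 subframe header ends at
 * 2 + 14 = 16, so the encapsulated IP header again starts on a four-byte
 * boundary.
 */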
398 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
399 {
400 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
401 WARN_ONCE((unsigned long)rx->skb->data & 1,
402 "unaligned packet at 0x%p\n", rx->skb->data);
403 #endif
404 }
405
406
407 /* rx handlers */
408
409 static ieee80211_rx_result debug_noinline
410 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
411 {
412 struct ieee80211_local *local = rx->local;
413 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
414 struct sk_buff *skb = rx->skb;
415
416 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
417 !local->sched_scanning))
418 return RX_CONTINUE;
419
420 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
421 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
422 local->sched_scanning)
423 return ieee80211_scan_rx(rx->sdata, skb);
424
425 /* scanning finished while the RX handlers were running */
426 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
427 return RX_DROP_UNUSABLE;
428 }
429
430
431 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
432 {
433 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
434
435 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
436 return 0;
437
438 return ieee80211_is_robust_mgmt_frame(hdr);
439 }
440
441
442 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
443 {
444 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
445
446 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
447 return 0;
448
449 return ieee80211_is_robust_mgmt_frame(hdr);
450 }
451
452
453 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
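/*
 * For reference, the MMIE occupies the last 18 bytes of a BIP-protected
 * management frame: element ID (1), length (1), key ID (2), IPN (6) and
 * MIC (8), hence the expected length field of sizeof(*mmie) - 2 = 16.
 */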
454 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
455 {
456 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
457 struct ieee80211_mmie *mmie;
458
459 if (skb->len < 24 + sizeof(*mmie) ||
460 !is_multicast_ether_addr(hdr->da))
461 return -1;
462
463 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
464 return -1; /* not a robust management frame */
465
466 mmie = (struct ieee80211_mmie *)
467 (skb->data + skb->len - sizeof(*mmie));
468 if (mmie->element_id != WLAN_EID_MMIE ||
469 mmie->length != sizeof(*mmie) - 2)
470 return -1;
471
472 return le16_to_cpu(mmie->key_id);
473 }
474
475
476 static ieee80211_rx_result
477 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
478 {
479 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
480 char *dev_addr = rx->sdata->vif.addr;
481
482 if (ieee80211_is_data(hdr->frame_control)) {
483 if (is_multicast_ether_addr(hdr->addr1)) {
484 if (ieee80211_has_tods(hdr->frame_control) ||
485 !ieee80211_has_fromds(hdr->frame_control))
486 return RX_DROP_MONITOR;
487 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
488 return RX_DROP_MONITOR;
489 } else {
490 if (!ieee80211_has_a4(hdr->frame_control))
491 return RX_DROP_MONITOR;
492 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
493 return RX_DROP_MONITOR;
494 }
495 }
496
497 /* If there is not an established peer link and this is not a peer link
498 * establishment frame, beacon or probe, drop the frame.
499 */
500
501 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
502 struct ieee80211_mgmt *mgmt;
503
504 if (!ieee80211_is_mgmt(hdr->frame_control))
505 return RX_DROP_MONITOR;
506
507 if (ieee80211_is_action(hdr->frame_control)) {
508 u8 category;
509 mgmt = (struct ieee80211_mgmt *)hdr;
510 category = mgmt->u.action.category;
511 if (category != WLAN_CATEGORY_MESH_ACTION &&
512 category != WLAN_CATEGORY_SELF_PROTECTED)
513 return RX_DROP_MONITOR;
514 return RX_CONTINUE;
515 }
516
517 if (ieee80211_is_probe_req(hdr->frame_control) ||
518 ieee80211_is_probe_resp(hdr->frame_control) ||
519 ieee80211_is_beacon(hdr->frame_control) ||
520 ieee80211_is_auth(hdr->frame_control))
521 return RX_CONTINUE;
522
523 return RX_DROP_MONITOR;
524
525 }
526
527 return RX_CONTINUE;
528 }
529
530 #define SEQ_MODULO 0x1000
531 #define SEQ_MASK 0xfff
532
533 static inline int seq_less(u16 sq1, u16 sq2)
534 {
535 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
536 }
537
538 static inline u16 seq_inc(u16 sq)
539 {
540 return (sq + 1) & SEQ_MASK;
541 }
542
543 static inline u16 seq_sub(u16 sq1, u16 sq2)
544 {
545 return (sq1 - sq2) & SEQ_MASK;
546 }
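/*
 * Example of the modulo-4096 arithmetic above: seq_less(4095, 1) is true
 * since ((4095 - 1) & SEQ_MASK) == 4094 > SEQ_MODULO / 2, i.e. 1 logically
 * follows 4095 after the sequence number wraps, and seq_sub(1, 4095) == 2
 * is the circular distance between them.
 */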
547
548
549 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
550 struct tid_ampdu_rx *tid_agg_rx,
551 int index)
552 {
553 struct ieee80211_local *local = hw_to_local(hw);
554 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
555 struct ieee80211_rx_status *status;
556
557 lockdep_assert_held(&tid_agg_rx->reorder_lock);
558
559 if (!skb)
560 goto no_frame;
561
562 /* release the frame from the reorder ring buffer */
563 tid_agg_rx->stored_mpdu_num--;
564 tid_agg_rx->reorder_buf[index] = NULL;
565 status = IEEE80211_SKB_RXCB(skb);
566 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
567 skb_queue_tail(&local->rx_skb_queue, skb);
568
569 no_frame:
570 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
571 }
572
573 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
574 struct tid_ampdu_rx *tid_agg_rx,
575 u16 head_seq_num)
576 {
577 int index;
578
579 lockdep_assert_held(&tid_agg_rx->reorder_lock);
580
581 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
582 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
583 tid_agg_rx->buf_size;
584 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
585 }
586 }
587
588 /*
589 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
590 * the skb was added to the buffer longer than this time ago, the earlier
591 * frames that have not yet been received are assumed to be lost and the skb
592 * can be released for processing. This may also release other skb's from the
593 * reorder buffer if there are no additional gaps between the frames.
594 *
595 * Callers must hold tid_agg_rx->reorder_lock.
596 */
597 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
598
599 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
600 struct tid_ampdu_rx *tid_agg_rx)
601 {
602 int index, j;
603
604 lockdep_assert_held(&tid_agg_rx->reorder_lock);
605
606 /* release the buffer until next missing frame */
607 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
608 tid_agg_rx->buf_size;
609 if (!tid_agg_rx->reorder_buf[index] &&
610 tid_agg_rx->stored_mpdu_num > 1) {
611 /*
612 * No buffers ready to be released, but check whether any
613 * frames in the reorder buffer have timed out.
614 */
615 int skipped = 1;
616 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
617 j = (j + 1) % tid_agg_rx->buf_size) {
618 if (!tid_agg_rx->reorder_buf[j]) {
619 skipped++;
620 continue;
621 }
622 if (skipped &&
623 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
624 HT_RX_REORDER_BUF_TIMEOUT))
625 goto set_release_timer;
626
627 #ifdef CONFIG_MAC80211_HT_DEBUG
628 if (net_ratelimit())
629 wiphy_debug(hw->wiphy,
630 "release an RX reorder frame due to timeout on earlier frames\n");
631 #endif
632 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
633
634 /*
635 * Increment the head seq# also for the skipped slots.
636 */
637 tid_agg_rx->head_seq_num =
638 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
639 skipped = 0;
640 }
641 } else while (tid_agg_rx->reorder_buf[index]) {
642 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
643 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
644 tid_agg_rx->buf_size;
645 }
646
647 if (tid_agg_rx->stored_mpdu_num) {
648 j = index = seq_sub(tid_agg_rx->head_seq_num,
649 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
650
651 for (; j != (index - 1) % tid_agg_rx->buf_size;
652 j = (j + 1) % tid_agg_rx->buf_size) {
653 if (tid_agg_rx->reorder_buf[j])
654 break;
655 }
656
657 set_release_timer:
658
659 mod_timer(&tid_agg_rx->reorder_timer,
660 tid_agg_rx->reorder_time[j] + 1 +
661 HT_RX_REORDER_BUF_TIMEOUT);
662 } else {
663 del_timer(&tid_agg_rx->reorder_timer);
664 }
665 }
666
667 /*
668 * As this function belongs to the RX path it must be under
669 * rcu_read_lock protection. It returns false if the frame
670 * can be processed immediately, true if it was consumed.
671 */
672 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
673 struct tid_ampdu_rx *tid_agg_rx,
674 struct sk_buff *skb)
675 {
676 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
677 u16 sc = le16_to_cpu(hdr->seq_ctrl);
678 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
679 u16 head_seq_num, buf_size;
680 int index;
681 bool ret = true;
682
683 spin_lock(&tid_agg_rx->reorder_lock);
684
685 buf_size = tid_agg_rx->buf_size;
686 head_seq_num = tid_agg_rx->head_seq_num;
687
688 /* frame with out of date sequence number */
689 if (seq_less(mpdu_seq_num, head_seq_num)) {
690 dev_kfree_skb(skb);
691 goto out;
692 }
693
694 /*
695 * If the frame sequence number exceeds our buffering window
696 * size, release some previous frames to make room for this one.
697 */
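	/*
	 * For example, with head_seq_num == 100 and buf_size == 64, an
	 * incoming MPDU with sequence number 200 moves the head to
	 * seq_inc(seq_sub(200, 64)) == 137, releasing everything up to 136
	 * so that 200 fits at the top of the 64-frame window [137, 200].
	 */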
698 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
699 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
700 /* release stored frames up to new head to stack */
701 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
702 }
703
704 /* Now the new frame is always in the range of the reordering buffer */
705
706 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
707
708 /* check if we already stored this frame */
709 if (tid_agg_rx->reorder_buf[index]) {
710 dev_kfree_skb(skb);
711 goto out;
712 }
713
714 /*
715 * If the current MPDU is in the right order and nothing else
716 * is stored we can process it directly, no need to buffer it.
717 * If it is first but there's something stored, we may be able
718 * to release frames after this one.
719 */
720 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
721 tid_agg_rx->stored_mpdu_num == 0) {
722 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
723 ret = false;
724 goto out;
725 }
726
727 /* put the frame in the reordering buffer */
728 tid_agg_rx->reorder_buf[index] = skb;
729 tid_agg_rx->reorder_time[index] = jiffies;
730 tid_agg_rx->stored_mpdu_num++;
731 ieee80211_sta_reorder_release(hw, tid_agg_rx);
732
733 out:
734 spin_unlock(&tid_agg_rx->reorder_lock);
735 return ret;
736 }
737
738 /*
739 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that do not
740 * need reordering or that can be released immediately are queued for processing.
741 */
742 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
743 {
744 struct sk_buff *skb = rx->skb;
745 struct ieee80211_local *local = rx->local;
746 struct ieee80211_hw *hw = &local->hw;
747 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
748 struct sta_info *sta = rx->sta;
749 struct tid_ampdu_rx *tid_agg_rx;
750 u16 sc;
751 int tid;
752
753 if (!ieee80211_is_data_qos(hdr->frame_control))
754 goto dont_reorder;
755
756 /*
757 * filter the QoS data rx stream according to
758 * STA/TID and check if this STA/TID is on aggregation
759 */
760
761 if (!sta)
762 goto dont_reorder;
763
764 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
765
766 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
767 if (!tid_agg_rx)
768 goto dont_reorder;
769
770 /* qos null data frames are excluded */
771 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
772 goto dont_reorder;
773
774 /* new, potentially un-ordered, ampdu frame - process it */
775
776 /* reset session timer */
777 if (tid_agg_rx->timeout)
778 mod_timer(&tid_agg_rx->session_timer,
779 TU_TO_EXP_TIME(tid_agg_rx->timeout));
780
781 /* if this mpdu is fragmented - terminate rx aggregation session */
782 sc = le16_to_cpu(hdr->seq_ctrl);
783 if (sc & IEEE80211_SCTL_FRAG) {
784 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
785 skb_queue_tail(&rx->sdata->skb_queue, skb);
786 ieee80211_queue_work(&local->hw, &rx->sdata->work);
787 return;
788 }
789
790 /*
791 * No locking needed -- we will only ever process one
792 * RX packet at a time, and thus own tid_agg_rx. All
793 * other code manipulating it needs to (and does) make
794 * sure that we cannot get to it any more before doing
795 * anything with it.
796 */
797 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
798 return;
799
800 dont_reorder:
801 skb_queue_tail(&local->rx_skb_queue, skb);
802 }
803
804 static ieee80211_rx_result debug_noinline
805 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
806 {
807 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
808 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
809
810 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
811 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
812 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
813 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
814 hdr->seq_ctrl)) {
815 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
816 rx->local->dot11FrameDuplicateCount++;
817 rx->sta->num_duplicates++;
818 }
819 return RX_DROP_UNUSABLE;
820 } else
821 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
822 }
823
824 if (unlikely(rx->skb->len < 16)) {
825 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
826 return RX_DROP_MONITOR;
827 }
828
829 /* Drop disallowed frame classes based on STA auth/assoc state;
830 * IEEE 802.11, Chap 5.5.
831 *
832 * mac80211 filters only based on association state, i.e. it drops
833 * Class 3 frames from not associated stations. hostapd sends
834 * deauth/disassoc frames when needed. In addition, hostapd is
835 * responsible for filtering on both auth and assoc states.
836 */
837
838 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
839 return ieee80211_rx_mesh_check(rx);
840
841 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
842 ieee80211_is_pspoll(hdr->frame_control)) &&
843 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
844 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
845 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
846 if (rx->sta && rx->sta->dummy &&
847 ieee80211_is_data_present(hdr->frame_control)) {
848 u16 ethertype;
849 u8 *payload;
850
851 payload = rx->skb->data +
852 ieee80211_hdrlen(hdr->frame_control);
853 ethertype = (payload[6] << 8) | payload[7];
854 if (cpu_to_be16(ethertype) ==
855 rx->sdata->control_port_protocol)
856 return RX_CONTINUE;
857 }
858 return RX_DROP_MONITOR;
859 }
860
861 return RX_CONTINUE;
862 }
863
864
865 static ieee80211_rx_result debug_noinline
866 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
867 {
868 struct sk_buff *skb = rx->skb;
869 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
870 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
871 int keyidx;
872 int hdrlen;
873 ieee80211_rx_result result = RX_DROP_UNUSABLE;
874 struct ieee80211_key *sta_ptk = NULL;
875 int mmie_keyidx = -1;
876 __le16 fc;
877
878 /*
879 * Key selection 101
880 *
881 * There are four types of keys:
882 * - GTK (group keys)
883 * - IGTK (group keys for management frames)
884 * - PTK (pairwise keys)
885 * - STK (station-to-station pairwise keys)
886 *
887 * When selecting a key, we have to distinguish between multicast
888 * (including broadcast) and unicast frames, the latter can only
889 * use PTKs and STKs while the former always use GTKs and IGTKs.
890 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
891 * unicast frames can also use key indices like GTKs. Hence, if we
892 * don't have a PTK/STK we check the key index for a WEP key.
893 *
894 * Note that in a regular BSS, multicast frames are sent by the
895 * AP only, associated stations unicast the frame to the AP first
896 * which then multicasts it on their behalf.
897 *
898 * There is also a slight problem in IBSS mode: GTKs are negotiated
899 * with each station, that is something we don't currently handle.
900 * The spec seems to expect that one negotiates the same key with
901 * every station but there's no such requirement; VLANs could be
902 * possible.
903 */
904
905 /*
906 * No point in finding a key and decrypting if the frame is neither
907 * addressed to us nor a multicast frame.
908 */
909 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
910 return RX_CONTINUE;
911
912 /* start without a key */
913 rx->key = NULL;
914
915 if (rx->sta)
916 sta_ptk = rcu_dereference(rx->sta->ptk);
917
918 fc = hdr->frame_control;
919
920 if (!ieee80211_has_protected(fc))
921 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
922
923 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
924 rx->key = sta_ptk;
925 if ((status->flag & RX_FLAG_DECRYPTED) &&
926 (status->flag & RX_FLAG_IV_STRIPPED))
927 return RX_CONTINUE;
928 /* Skip decryption if the frame is not protected. */
929 if (!ieee80211_has_protected(fc))
930 return RX_CONTINUE;
931 } else if (mmie_keyidx >= 0) {
932 /* Broadcast/multicast robust management frame / BIP */
933 if ((status->flag & RX_FLAG_DECRYPTED) &&
934 (status->flag & RX_FLAG_IV_STRIPPED))
935 return RX_CONTINUE;
936
937 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
938 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
939 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
940 if (rx->sta)
941 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
942 if (!rx->key)
943 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
944 } else if (!ieee80211_has_protected(fc)) {
945 /*
946 * The frame was not protected, so skip decryption. However, we
947 * need to set rx->key if there is a key that could have been
948 * used so that the frame may be dropped if encryption would
949 * have been expected.
950 */
951 struct ieee80211_key *key = NULL;
952 struct ieee80211_sub_if_data *sdata = rx->sdata;
953 int i;
954
955 if (ieee80211_is_mgmt(fc) &&
956 is_multicast_ether_addr(hdr->addr1) &&
957 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
958 rx->key = key;
959 else {
960 if (rx->sta) {
961 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
962 key = rcu_dereference(rx->sta->gtk[i]);
963 if (key)
964 break;
965 }
966 }
967 if (!key) {
968 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
969 key = rcu_dereference(sdata->keys[i]);
970 if (key)
971 break;
972 }
973 }
974 if (key)
975 rx->key = key;
976 }
977 return RX_CONTINUE;
978 } else {
979 u8 keyid;
980 /*
981 * The device doesn't give us the IV so we won't be
982 * able to look up the key. That's ok though, we
983 * don't need to decrypt the frame, we just won't
984 * be able to keep statistics accurate.
985 * Except for key threshold notifications, should
986 * we somehow allow the driver to tell us which key
987 * the hardware used if this flag is set?
988 */
989 if ((status->flag & RX_FLAG_DECRYPTED) &&
990 (status->flag & RX_FLAG_IV_STRIPPED))
991 return RX_CONTINUE;
992
993 hdrlen = ieee80211_hdrlen(fc);
994
995 if (rx->skb->len < 8 + hdrlen)
996 return RX_DROP_UNUSABLE; /* TODO: count this? */
997
998 /*
999 * no need to call ieee80211_wep_get_keyidx,
1000 * it verifies a bunch of things we've done already
1001 */
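		/* the key index lives in the top two bits of the KeyID octet at hdrlen + 3 */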
1002 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1003 keyidx = keyid >> 6;
1004
1005 /* check per-station GTK first, if multicast packet */
1006 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1007 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1008
1009 /* if not found, try default key */
1010 if (!rx->key) {
1011 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1012
1013 /*
1014 * RSNA-protected unicast frames should always be
1015 * sent with pairwise or station-to-station keys,
1016 * but for WEP we allow using a key index as well.
1017 */
1018 if (rx->key &&
1019 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1020 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1021 !is_multicast_ether_addr(hdr->addr1))
1022 rx->key = NULL;
1023 }
1024 }
1025
1026 if (rx->key) {
1027 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1028 return RX_DROP_MONITOR;
1029
1030 rx->key->tx_rx_count++;
1031 /* TODO: add threshold stuff again */
1032 } else {
1033 return RX_DROP_MONITOR;
1034 }
1035
1036 if (skb_linearize(rx->skb))
1037 return RX_DROP_UNUSABLE;
1038 /* the hdr variable is invalid now! */
1039
1040 switch (rx->key->conf.cipher) {
1041 case WLAN_CIPHER_SUITE_WEP40:
1042 case WLAN_CIPHER_SUITE_WEP104:
1043 /* Check for weak IVs if possible */
1044 if (rx->sta && ieee80211_is_data(fc) &&
1045 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1046 !(status->flag & RX_FLAG_DECRYPTED)) &&
1047 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1048 rx->sta->wep_weak_iv_count++;
1049
1050 result = ieee80211_crypto_wep_decrypt(rx);
1051 break;
1052 case WLAN_CIPHER_SUITE_TKIP:
1053 result = ieee80211_crypto_tkip_decrypt(rx);
1054 break;
1055 case WLAN_CIPHER_SUITE_CCMP:
1056 result = ieee80211_crypto_ccmp_decrypt(rx);
1057 break;
1058 case WLAN_CIPHER_SUITE_AES_CMAC:
1059 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1060 break;
1061 default:
1062 /*
1063 * We can reach here only with HW-only algorithms
1064 * but why didn't it decrypt the frame?!
1065 */
1066 return RX_DROP_UNUSABLE;
1067 }
1068
1069 /* either the frame has been decrypted or will be dropped */
1070 status->flag |= RX_FLAG_DECRYPTED;
1071
1072 return result;
1073 }
1074
1075 static ieee80211_rx_result debug_noinline
1076 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1077 {
1078 struct ieee80211_local *local;
1079 struct ieee80211_hdr *hdr;
1080 struct sk_buff *skb;
1081
1082 local = rx->local;
1083 skb = rx->skb;
1084 hdr = (struct ieee80211_hdr *) skb->data;
1085
1086 if (!local->pspolling)
1087 return RX_CONTINUE;
1088
1089 if (!ieee80211_has_fromds(hdr->frame_control))
1090 /* this is not from AP */
1091 return RX_CONTINUE;
1092
1093 if (!ieee80211_is_data(hdr->frame_control))
1094 return RX_CONTINUE;
1095
1096 if (!ieee80211_has_moredata(hdr->frame_control)) {
1097 /* AP has no more frames buffered for us */
1098 local->pspolling = false;
1099 return RX_CONTINUE;
1100 }
1101
1102 /* more data bit is set, let's request a new frame from the AP */
1103 ieee80211_send_pspoll(local, rx->sdata);
1104
1105 return RX_CONTINUE;
1106 }
1107
1108 static void ap_sta_ps_start(struct sta_info *sta)
1109 {
1110 struct ieee80211_sub_if_data *sdata = sta->sdata;
1111 struct ieee80211_local *local = sdata->local;
1112
1113 atomic_inc(&sdata->bss->num_sta_ps);
1114 set_sta_flag(sta, WLAN_STA_PS_STA);
1115 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1116 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1117 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1118 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1119 sdata->name, sta->sta.addr, sta->sta.aid);
1120 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1121 }
1122
1123 static void ap_sta_ps_end(struct sta_info *sta)
1124 {
1125 struct ieee80211_sub_if_data *sdata = sta->sdata;
1126
1127 atomic_dec(&sdata->bss->num_sta_ps);
1128
1129 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1130 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1131 sdata->name, sta->sta.addr, sta->sta.aid);
1132 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1133
1134 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1135 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1136 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1137 sdata->name, sta->sta.addr, sta->sta.aid);
1138 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1139 return;
1140 }
1141
1142 ieee80211_sta_ps_deliver_wakeup(sta);
1143 }
1144
1145 int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1146 {
1147 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1148 bool in_ps;
1149
1150 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1151
1152 /* Don't let the same PS state be set twice */
1153 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1154 if ((start && in_ps) || (!start && !in_ps))
1155 return -EINVAL;
1156
1157 if (start)
1158 ap_sta_ps_start(sta_inf);
1159 else
1160 ap_sta_ps_end(sta_inf);
1161
1162 return 0;
1163 }
1164 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
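/*
 * Usage sketch (hypothetical driver code, not from this file): a driver that
 * advertises IEEE80211_HW_AP_LINK_PS and detects the PM bit in hardware would
 * report the change from its RX path roughly as
 *
 *	err = ieee80211_sta_ps_transition(pubsta, entering_ps);
 *
 * where pubsta is the struct ieee80211_sta the frame came from and
 * entering_ps reflects the PM bit; -EINVAL means the station was already in
 * that power save state.
 */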
1165
1166 static ieee80211_rx_result debug_noinline
1167 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1168 {
1169 struct ieee80211_sub_if_data *sdata = rx->sdata;
1170 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1171 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1172 int tid, ac;
1173
1174 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1175 return RX_CONTINUE;
1176
1177 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1178 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1179 return RX_CONTINUE;
1180
1181 /*
1182 * The device handles station powersave, so don't do anything about
1183 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1184 * to mac80211, since the device handles them.)
1185 */
1186 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1187 return RX_CONTINUE;
1188
1189 /*
1190 * Don't do anything if the station isn't already asleep. In
1191 * the uAPSD case, the station will probably be marked asleep,
1192 * in the PS-Poll case the station must be confused ...
1193 */
1194 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1195 return RX_CONTINUE;
1196
1197 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1198 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1199 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1200 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1201 else
1202 set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1203 }
1204
1205 /* Free PS Poll skb here instead of returning RX_DROP that would
1206 * count as a dropped frame. */
1207 dev_kfree_skb(rx->skb);
1208
1209 return RX_QUEUED;
1210 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1211 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1212 ieee80211_has_pm(hdr->frame_control) &&
1213 (ieee80211_is_data_qos(hdr->frame_control) ||
1214 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1215 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1216 ac = ieee802_1d_to_ac[tid & 7];
1217
1218 /*
1219 * If this AC is not trigger-enabled do nothing.
1220 *
1221 * NB: This could/should check a separate bitmap of trigger-
1222 * enabled queues, but for now we only implement uAPSD w/o
1223 * TSPEC changes to the ACs, so they're always the same.
1224 */
1225 if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1226 return RX_CONTINUE;
1227
1228 /* if we are in a service period, do nothing */
1229 if (test_sta_flag(rx->sta, WLAN_STA_SP))
1230 return RX_CONTINUE;
1231
1232 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1233 ieee80211_sta_ps_deliver_uapsd(rx->sta);
1234 else
1235 set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1236 }
1237
1238 return RX_CONTINUE;
1239 }
1240
1241 static ieee80211_rx_result debug_noinline
1242 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1243 {
1244 struct sta_info *sta = rx->sta;
1245 struct sk_buff *skb = rx->skb;
1246 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1247 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1248
1249 if (!sta)
1250 return RX_CONTINUE;
1251
1252 /*
1253 * Update last_rx only for IBSS packets which are for the current
1254 * BSSID to avoid keeping the current IBSS network alive in cases
1255 * where other STAs start using different BSSID.
1256 */
1257 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1258 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1259 NL80211_IFTYPE_ADHOC);
1260 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
1261 sta->last_rx = jiffies;
1262 if (ieee80211_is_data(hdr->frame_control)) {
1263 sta->last_rx_rate_idx = status->rate_idx;
1264 sta->last_rx_rate_flag = status->flag;
1265 }
1266 }
1267 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1268 /*
1269 * Mesh beacons will update last_rx if they are found to
1270 * match the current local configuration when processed.
1271 */
1272 sta->last_rx = jiffies;
1273 if (ieee80211_is_data(hdr->frame_control)) {
1274 sta->last_rx_rate_idx = status->rate_idx;
1275 sta->last_rx_rate_flag = status->flag;
1276 }
1277 }
1278
1279 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1280 return RX_CONTINUE;
1281
1282 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1283 ieee80211_sta_rx_notify(rx->sdata, hdr);
1284
1285 sta->rx_fragments++;
1286 sta->rx_bytes += rx->skb->len;
1287 sta->last_signal = status->signal;
1288 ewma_add(&sta->avg_signal, -status->signal);
1289
1290 /*
1291 * Change STA power saving mode only at the end of a frame
1292 * exchange sequence.
1293 */
1294 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1295 !ieee80211_has_morefrags(hdr->frame_control) &&
1296 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1297 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1298 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1299 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1300 /*
1301 * Ignore doze->wake transitions that are
1302 * indicated by non-data frames, the standard
1303 * is unclear here, but for example going to
1304 * PS mode and then scanning would cause a
1305 * doze->wake transition for the probe request,
1306 * and that is clearly undesirable.
1307 */
1308 if (ieee80211_is_data(hdr->frame_control) &&
1309 !ieee80211_has_pm(hdr->frame_control))
1310 ap_sta_ps_end(sta);
1311 } else {
1312 if (ieee80211_has_pm(hdr->frame_control))
1313 ap_sta_ps_start(sta);
1314 }
1315 }
1316
1317 /*
1318 * Drop (qos-)data::nullfunc frames silently, since they
1319 * are used only to control station power saving mode.
1320 */
1321 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1322 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1323 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1324
1325 /*
1326 * If we receive a 4-addr nullfunc frame from a STA
1327 * that was not moved to a 4-addr STA vlan yet, drop
1328 * the frame to the monitor interface, to make sure
1329 * that hostapd sees it
1330 */
1331 if (ieee80211_has_a4(hdr->frame_control) &&
1332 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1333 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1334 !rx->sdata->u.vlan.sta)))
1335 return RX_DROP_MONITOR;
1336 /*
1337 * Update counter and free packet here to avoid
1338 * counting this as a dropped packet.
1339 */
1340 sta->rx_packets++;
1341 dev_kfree_skb(rx->skb);
1342 return RX_QUEUED;
1343 }
1344
1345 return RX_CONTINUE;
1346 } /* ieee80211_rx_h_sta_process */
1347
1348 static inline struct ieee80211_fragment_entry *
1349 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1350 unsigned int frag, unsigned int seq, int rx_queue,
1351 struct sk_buff **skb)
1352 {
1353 struct ieee80211_fragment_entry *entry;
1354 int idx;
1355
1356 idx = sdata->fragment_next;
1357 entry = &sdata->fragments[sdata->fragment_next++];
1358 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1359 sdata->fragment_next = 0;
1360
1361 if (!skb_queue_empty(&entry->skb_list)) {
1362 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1363 struct ieee80211_hdr *hdr =
1364 (struct ieee80211_hdr *) entry->skb_list.next->data;
1365 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1366 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1367 "addr1=%pM addr2=%pM\n",
1368 sdata->name, idx,
1369 jiffies - entry->first_frag_time, entry->seq,
1370 entry->last_frag, hdr->addr1, hdr->addr2);
1371 #endif
1372 __skb_queue_purge(&entry->skb_list);
1373 }
1374
1375 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1376 *skb = NULL;
1377 entry->first_frag_time = jiffies;
1378 entry->seq = seq;
1379 entry->rx_queue = rx_queue;
1380 entry->last_frag = frag;
1381 entry->ccmp = 0;
1382 entry->extra_len = 0;
1383
1384 return entry;
1385 }
1386
1387 static inline struct ieee80211_fragment_entry *
1388 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1389 unsigned int frag, unsigned int seq,
1390 int rx_queue, struct ieee80211_hdr *hdr)
1391 {
1392 struct ieee80211_fragment_entry *entry;
1393 int i, idx;
1394
1395 idx = sdata->fragment_next;
1396 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1397 struct ieee80211_hdr *f_hdr;
1398
1399 idx--;
1400 if (idx < 0)
1401 idx = IEEE80211_FRAGMENT_MAX - 1;
1402
1403 entry = &sdata->fragments[idx];
1404 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1405 entry->rx_queue != rx_queue ||
1406 entry->last_frag + 1 != frag)
1407 continue;
1408
1409 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1410
1411 /*
1412 * Check ftype and addresses are equal, else check next fragment
1413 */
1414 if (((hdr->frame_control ^ f_hdr->frame_control) &
1415 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1416 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1417 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1418 continue;
1419
1420 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1421 __skb_queue_purge(&entry->skb_list);
1422 continue;
1423 }
1424 return entry;
1425 }
1426
1427 return NULL;
1428 }
1429
1430 static ieee80211_rx_result debug_noinline
1431 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1432 {
1433 struct ieee80211_hdr *hdr;
1434 u16 sc;
1435 __le16 fc;
1436 unsigned int frag, seq;
1437 struct ieee80211_fragment_entry *entry;
1438 struct sk_buff *skb;
1439 struct ieee80211_rx_status *status;
1440
1441 hdr = (struct ieee80211_hdr *)rx->skb->data;
1442 fc = hdr->frame_control;
1443 sc = le16_to_cpu(hdr->seq_ctrl);
1444 frag = sc & IEEE80211_SCTL_FRAG;
1445
1446 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1447 (rx->skb)->len < 24 ||
1448 is_multicast_ether_addr(hdr->addr1))) {
1449 /* not fragmented */
1450 goto out;
1451 }
1452 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1453
1454 if (skb_linearize(rx->skb))
1455 return RX_DROP_UNUSABLE;
1456
1457 /*
1458 * skb_linearize() might change the skb->data and
1459 * previously cached variables (in this case, hdr) need to
1460 * be refreshed with the new data.
1461 */
1462 hdr = (struct ieee80211_hdr *)rx->skb->data;
1463 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1464
1465 if (frag == 0) {
1466 /* This is the first fragment of a new frame. */
1467 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1468 rx->seqno_idx, &(rx->skb));
1469 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1470 ieee80211_has_protected(fc)) {
1471 int queue = rx->security_idx;
1472 /* Store CCMP PN so that we can verify that the next
1473 * fragment has a sequential PN value. */
1474 entry->ccmp = 1;
1475 memcpy(entry->last_pn,
1476 rx->key->u.ccmp.rx_pn[queue],
1477 CCMP_PN_LEN);
1478 }
1479 return RX_QUEUED;
1480 }
1481
1482 /* This is a fragment for a frame that should already be pending in
1483 * the fragment cache. Add this fragment to the end of the pending entry.
1484 */
1485 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1486 rx->seqno_idx, hdr);
1487 if (!entry) {
1488 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1489 return RX_DROP_MONITOR;
1490 }
1491
1492 /* Verify that MPDUs within one MSDU have sequential PN values.
1493 * (IEEE 802.11i, 8.3.3.4.5) */
1494 if (entry->ccmp) {
1495 int i;
1496 u8 pn[CCMP_PN_LEN], *rpn;
1497 int queue;
1498 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1499 return RX_DROP_UNUSABLE;
1500 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1501 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1502 pn[i]++;
1503 if (pn[i])
1504 break;
1505 }
1506 queue = rx->security_idx;
1507 rpn = rx->key->u.ccmp.rx_pn[queue];
1508 if (memcmp(pn, rpn, CCMP_PN_LEN))
1509 return RX_DROP_UNUSABLE;
1510 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1511 }
1512
1513 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1514 __skb_queue_tail(&entry->skb_list, rx->skb);
1515 entry->last_frag = frag;
1516 entry->extra_len += rx->skb->len;
1517 if (ieee80211_has_morefrags(fc)) {
1518 rx->skb = NULL;
1519 return RX_QUEUED;
1520 }
1521
1522 rx->skb = __skb_dequeue(&entry->skb_list);
1523 if (skb_tailroom(rx->skb) < entry->extra_len) {
1524 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1525 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1526 GFP_ATOMIC))) {
1527 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1528 __skb_queue_purge(&entry->skb_list);
1529 return RX_DROP_UNUSABLE;
1530 }
1531 }
1532 while ((skb = __skb_dequeue(&entry->skb_list))) {
1533 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1534 dev_kfree_skb(skb);
1535 }
1536
1537 /* Complete frame has been reassembled - process it now */
1538 status = IEEE80211_SKB_RXCB(rx->skb);
1539 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1540
1541 out:
1542 if (rx->sta)
1543 rx->sta->rx_packets++;
1544 if (is_multicast_ether_addr(hdr->addr1))
1545 rx->local->dot11MulticastReceivedFrameCount++;
1546 else
1547 ieee80211_led_rx(rx->local);
1548 return RX_CONTINUE;
1549 }
1550
1551 static ieee80211_rx_result debug_noinline
1552 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1553 {
1554 u8 *data = rx->skb->data;
1555 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1556
1557 if (!ieee80211_is_data_qos(hdr->frame_control))
1558 return RX_CONTINUE;
1559
1560 /* remove the qos control field, update frame type and meta-data */
1561 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1562 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1563 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1564 /* change frame type to non QOS */
1565 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1566
1567 return RX_CONTINUE;
1568 }
1569
1570 static int
1571 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1572 {
1573 if (unlikely(!rx->sta ||
1574 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1575 return -EACCES;
1576
1577 return 0;
1578 }
1579
1580 static int
1581 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1582 {
1583 struct sk_buff *skb = rx->skb;
1584 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1585
1586 /*
1587 * Pass through unencrypted frames if the hardware has
1588 * decrypted them already.
1589 */
1590 if (status->flag & RX_FLAG_DECRYPTED)
1591 return 0;
1592
1593 /* Drop unencrypted frames if key is set. */
1594 if (unlikely(!ieee80211_has_protected(fc) &&
1595 !ieee80211_is_nullfunc(fc) &&
1596 ieee80211_is_data(fc) &&
1597 (rx->key || rx->sdata->drop_unencrypted)))
1598 return -EACCES;
1599
1600 return 0;
1601 }
1602
1603 static int
1604 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1605 {
1606 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1607 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1608 __le16 fc = hdr->frame_control;
1609
1610 /*
1611 * Pass through unencrypted frames if the hardware has
1612 * decrypted them already.
1613 */
1614 if (status->flag & RX_FLAG_DECRYPTED)
1615 return 0;
1616
1617 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1618 if (unlikely(!ieee80211_has_protected(fc) &&
1619 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1620 rx->key)) {
1621 if (ieee80211_is_deauth(fc))
1622 cfg80211_send_unprot_deauth(rx->sdata->dev,
1623 rx->skb->data,
1624 rx->skb->len);
1625 else if (ieee80211_is_disassoc(fc))
1626 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1627 rx->skb->data,
1628 rx->skb->len);
1629 return -EACCES;
1630 }
1631 /* BIP does not use the Protected field, so we need to check the MMIE */
1632 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1633 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1634 if (ieee80211_is_deauth(fc))
1635 cfg80211_send_unprot_deauth(rx->sdata->dev,
1636 rx->skb->data,
1637 rx->skb->len);
1638 else if (ieee80211_is_disassoc(fc))
1639 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1640 rx->skb->data,
1641 rx->skb->len);
1642 return -EACCES;
1643 }
1644 /*
1645 * When using MFP, Action frames are not allowed prior to
1646 * having configured keys.
1647 */
1648 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1649 ieee80211_is_robust_mgmt_frame(
1650 (struct ieee80211_hdr *) rx->skb->data)))
1651 return -EACCES;
1652 }
1653
1654 return 0;
1655 }
1656
1657 static int
1658 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1659 {
1660 struct ieee80211_sub_if_data *sdata = rx->sdata;
1661 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1662 bool check_port_control = false;
1663 struct ethhdr *ehdr;
1664 int ret;
1665
1666 *port_control = false;
1667 if (ieee80211_has_a4(hdr->frame_control) &&
1668 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1669 return -1;
1670
1671 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1672 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1673
1674 if (!sdata->u.mgd.use_4addr)
1675 return -1;
1676 else
1677 check_port_control = true;
1678 }
1679
1680 if (is_multicast_ether_addr(hdr->addr1) &&
1681 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1682 return -1;
1683
1684 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1685 if (ret < 0)
1686 return ret;
1687
1688 ehdr = (struct ethhdr *) rx->skb->data;
1689 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1690 *port_control = true;
1691 else if (check_port_control)
1692 return -1;
1693
1694 return 0;
1695 }
1696
1697 /*
1698 * requires that rx->skb is a frame with ethernet header
1699 */
1700 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1701 {
1702 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1703 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1704 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1705
1706 /*
1707 * Allow EAPOL frames to us/the PAE group address regardless
1708 * of whether the frame was encrypted or not.
1709 */
1710 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1711 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1712 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1713 return true;
1714
1715 if (ieee80211_802_1x_port_control(rx) ||
1716 ieee80211_drop_unencrypted(rx, fc))
1717 return false;
1718
1719 return true;
1720 }
1721
1722 /*
1723 * requires that rx->skb is a frame with ethernet header
1724 */
1725 static void
1726 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1727 {
1728 struct ieee80211_sub_if_data *sdata = rx->sdata;
1729 struct net_device *dev = sdata->dev;
1730 struct sk_buff *skb, *xmit_skb;
1731 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1732 struct sta_info *dsta;
1733 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1734
1735 skb = rx->skb;
1736 xmit_skb = NULL;
1737
1738 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1739 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1740 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1741 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1742 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1743 if (is_multicast_ether_addr(ehdr->h_dest)) {
1744 /*
1745 * send multicast frames both to higher layers in
1746 * local net stack and back to the wireless medium
1747 */
1748 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1749 if (!xmit_skb && net_ratelimit())
1750 printk(KERN_DEBUG "%s: failed to clone "
1751 "multicast frame\n", dev->name);
1752 } else {
1753 dsta = sta_info_get(sdata, skb->data);
1754 if (dsta) {
1755 /*
1756 * The destination station is associated to
1757 * this AP (in this VLAN), so send the frame
1758 * directly to it and do not pass it to local
1759 * net stack.
1760 */
1761 xmit_skb = skb;
1762 skb = NULL;
1763 }
1764 }
1765 }
1766
1767 if (skb) {
1768 int align __maybe_unused;
1769
1770 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1771 /*
1772 * 'align' will only take the values 0 or 2 here
1773 * since all frames are required to be aligned
1774 * to 2-byte boundaries when being passed to
1775 * mac80211. That is also why the data can simply
1776 * be shifted back within the headroom below.
1777 */
1778 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1779 if (align) {
1780 if (WARN_ON(skb_headroom(skb) < 3)) {
1781 dev_kfree_skb(skb);
1782 skb = NULL;
1783 } else {
1784 u8 *data = skb->data;
1785 size_t len = skb_headlen(skb);
1786 skb->data -= align;
1787 memmove(skb->data, data, len);
1788 skb_set_tail_pointer(skb, len);
1789 }
1790 }
1791 #endif
1792
1793 if (skb) {
1794 /* deliver to local stack */
1795 skb->protocol = eth_type_trans(skb, dev);
1796 memset(skb->cb, 0, sizeof(skb->cb));
1797 netif_receive_skb(skb);
1798 }
1799 }
1800
1801 if (xmit_skb) {
1802 /* send to wireless media */
1803 xmit_skb->protocol = htons(ETH_P_802_3);
1804 skb_reset_network_header(xmit_skb);
1805 skb_reset_mac_header(xmit_skb);
1806 dev_queue_xmit(xmit_skb);
1807 }
1808 }
1809
1810 static ieee80211_rx_result debug_noinline
1811 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1812 {
1813 struct net_device *dev = rx->sdata->dev;
1814 struct sk_buff *skb = rx->skb;
1815 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1816 __le16 fc = hdr->frame_control;
1817 struct sk_buff_head frame_list;
1818 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1819
1820 if (unlikely(!ieee80211_is_data(fc)))
1821 return RX_CONTINUE;
1822
1823 if (unlikely(!ieee80211_is_data_present(fc)))
1824 return RX_DROP_MONITOR;
1825
1826 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1827 return RX_CONTINUE;
1828
1829 if (ieee80211_has_a4(hdr->frame_control) &&
1830 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1831 !rx->sdata->u.vlan.sta)
1832 return RX_DROP_UNUSABLE;
1833
1834 if (is_multicast_ether_addr(hdr->addr1) &&
1835 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1836 rx->sdata->u.vlan.sta) ||
1837 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1838 rx->sdata->u.mgd.use_4addr)))
1839 return RX_DROP_UNUSABLE;
1840
1841 skb->dev = dev;
1842 __skb_queue_head_init(&frame_list);
1843
1844 if (skb_linearize(skb))
1845 return RX_DROP_UNUSABLE;
1846
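/*
 * Split the A-MSDU into its individual 802.3 subframes; each subframe
 * is then policy-checked (port control, encryption) and delivered on
 * its own below.
 */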
1847 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1848 rx->sdata->vif.type,
1849 rx->local->hw.extra_tx_headroom, true);
1850
1851 while (!skb_queue_empty(&frame_list)) {
1852 rx->skb = __skb_dequeue(&frame_list);
1853
1854 if (!ieee80211_frame_allowed(rx, fc)) {
1855 dev_kfree_skb(rx->skb);
1856 continue;
1857 }
1858 dev->stats.rx_packets++;
1859 dev->stats.rx_bytes += rx->skb->len;
1860
1861 ieee80211_deliver_skb(rx);
1862 }
1863
1864 return RX_QUEUED;
1865 }
1866
1867 #ifdef CONFIG_MAC80211_MESH
1868 static ieee80211_rx_result
1869 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1870 {
1871 struct ieee80211_hdr *hdr;
1872 struct ieee80211s_hdr *mesh_hdr;
1873 unsigned int hdrlen;
1874 struct sk_buff *skb = rx->skb, *fwd_skb;
1875 struct ieee80211_local *local = rx->local;
1876 struct ieee80211_sub_if_data *sdata = rx->sdata;
1877 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1878
1879 hdr = (struct ieee80211_hdr *) skb->data;
1880 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1881 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1882
1883 /* frame already seen (in the RMC, recent multicast cache): don't forward */
1884 if (ieee80211_is_data(hdr->frame_control) &&
1885 is_multicast_ether_addr(hdr->addr1) &&
1886 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1887 return RX_DROP_MONITOR;
1888
1889 if (!ieee80211_is_data(hdr->frame_control))
1890 return RX_CONTINUE;
1891
1892 if (!mesh_hdr->ttl)
1893 /* illegal frame */
1894 return RX_DROP_MONITOR;
1895
1896 if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
1897 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1898 dropped_frames_congestion);
1899 return RX_DROP_MONITOR;
1900 }
1901
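/*
 * Mesh address extension: the frame carries the address of an external
 * (proxied) node, so learn or refresh the proxy path (MPP) entry that
 * maps the proxied address to the mesh proxy it was received from.
 */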
1902 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1903 struct mesh_path *mppath;
1904 char *proxied_addr;
1905 char *mpp_addr;
1906
1907 if (is_multicast_ether_addr(hdr->addr1)) {
1908 mpp_addr = hdr->addr3;
1909 proxied_addr = mesh_hdr->eaddr1;
1910 } else {
1911 mpp_addr = hdr->addr4;
1912 proxied_addr = mesh_hdr->eaddr2;
1913 }
1914
1915 rcu_read_lock();
1916 mppath = mpp_path_lookup(proxied_addr, sdata);
1917 if (!mppath) {
1918 mpp_path_add(proxied_addr, mpp_addr, sdata);
1919 } else {
1920 spin_lock_bh(&mppath->state_lock);
1921 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1922 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1923 spin_unlock_bh(&mppath->state_lock);
1924 }
1925 rcu_read_unlock();
1926 }
1927
1928 /* Frame has reached destination. Don't forward */
1929 if (!is_multicast_ether_addr(hdr->addr1) &&
1930 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1931 return RX_CONTINUE;
1932
1933 mesh_hdr->ttl--;
1934
1935 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1936 if (!mesh_hdr->ttl)
1937 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1938 dropped_frames_ttl);
1939 else {
1940 struct ieee80211_hdr *fwd_hdr;
1941 struct ieee80211_tx_info *info;
1942
1943 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1944
1945 if (!fwd_skb && net_ratelimit())
1946 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1947 sdata->name);
1948 if (!fwd_skb)
1949 goto out;
1950
1951 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1952 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1953 info = IEEE80211_SKB_CB(fwd_skb);
1954 memset(info, 0, sizeof(*info));
1955 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1956 info->control.vif = &rx->sdata->vif;
1957 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1958 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1959 fwded_mcast);
1960 skb_set_queue_mapping(fwd_skb,
1961 ieee80211_select_queue(sdata, fwd_skb));
1962 ieee80211_set_qos_hdr(sdata, fwd_skb);
1963 } else {
1964 int err;
1965 /*
1966 * Save TA to addr1 to send TA a path error if a
1967 * suitable next hop is not found
1968 */
1969 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1970 ETH_ALEN);
1971 err = mesh_nexthop_lookup(fwd_skb, sdata);
1972 /* Failed to immediately resolve next hop:
1973 * fwded frame was dropped or will be added
1974 * later to the pending skb queue. */
1975 if (err)
1976 return RX_DROP_MONITOR;
1977
1978 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1979 fwded_unicast);
1980 }
1981 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1982 fwded_frames);
1983 ieee80211_add_pending_skb(local, fwd_skb);
1984 }
1985 }
1986
1987 out:
1988 if (is_multicast_ether_addr(hdr->addr1) ||
1989 sdata->dev->flags & IFF_PROMISC)
1990 return RX_CONTINUE;
1991 else
1992 return RX_DROP_MONITOR;
1993 }
1994 #endif
1995
1996 static ieee80211_rx_result debug_noinline
1997 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1998 {
1999 struct ieee80211_sub_if_data *sdata = rx->sdata;
2000 struct ieee80211_local *local = rx->local;
2001 struct net_device *dev = sdata->dev;
2002 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2003 __le16 fc = hdr->frame_control;
2004 bool port_control;
2005 int err;
2006
2007 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2008 return RX_CONTINUE;
2009
2010 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2011 return RX_DROP_MONITOR;
2012
2013 /*
2014 * Allow the cooked monitor interface of an AP to see 4-addr frames so
2015 * that a 4-addr station can be detected and moved into a separate VLAN
2016 */
2017 if (ieee80211_has_a4(hdr->frame_control) &&
2018 sdata->vif.type == NL80211_IFTYPE_AP)
2019 return RX_DROP_MONITOR;
2020
2021 err = __ieee80211_data_to_8023(rx, &port_control);
2022 if (unlikely(err))
2023 return RX_DROP_UNUSABLE;
2024
2025 if (!ieee80211_frame_allowed(rx, fc))
2026 return RX_DROP_MONITOR;
2027
2028 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2029 unlikely(port_control) && sdata->bss) {
2030 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2031 u.ap);
2032 dev = sdata->dev;
2033 rx->sdata = sdata;
2034 }
2035
2036 rx->skb->dev = dev;
2037
2038 dev->stats.rx_packets++;
2039 dev->stats.rx_bytes += rx->skb->len;
2040
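/*
 * Received unicast traffic counts as activity: push the dynamic power
 * save timer out again, unless we are scanning or operating off-channel.
 */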
2041 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2042 !is_multicast_ether_addr(
2043 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2044 (!local->scanning &&
2045 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
2046 mod_timer(&local->dynamic_ps_timer, jiffies +
2047 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2048 }
2049
2050 ieee80211_deliver_skb(rx);
2051
2052 return RX_QUEUED;
2053 }
2054
2055 static ieee80211_rx_result debug_noinline
2056 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2057 {
2058 struct ieee80211_local *local = rx->local;
2059 struct ieee80211_hw *hw = &local->hw;
2060 struct sk_buff *skb = rx->skb;
2061 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2062 struct tid_ampdu_rx *tid_agg_rx;
2063 u16 start_seq_num;
2064 u16 tid;
2065
2066 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2067 return RX_CONTINUE;
2068
2069 if (ieee80211_is_back_req(bar->frame_control)) {
2070 struct {
2071 __le16 control, start_seq_num;
2072 } __packed bar_data;
2073
2074 if (!rx->sta)
2075 return RX_DROP_MONITOR;
2076
2077 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2078 &bar_data, sizeof(bar_data)))
2079 return RX_DROP_MONITOR;
2080
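/*
 * BAR control field: the TID lives in the top four bits (12-15).
 * The starting sequence control below carries the fragment number in
 * bits 0-3 and the starting sequence number in bits 4-15, hence the
 * two shifts.
 */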
2081 tid = le16_to_cpu(bar_data.control) >> 12;
2082
2083 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2084 if (!tid_agg_rx)
2085 return RX_DROP_MONITOR;
2086
2087 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2088
2089 /* reset session timer */
2090 if (tid_agg_rx->timeout)
2091 mod_timer(&tid_agg_rx->session_timer,
2092 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2093
2094 spin_lock(&tid_agg_rx->reorder_lock);
2095 /* release stored frames up to start of BAR */
2096 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
2097 spin_unlock(&tid_agg_rx->reorder_lock);
2098
2099 kfree_skb(skb);
2100 return RX_QUEUED;
2101 }
2102
2103 /*
2104 * After this point, we only want management frames,
2105 * so we can drop all remaining control frames to
2106 * cooked monitor interfaces.
2107 */
2108 return RX_DROP_MONITOR;
2109 }
2110
2111 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2112 struct ieee80211_mgmt *mgmt,
2113 size_t len)
2114 {
2115 struct ieee80211_local *local = sdata->local;
2116 struct sk_buff *skb;
2117 struct ieee80211_mgmt *resp;
2118
2119 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
2120 /* Not to own unicast address */
2121 return;
2122 }
2123
2124 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
2125 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
2126 /* Not from the current AP or not associated yet. */
2127 return;
2128 }
2129
2130 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2131 /* Too short SA Query request frame */
2132 return;
2133 }
2134
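/*
 * Build an SA Query Response addressed back to the AP that sent the
 * request, echoing its transaction identifier (IEEE 802.11w).
 */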
2135 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2136 if (skb == NULL)
2137 return;
2138
2139 skb_reserve(skb, local->hw.extra_tx_headroom);
2140 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2141 memset(resp, 0, 24);
2142 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2143 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2144 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2145 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2146 IEEE80211_STYPE_ACTION);
2147 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2148 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2149 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2150 memcpy(resp->u.action.u.sa_query.trans_id,
2151 mgmt->u.action.u.sa_query.trans_id,
2152 WLAN_SA_QUERY_TR_ID_LEN);
2153
2154 ieee80211_tx_skb(sdata, skb);
2155 }
2156
2157 static ieee80211_rx_result debug_noinline
2158 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2159 {
2160 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2161 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2162
2163 /*
2164 * From here on, look only at management frames.
2165 * Data and control frames are already handled,
2166 * and unknown (reserved) frames are useless.
2167 */
2168 if (rx->skb->len < 24)
2169 return RX_DROP_MONITOR;
2170
2171 if (!ieee80211_is_mgmt(mgmt->frame_control))
2172 return RX_DROP_MONITOR;
2173
2174 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2175 return RX_DROP_MONITOR;
2176
2177 if (ieee80211_drop_unencrypted_mgmt(rx))
2178 return RX_DROP_UNUSABLE;
2179
2180 return RX_CONTINUE;
2181 }
2182
2183 static ieee80211_rx_result debug_noinline
2184 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2185 {
2186 struct ieee80211_local *local = rx->local;
2187 struct ieee80211_sub_if_data *sdata = rx->sdata;
2188 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2189 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2190 int len = rx->skb->len;
2191
2192 if (!ieee80211_is_action(mgmt->frame_control))
2193 return RX_CONTINUE;
2194
2195 /* drop too small frames */
2196 if (len < IEEE80211_MIN_ACTION_SIZE)
2197 return RX_DROP_UNUSABLE;
2198
2199 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2200 return RX_DROP_UNUSABLE;
2201
2202 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2203 return RX_DROP_UNUSABLE;
2204
2205 switch (mgmt->u.action.category) {
2206 case WLAN_CATEGORY_BACK:
2207 /*
2208 * The aggregation code is not prepared to handle
2209 * anything but STA/AP due to the BSSID handling;
2210 * IBSS could work in the code but isn't supported
2211 * by drivers or the standard.
2212 */
2213 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2214 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2215 sdata->vif.type != NL80211_IFTYPE_AP)
2216 break;
2217
2218 /* verify action_code is present */
2219 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2220 break;
2221
2222 switch (mgmt->u.action.u.addba_req.action_code) {
2223 case WLAN_ACTION_ADDBA_REQ:
2224 if (len < (IEEE80211_MIN_ACTION_SIZE +
2225 sizeof(mgmt->u.action.u.addba_req)))
2226 goto invalid;
2227 break;
2228 case WLAN_ACTION_ADDBA_RESP:
2229 if (len < (IEEE80211_MIN_ACTION_SIZE +
2230 sizeof(mgmt->u.action.u.addba_resp)))
2231 goto invalid;
2232 break;
2233 case WLAN_ACTION_DELBA:
2234 if (len < (IEEE80211_MIN_ACTION_SIZE +
2235 sizeof(mgmt->u.action.u.delba)))
2236 goto invalid;
2237 break;
2238 default:
2239 goto invalid;
2240 }
2241
2242 goto queue;
2243 case WLAN_CATEGORY_SPECTRUM_MGMT:
2244 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2245 break;
2246
2247 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2248 break;
2249
2250 /* verify action_code is present */
2251 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2252 break;
2253
2254 switch (mgmt->u.action.u.measurement.action_code) {
2255 case WLAN_ACTION_SPCT_MSR_REQ:
2256 if (len < (IEEE80211_MIN_ACTION_SIZE +
2257 sizeof(mgmt->u.action.u.measurement)))
2258 break;
2259 ieee80211_process_measurement_req(sdata, mgmt, len);
2260 goto handled;
2261 case WLAN_ACTION_SPCT_CHL_SWITCH:
2262 if (len < (IEEE80211_MIN_ACTION_SIZE +
2263 sizeof(mgmt->u.action.u.chan_switch)))
2264 break;
2265
2266 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2267 break;
2268
2269 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2270 break;
2271
2272 goto queue;
2273 }
2274 break;
2275 case WLAN_CATEGORY_SA_QUERY:
2276 if (len < (IEEE80211_MIN_ACTION_SIZE +
2277 sizeof(mgmt->u.action.u.sa_query)))
2278 break;
2279
2280 switch (mgmt->u.action.u.sa_query.action) {
2281 case WLAN_ACTION_SA_QUERY_REQUEST:
2282 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2283 break;
2284 ieee80211_process_sa_query_req(sdata, mgmt, len);
2285 goto handled;
2286 }
2287 break;
2288 case WLAN_CATEGORY_SELF_PROTECTED:
2289 switch (mgmt->u.action.u.self_prot.action_code) {
2290 case WLAN_SP_MESH_PEERING_OPEN:
2291 case WLAN_SP_MESH_PEERING_CLOSE:
2292 case WLAN_SP_MESH_PEERING_CONFIRM:
2293 if (!ieee80211_vif_is_mesh(&sdata->vif))
2294 goto invalid;
2295 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2296 /* userspace handles this frame */
2297 break;
2298 goto queue;
2299 case WLAN_SP_MGK_INFORM:
2300 case WLAN_SP_MGK_ACK:
2301 if (!ieee80211_vif_is_mesh(&sdata->vif))
2302 goto invalid;
2303 break;
2304 }
2305 break;
2306 case WLAN_CATEGORY_MESH_ACTION:
2307 if (!ieee80211_vif_is_mesh(&sdata->vif))
2308 break;
2309 if (mesh_action_is_path_sel(mgmt) &&
2310 (!mesh_path_sel_is_hwmp(sdata)))
2311 break;
2312 goto queue;
2313 }
2314
2315 return RX_CONTINUE;
2316
2317 invalid:
2318 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2319 /* the frame will be returned to the sender by a later handler */
2320 return RX_CONTINUE;
2321
2322 handled:
2323 if (rx->sta)
2324 rx->sta->rx_packets++;
2325 dev_kfree_skb(rx->skb);
2326 return RX_QUEUED;
2327
2328 queue:
2329 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2330 skb_queue_tail(&sdata->skb_queue, rx->skb);
2331 ieee80211_queue_work(&local->hw, &sdata->work);
2332 if (rx->sta)
2333 rx->sta->rx_packets++;
2334 return RX_QUEUED;
2335 }
2336
2337 static ieee80211_rx_result debug_noinline
2338 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2339 {
2340 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2341
2342 /* skip known-bad action frames and return them in the next handler */
2343 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2344 return RX_CONTINUE;
2345
2346 /*
2347 * Getting here means the kernel doesn't know how to handle
2348 * it, but maybe userspace does ... include returned frames
2349 * so userspace can register for those to know whether the
2350 * ones it transmitted were processed or returned.
2351 */
2352
2353 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2354 rx->skb->data, rx->skb->len,
2355 GFP_ATOMIC)) {
2356 if (rx->sta)
2357 rx->sta->rx_packets++;
2358 dev_kfree_skb(rx->skb);
2359 return RX_QUEUED;
2360 }
2361
2362
2363 return RX_CONTINUE;
2364 }
2365
2366 static ieee80211_rx_result debug_noinline
2367 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2368 {
2369 struct ieee80211_local *local = rx->local;
2370 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2371 struct sk_buff *nskb;
2372 struct ieee80211_sub_if_data *sdata = rx->sdata;
2373 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2374
2375 if (!ieee80211_is_action(mgmt->frame_control))
2376 return RX_CONTINUE;
2377
2378 /*
2379 * For AP mode, hostapd is responsible for handling any action
2380 * frames that we didn't handle, including returning unknown
2381 * ones. For all other modes we will return them to the sender,
2382 * setting the 0x80 bit in the action category, as required by
2383 * 802.11-2007 7.3.1.11.
2384 * Newer versions of hostapd shall also use the management frame
2385 * registration mechanisms, but older ones still use cooked
2386 * monitor interfaces so push all frames there.
2387 */
2388 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2389 (sdata->vif.type == NL80211_IFTYPE_AP ||
2390 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2391 return RX_DROP_MONITOR;
2392
2393 /* do not return rejected action frames */
2394 if (mgmt->u.action.category & 0x80)
2395 return RX_DROP_UNUSABLE;
2396
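/*
 * Bounce the frame back to its sender: copy it with enough headroom
 * for the TX path, set the "rejected" bit (0x80) in the category and
 * swap the source and destination addresses.
 */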
2397 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2398 GFP_ATOMIC);
2399 if (nskb) {
2400 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2401
2402 nmgmt->u.action.category |= 0x80;
2403 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2404 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2405
2406 memset(nskb->cb, 0, sizeof(nskb->cb));
2407
2408 ieee80211_tx_skb(rx->sdata, nskb);
2409 }
2410 dev_kfree_skb(rx->skb);
2411 return RX_QUEUED;
2412 }
2413
2414 static ieee80211_rx_result debug_noinline
2415 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2416 {
2417 struct ieee80211_sub_if_data *sdata = rx->sdata;
2418 ieee80211_rx_result rxs;
2419 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2420 __le16 stype;
2421
2422 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2423 if (rxs != RX_CONTINUE)
2424 return rxs;
2425
2426 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2427
2428 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2429 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2430 sdata->vif.type != NL80211_IFTYPE_STATION)
2431 return RX_DROP_MONITOR;
2432
2433 switch (stype) {
2434 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2435 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2436 /* process for all: mesh, mlme, ibss */
2437 break;
2438 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2439 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2440 if (is_multicast_ether_addr(mgmt->da) &&
2441 !is_broadcast_ether_addr(mgmt->da))
2442 return RX_DROP_MONITOR;
2443
2444 /* process only for station */
2445 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2446 return RX_DROP_MONITOR;
2447 break;
2448 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2449 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2450 /* process only for ibss */
2451 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2452 return RX_DROP_MONITOR;
2453 break;
2454 default:
2455 return RX_DROP_MONITOR;
2456 }
2457
2458 /* queue up frame and kick off work to process it */
2459 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2460 skb_queue_tail(&sdata->skb_queue, rx->skb);
2461 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2462 if (rx->sta)
2463 rx->sta->rx_packets++;
2464
2465 return RX_QUEUED;
2466 }
2467
2468 /* TODO: use IEEE80211_RX_FRAGMENTED */
2469 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2470 struct ieee80211_rate *rate)
2471 {
2472 struct ieee80211_sub_if_data *sdata;
2473 struct ieee80211_local *local = rx->local;
2474 struct ieee80211_rtap_hdr {
2475 struct ieee80211_radiotap_header hdr;
2476 u8 flags;
2477 u8 rate_or_pad;
2478 __le16 chan_freq;
2479 __le16 chan_flags;
2480 } __packed *rthdr;
2481 struct sk_buff *skb = rx->skb, *skb2;
2482 struct net_device *prev_dev = NULL;
2483 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2484
2485 /*
2486 * If cooked monitor has been processed already, then
2487 * don't do it again. If not, set the flag.
2488 */
2489 if (rx->flags & IEEE80211_RX_CMNTR)
2490 goto out_free_skb;
2491 rx->flags |= IEEE80211_RX_CMNTR;
2492
2493 if (skb_headroom(skb) < sizeof(*rthdr) &&
2494 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2495 goto out_free_skb;
2496
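/* prepend a minimal radiotap header: flags, rate (if known) and channel */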
2497 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2498 memset(rthdr, 0, sizeof(*rthdr));
2499 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2500 rthdr->hdr.it_present =
2501 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2502 (1 << IEEE80211_RADIOTAP_CHANNEL));
2503
2504 if (rate) {
2505 rthdr->rate_or_pad = rate->bitrate / 5;
2506 rthdr->hdr.it_present |=
2507 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2508 }
2509 rthdr->chan_freq = cpu_to_le16(status->freq);
2510
2511 if (status->band == IEEE80211_BAND_5GHZ)
2512 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2513 IEEE80211_CHAN_5GHZ);
2514 else
2515 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2516 IEEE80211_CHAN_2GHZ);
2517
2518 skb_set_mac_header(skb, 0);
2519 skb->ip_summed = CHECKSUM_UNNECESSARY;
2520 skb->pkt_type = PACKET_OTHERHOST;
2521 skb->protocol = htons(ETH_P_802_2);
2522
2523 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2524 if (!ieee80211_sdata_running(sdata))
2525 continue;
2526
2527 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2528 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2529 continue;
2530
2531 if (prev_dev) {
2532 skb2 = skb_clone(skb, GFP_ATOMIC);
2533 if (skb2) {
2534 skb2->dev = prev_dev;
2535 netif_receive_skb(skb2);
2536 }
2537 }
2538
2539 prev_dev = sdata->dev;
2540 sdata->dev->stats.rx_packets++;
2541 sdata->dev->stats.rx_bytes += skb->len;
2542 }
2543
2544 if (prev_dev) {
2545 skb->dev = prev_dev;
2546 netif_receive_skb(skb);
2547 return;
2548 }
2549
2550 out_free_skb:
2551 dev_kfree_skb(skb);
2552 }
2553
2554 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2555 ieee80211_rx_result res)
2556 {
2557 switch (res) {
2558 case RX_DROP_MONITOR:
2559 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2560 if (rx->sta)
2561 rx->sta->rx_dropped++;
2562 /* fall through */
2563 case RX_CONTINUE: {
2564 struct ieee80211_rate *rate = NULL;
2565 struct ieee80211_supported_band *sband;
2566 struct ieee80211_rx_status *status;
2567
2568 status = IEEE80211_SKB_RXCB((rx->skb));
2569
2570 sband = rx->local->hw.wiphy->bands[status->band];
2571 if (!(status->flag & RX_FLAG_HT))
2572 rate = &sband->bitrates[status->rate_idx];
2573
2574 ieee80211_rx_cooked_monitor(rx, rate);
2575 break;
2576 }
2577 case RX_DROP_UNUSABLE:
2578 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2579 if (rx->sta)
2580 rx->sta->rx_dropped++;
2581 dev_kfree_skb(rx->skb);
2582 break;
2583 case RX_QUEUED:
2584 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2585 break;
2586 }
2587 }
2588
2589 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2590 {
2591 ieee80211_rx_result res = RX_DROP_MONITOR;
2592 struct sk_buff *skb;
2593
2594 #define CALL_RXH(rxh) \
2595 do { \
2596 res = rxh(rx); \
2597 if (res != RX_CONTINUE) \
2598 goto rxh_next; \
2599 } while (0);
2600
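/*
 * The rx_skb_queue lock and the running_rx_handler flag ensure that
 * only one context runs the RX handlers at a time; any other caller
 * just leaves its frames on the queue for the running context to
 * pick up, so the RX path never runs concurrently or recurses.
 */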
2601 spin_lock(&rx->local->rx_skb_queue.lock);
2602 if (rx->local->running_rx_handler)
2603 goto unlock;
2604
2605 rx->local->running_rx_handler = true;
2606
2607 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2608 spin_unlock(&rx->local->rx_skb_queue.lock);
2609
2610 /*
2611 * all fields of rx other than rx->skb remain valid across
2612 * frames that belong to the same A-MPDU, since they are on
2613 * the same TID from the same station
2614 */
2615 rx->skb = skb;
2616
2617 CALL_RXH(ieee80211_rx_h_decrypt)
2618 CALL_RXH(ieee80211_rx_h_check_more_data)
2619 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2620 CALL_RXH(ieee80211_rx_h_sta_process)
2621 CALL_RXH(ieee80211_rx_h_defragment)
2622 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2623 /* must be after MMIC verify so header is counted in MPDU mic */
2624 #ifdef CONFIG_MAC80211_MESH
2625 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2626 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2627 #endif
2628 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2629 CALL_RXH(ieee80211_rx_h_amsdu)
2630 CALL_RXH(ieee80211_rx_h_data)
2631 CALL_RXH(ieee80211_rx_h_ctrl);
2632 CALL_RXH(ieee80211_rx_h_mgmt_check)
2633 CALL_RXH(ieee80211_rx_h_action)
2634 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2635 CALL_RXH(ieee80211_rx_h_action_return)
2636 CALL_RXH(ieee80211_rx_h_mgmt)
2637
2638 rxh_next:
2639 ieee80211_rx_handlers_result(rx, res);
2640 spin_lock(&rx->local->rx_skb_queue.lock);
2641 #undef CALL_RXH
2642 }
2643
2644 rx->local->running_rx_handler = false;
2645
2646 unlock:
2647 spin_unlock(&rx->local->rx_skb_queue.lock);
2648 }
2649
2650 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2651 {
2652 ieee80211_rx_result res = RX_DROP_MONITOR;
2653
2654 #define CALL_RXH(rxh) \
2655 do { \
2656 res = rxh(rx); \
2657 if (res != RX_CONTINUE) \
2658 goto rxh_next; \
2659 } while (0);
2660
2661 CALL_RXH(ieee80211_rx_h_passive_scan)
2662 CALL_RXH(ieee80211_rx_h_check)
2663
2664 ieee80211_rx_reorder_ampdu(rx);
2665
2666 ieee80211_rx_handlers(rx);
2667 return;
2668
2669 rxh_next:
2670 ieee80211_rx_handlers_result(rx, res);
2671
2672 #undef CALL_RXH
2673 }
2674
2675 /*
2676 * This function makes calls into the RX path, therefore
2677 * it has to be invoked under RCU read lock.
2678 */
2679 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2680 {
2681 struct ieee80211_rx_data rx = {
2682 .sta = sta,
2683 .sdata = sta->sdata,
2684 .local = sta->local,
2685 /* This is OK -- must be QoS data frame */
2686 .security_idx = tid,
2687 .seqno_idx = tid,
2688 .flags = 0,
2689 };
2690 struct tid_ampdu_rx *tid_agg_rx;
2691
2692 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2693 if (!tid_agg_rx)
2694 return;
2695
2696 spin_lock(&tid_agg_rx->reorder_lock);
2697 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
2698 spin_unlock(&tid_agg_rx->reorder_lock);
2699
2700 ieee80211_rx_handlers(&rx);
2701 }
2702
2703 /* main receive path */
2704
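/*
 * Decide whether the frame is of interest to this interface: returns 0
 * to ignore it entirely, 1 to process it.  Frames accepted only for
 * passive reasons (promiscuous mode, scanning) get IEEE80211_RX_RA_MATCH
 * cleared so that later handlers treat them accordingly.
 */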
2705 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2706 struct ieee80211_hdr *hdr)
2707 {
2708 struct ieee80211_sub_if_data *sdata = rx->sdata;
2709 struct sk_buff *skb = rx->skb;
2710 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2711 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2712 int multicast = is_multicast_ether_addr(hdr->addr1);
2713
2714 switch (sdata->vif.type) {
2715 case NL80211_IFTYPE_STATION:
2716 if (!bssid && !sdata->u.mgd.use_4addr)
2717 return 0;
2718 if (!multicast &&
2719 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2720 if (!(sdata->dev->flags & IFF_PROMISC) ||
2721 sdata->u.mgd.use_4addr)
2722 return 0;
2723 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2724 }
2725 break;
2726 case NL80211_IFTYPE_ADHOC:
2727 if (!bssid)
2728 return 0;
2729 if (ieee80211_is_beacon(hdr->frame_control)) {
2730 return 1;
2731 }
2732 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2733 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2734 return 0;
2735 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2736 } else if (!multicast &&
2737 compare_ether_addr(sdata->vif.addr,
2738 hdr->addr1) != 0) {
2739 if (!(sdata->dev->flags & IFF_PROMISC))
2740 return 0;
2741 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2742 } else if (!rx->sta) {
2743 int rate_idx;
2744 if (status->flag & RX_FLAG_HT)
2745 rate_idx = 0; /* TODO: HT rates */
2746 else
2747 rate_idx = status->rate_idx;
2748 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2749 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2750 }
2751 break;
2752 case NL80211_IFTYPE_MESH_POINT:
2753 if (!multicast &&
2754 compare_ether_addr(sdata->vif.addr,
2755 hdr->addr1) != 0) {
2756 if (!(sdata->dev->flags & IFF_PROMISC))
2757 return 0;
2758
2759 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2760 }
2761 break;
2762 case NL80211_IFTYPE_AP_VLAN:
2763 case NL80211_IFTYPE_AP:
2764 if (!bssid) {
2765 if (compare_ether_addr(sdata->vif.addr,
2766 hdr->addr1))
2767 return 0;
2768 } else if (!ieee80211_bssid_match(bssid,
2769 sdata->vif.addr)) {
2770 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2771 !ieee80211_is_beacon(hdr->frame_control) &&
2772 !(ieee80211_is_action(hdr->frame_control) &&
2773 sdata->vif.p2p))
2774 return 0;
2775 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2776 }
2777 break;
2778 case NL80211_IFTYPE_WDS:
2779 if (bssid || !ieee80211_is_data(hdr->frame_control))
2780 return 0;
2781 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2782 return 0;
2783 break;
2784 default:
2785 /* should never get here */
2786 WARN_ON(1);
2787 break;
2788 }
2789
2790 return 1;
2791 }
2792
2793 /*
2794 * This function returns whether or not the SKB
2795 * was destined for RX processing, which, if
2796 * consume is true, is equivalent to whether or
2797 * not the skb was consumed.
2798 */
2799 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2800 struct sk_buff *skb, bool consume)
2801 {
2802 struct ieee80211_local *local = rx->local;
2803 struct ieee80211_sub_if_data *sdata = rx->sdata;
2804 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2805 struct ieee80211_hdr *hdr = (void *)skb->data;
2806 int prepares;
2807
2808 rx->skb = skb;
2809 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2810 prepares = prepare_for_handlers(rx, hdr);
2811
2812 if (!prepares)
2813 return false;
2814
2815 if (!consume) {
2816 skb = skb_copy(skb, GFP_ATOMIC);
2817 if (!skb) {
2818 if (net_ratelimit())
2819 wiphy_debug(local->hw.wiphy,
2820 "failed to copy skb for %s\n",
2821 sdata->name);
2822 return true;
2823 }
2824
2825 rx->skb = skb;
2826 }
2827
2828 ieee80211_invoke_rx_handlers(rx);
2829 return true;
2830 }
2831
2832 /*
2833 * This is the actual Rx frame handler. As it belongs to the Rx path it
2834 * must be called with rcu_read_lock protection.
2835 */
2836 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2837 struct sk_buff *skb)
2838 {
2839 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2840 struct ieee80211_local *local = hw_to_local(hw);
2841 struct ieee80211_sub_if_data *sdata;
2842 struct ieee80211_hdr *hdr;
2843 __le16 fc;
2844 struct ieee80211_rx_data rx;
2845 struct ieee80211_sub_if_data *prev;
2846 struct sta_info *sta, *tmp, *prev_sta;
2847 int err = 0;
2848
2849 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2850 memset(&rx, 0, sizeof(rx));
2851 rx.skb = skb;
2852 rx.local = local;
2853
2854 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2855 local->dot11ReceivedFragmentCount++;
2856
2857 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2858 test_bit(SCAN_SW_SCANNING, &local->scanning)))
2859 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2860
2861 if (ieee80211_is_mgmt(fc))
2862 err = skb_linearize(skb);
2863 else
2864 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2865
2866 if (err) {
2867 dev_kfree_skb(skb);
2868 return;
2869 }
2870
2871 hdr = (struct ieee80211_hdr *)skb->data;
2872 ieee80211_parse_qos(&rx);
2873 ieee80211_verify_alignment(&rx);
2874
2875 if (ieee80211_is_data(fc)) {
2876 prev_sta = NULL;
2877
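/*
 * The transmitter may be known as a station on more than one virtual
 * interface; hand a copy of the frame to every match and let the last
 * one consume the original skb.
 */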
2878 for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
2879 if (!prev_sta) {
2880 prev_sta = sta;
2881 continue;
2882 }
2883
2884 rx.sta = prev_sta;
2885 rx.sdata = prev_sta->sdata;
2886 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2887
2888 prev_sta = sta;
2889 }
2890
2891 if (prev_sta) {
2892 rx.sta = prev_sta;
2893 rx.sdata = prev_sta->sdata;
2894
2895 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2896 return;
2897 goto out;
2898 }
2899 }
2900
2901 prev = NULL;
2902
2903 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2904 if (!ieee80211_sdata_running(sdata))
2905 continue;
2906
2907 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2908 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2909 continue;
2910
2911 /*
2912 * frame is destined for this interface, but if it's
2913 * not also for the previous one we handle that after
2914 * the loop to avoid copying the SKB more often than necessary
2915 */
2916
2917 if (!prev) {
2918 prev = sdata;
2919 continue;
2920 }
2921
2922 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2923 rx.sdata = prev;
2924 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2925
2926 prev = sdata;
2927 }
2928
2929 if (prev) {
2930 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2931 rx.sdata = prev;
2932
2933 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2934 return;
2935 }
2936
2937 out:
2938 dev_kfree_skb(skb);
2939 }
2940
2941 /*
2942 * This is the receive path handler. It is called by a low level driver when an
2943 * 802.11 MPDU is received from the hardware.
2944 */
2945 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2946 {
2947 struct ieee80211_local *local = hw_to_local(hw);
2948 struct ieee80211_rate *rate = NULL;
2949 struct ieee80211_supported_band *sband;
2950 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2951
2952 WARN_ON_ONCE(softirq_count() == 0);
2953
2954 if (WARN_ON(status->band < 0 ||
2955 status->band >= IEEE80211_NUM_BANDS))
2956 goto drop;
2957
2958 sband = local->hw.wiphy->bands[status->band];
2959 if (WARN_ON(!sband))
2960 goto drop;
2961
2962 /*
2963 * If we're suspending, it is possible although not too likely
2964 * that we'd be receiving frames after having already partially
2965 * quiesced the stack. We can't process such frames then since
2966 * that might, for example, cause stations to be added or other
2967 * driver callbacks to be invoked.
2968 */
2969 if (unlikely(local->quiescing || local->suspended))
2970 goto drop;
2971
2972 /*
2973 * The same happens when we're not even started,
2974 * but that's worth a warning.
2975 */
2976 if (WARN_ON(!local->started))
2977 goto drop;
2978
2979 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
2980 /*
2981 * Validate the rate, unless a PLCP error means that
2982 * we probably can't have a valid rate here anyway.
2983 */
2984
2985 if (status->flag & RX_FLAG_HT) {
2986 /*
2987 * rate_idx is MCS index, which can be [0-76]
2988 * as documented on:
2989 *
2990 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2991 *
2992 * Anything else would be some sort of driver or
2993 * hardware error. The driver should catch hardware
2994 * errors.
2995 */
2996 if (WARN((status->rate_idx < 0 ||
2997 status->rate_idx > 76),
2998 "Rate marked as an HT rate but passed "
2999 "status->rate_idx is not "
3000 "an MCS index [0-76]: %d (0x%02x)\n",
3001 status->rate_idx,
3002 status->rate_idx))
3003 goto drop;
3004 } else {
3005 if (WARN_ON(status->rate_idx < 0 ||
3006 status->rate_idx >= sband->n_bitrates))
3007 goto drop;
3008 rate = &sband->bitrates[status->rate_idx];
3009 }
3010 }
3011
3012 status->rx_flags = 0;
3013
3014 /*
3015 * key references and virtual interfaces are protected using RCU
3016 * and this requires that we are in a read-side RCU section during
3017 * receive processing
3018 */
3019 rcu_read_lock();
3020
3021 /*
3022 * Frames with failed FCS/PLCP checksum are not returned,
3023 * all other frames are returned without radiotap header
3024 * if it was previously present.
3025 * Also, frames with less than 16 bytes are dropped.
3026 */
3027 skb = ieee80211_rx_monitor(local, skb, rate);
3028 if (!skb) {
3029 rcu_read_unlock();
3030 return;
3031 }
3032
3033 ieee80211_tpt_led_trig_rx(local,
3034 ((struct ieee80211_hdr *)skb->data)->frame_control,
3035 skb->len);
3036 __ieee80211_rx_handle_packet(hw, skb);
3037
3038 rcu_read_unlock();
3039
3040 return;
3041 drop:
3042 kfree_skb(skb);
3043 }
3044 EXPORT_SYMBOL(ieee80211_rx);
3045
3046 /* This is a version of the rx handler that can be called from hard irq
3047 * context. Post the skb on the queue and schedule the tasklet */
3048 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
3049 {
3050 struct ieee80211_local *local = hw_to_local(hw);
3051
3052 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
3053
3054 skb->pkt_type = IEEE80211_RX_MSG;
3055 skb_queue_tail(&local->skb_queue, skb);
3056 tasklet_schedule(&local->tasklet);
3057 }
3058 EXPORT_SYMBOL(ieee80211_rx_irqsafe);