/* net/mac80211/rx.c — mac80211 receive path */
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <net/mac80211.h>
20 #include <net/ieee80211_radiotap.h>
21
22 #include "ieee80211_i.h"
23 #include "driver-ops.h"
24 #include "led.h"
25 #include "mesh.h"
26 #include "wep.h"
27 #include "wpa.h"
28 #include "tkip.h"
29 #include "wme.h"
30
31 /*
32 * monitor mode reception
33 *
34 * This function cleans up the SKB, i.e. it removes all the stuff
35 * only useful for monitoring.
36 */
37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
38 struct sk_buff *skb)
39 {
40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
41 if (likely(skb->len > FCS_LEN))
42 __pskb_trim(skb, skb->len - FCS_LEN);
43 else {
44 /* driver bug */
45 WARN_ON(1);
46 dev_kfree_skb(skb);
47 skb = NULL;
48 }
49 }
50
51 return skb;
52 }
53
54 static inline int should_drop_frame(struct sk_buff *skb,
55 int present_fcs_len)
56 {
57 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
58 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
59
60 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
61 return 1;
62 if (unlikely(skb->len < 16 + present_fcs_len))
63 return 1;
64 if (ieee80211_is_ctl(hdr->frame_control) &&
65 !ieee80211_is_pspoll(hdr->frame_control) &&
66 !ieee80211_is_back_req(hdr->frame_control))
67 return 1;
68 return 0;
69 }
70
71 static int
72 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
73 struct ieee80211_rx_status *status)
74 {
75 int len;
76
77 /* always present fields */
78 len = sizeof(struct ieee80211_radiotap_header) + 9;
79
80 if (status->flag & RX_FLAG_TSFT)
81 len += 8;
82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
83 len += 1;
84
85 if (len & 1) /* padding for RX_FLAGS if necessary */
86 len++;
87
88 return len;
89 }
90
/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 *
 * The caller must have reserved at least @rtap_len bytes of headroom
 * in @skb (as computed by ieee80211_rx_radiotap_len()); the header is
 * pushed in front of the frame data.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	u16 rx_flags = 0;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len);

	/* radiotap header, set always present flags */
	rthdr->it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_CHANNEL) |
			    (1 << IEEE80211_RADIOTAP_ANTENNA) |
			    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
	rthdr->it_len = cpu_to_le16(rtap_len);

	/* variable-length data starts right after the fixed header */
	pos = (unsigned char *)(rthdr+1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (status->flag & RX_FLAG_TSFT) {
		put_unaligned_le64(status->mactime, pos);
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (status->flag & RX_FLAG_HT) {
		/*
		 * TODO: add following information into radiotap header once
		 * suitable fields are defined for it:
		 * - MCS index (status->rate_idx)
		 * - HT40 (status->flag & RX_FLAG_40MHZ)
		 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
		 */
		*pos = 0;
	} else {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		/* radiotap rate unit is 500 kb/s; bitrate is 100 kb/s */
		*pos = rate->bitrate / 5;
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->band == IEEE80211_BAND_5GHZ)
		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
				   pos);
	else if (status->flag & RX_FLAG_HT)
		put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
				   pos);
	else if (rate->flags & IEEE80211_RATE_ERP_G)
		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
				   pos);
	else
		put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
				   pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	/* IEEE80211_RADIOTAP_ANTENNA */
	*pos = status->antenna;
	pos++;

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		pos++;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;
}
197
/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 *
 * Ownership note: @origskb is always consumed in some way — it is
 * either returned (possibly trimmed), handed to the monitor path, or
 * freed. The returned skb (if any) is what the normal RX path keeps
 * processing.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	int needed_headroom = 0;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	/* room for the radiotap header based on driver features */
	needed_headroom = ieee80211_rx_radiotap_len(local, status);

	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		present_fcs_len = FCS_LEN;

	/* make sure hdr->frame_control is on the linear part */
	if (!pskb_may_pull(origskb, 2)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	if (!local->monitors) {
		/* no monitor interfaces: just drop bad frames and strip
		 * the monitor-only info for the normal RX path */
		if (should_drop_frame(origskb, present_fcs_len)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb);
	}

	if (should_drop_frame(origskb, present_fcs_len)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb);

		/* copy failed: fall back to returning only the original */
		if (!skb)
			return origskb;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	/* deliver a clone to each running monitor interface (except
	 * "cooked" monitors, which are handled elsewhere) */
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	/* the last interface gets the original skb instead of a clone */
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}
313
314
315 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
316 {
317 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
318 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
319 int tid;
320
321 /* does the frame have a qos control field? */
322 if (ieee80211_is_data_qos(hdr->frame_control)) {
323 u8 *qc = ieee80211_get_qos_ctl(hdr);
324 /* frame has qos control */
325 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
326 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
327 status->rx_flags |= IEEE80211_RX_AMSDU;
328 } else {
329 /*
330 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
331 *
332 * Sequence numbers for management frames, QoS data
333 * frames with a broadcast/multicast address in the
334 * Address 1 field, and all non-QoS data frames sent
335 * by QoS STAs are assigned using an additional single
336 * modulo-4096 counter, [...]
337 *
338 * We also use that counter for non-QoS STAs.
339 */
340 tid = NUM_RX_DATA_QUEUES - 1;
341 }
342
343 rx->queue = tid;
344 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
345 * For now, set skb->priority to 0 for other cases. */
346 rx->skb->priority = (tid > 7) ? 0 : tid;
347 }
348
349 /**
350 * DOC: Packet alignment
351 *
352 * Drivers always need to pass packets that are aligned to two-byte boundaries
353 * to the stack.
354 *
355 * Additionally, should, if possible, align the payload data in a way that
356 * guarantees that the contained IP header is aligned to a four-byte
357 * boundary. In the case of regular frames, this simply means aligning the
358 * payload to a four-byte boundary (because either the IP header is directly
359 * contained, or IV/RFC1042 headers that have a length divisible by four are
360 * in front of it). If the payload data is not properly aligned and the
361 * architecture doesn't support efficient unaligned operations, mac80211
362 * will align the data.
363 *
364 * With A-MSDU frames, however, the payload data address must yield two modulo
365 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
366 * push the IP header further back to a multiple of four again. Thankfully, the
367 * specs were sane enough this time around to require padding each A-MSDU
368 * subframe to a length that is a multiple of four.
369 *
370 * Padding like Atheros hardware adds which is inbetween the 802.11 header and
371 * the payload is not supported, the driver is required to move the 802.11
372 * header to be directly in front of the payload in that case.
373 */
/*
 * Debug-only check that the driver handed us a two-byte aligned
 * 802.11 header, as required by the packet alignment rules.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	unsigned long addr = (unsigned long)rx->skb->data;

	WARN_ONCE(addr & 1, "unaligned packet at 0x%p\n", rx->skb->data);
#endif
}
381
382
383 /* rx handlers */
384
/*
 * RX handler: divert frames received while scanning to the scan code.
 * Frames outside a scan pass straight through (RX_CONTINUE).
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	struct sk_buff *skb = rx->skb;

	/* common case: frame was not received during a scan */
	if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
		return RX_CONTINUE;

	/* hardware scan: let the scan code decide the frame's fate */
	if (test_bit(SCAN_HW_SCANNING, &local->scanning))
		return ieee80211_scan_rx(rx->sdata, skb);

	if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
		/* drop all the other packets during a software scan anyway */
		if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
			dev_kfree_skb(skb);
		return RX_QUEUED;
	}

	/* scanning finished during invoking of handlers */
	I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
	return RX_DROP_UNUSABLE;
}
409
410
411 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
412 {
413 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
414
415 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
416 return 0;
417
418 return ieee80211_is_robust_mgmt_frame(hdr);
419 }
420
421
422 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
423 {
424 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
425
426 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
427 return 0;
428
429 return ieee80211_is_robust_mgmt_frame(hdr);
430 }
431
432
433 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
434 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
435 {
436 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
437 struct ieee80211_mmie *mmie;
438
439 if (skb->len < 24 + sizeof(*mmie) ||
440 !is_multicast_ether_addr(hdr->da))
441 return -1;
442
443 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
444 return -1; /* not a robust management frame */
445
446 mmie = (struct ieee80211_mmie *)
447 (skb->data + skb->len - sizeof(*mmie));
448 if (mmie->element_id != WLAN_EID_MMIE ||
449 mmie->length != sizeof(*mmie) - 2)
450 return -1;
451
452 return le16_to_cpu(mmie->key_id);
453 }
454
455
/*
 * Mesh-specific sanity checks on received frames. Returns RX_CONTINUE
 * to keep processing the frame or RX_DROP_MONITOR to drop it (while
 * still making it visible on monitor interfaces).
 */
static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			/* group-addressed mesh data must be FromDS-only
			 * and must not have been sourced by ourselves */
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
				return RX_DROP_MONITOR;
		} else {
			/* individually addressed mesh data uses the
			 * 4-address format; drop our own frames here too */
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			mgmt = (struct ieee80211_mgmt *)hdr;
			/* only peer link establishment actions are allowed
			 * before the link is up */
			if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;

	}

#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))

	/* drop group-addressed frames we have already seen (RMC check) */
	if (ieee80211_is_data(hdr->frame_control) &&
	    is_multicast_ether_addr(hdr->addr1) &&
	    mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
		return RX_DROP_MONITOR;
#undef msh_h_get

	return RX_CONTINUE;
}
514
515 #define SEQ_MODULO 0x1000
516 #define SEQ_MASK 0xfff
517
518 static inline int seq_less(u16 sq1, u16 sq2)
519 {
520 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
521 }
522
523 static inline u16 seq_inc(u16 sq)
524 {
525 return (sq + 1) & SEQ_MASK;
526 }
527
528 static inline u16 seq_sub(u16 sq1, u16 sq2)
529 {
530 return (sq1 - sq2) & SEQ_MASK;
531 }
532
533
534 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
535 struct tid_ampdu_rx *tid_agg_rx,
536 int index,
537 struct sk_buff_head *frames)
538 {
539 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
540
541 if (!skb)
542 goto no_frame;
543
544 /* release the frame from the reorder ring buffer */
545 tid_agg_rx->stored_mpdu_num--;
546 tid_agg_rx->reorder_buf[index] = NULL;
547 __skb_queue_tail(frames, skb);
548
549 no_frame:
550 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
551 }
552
553 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
554 struct tid_ampdu_rx *tid_agg_rx,
555 u16 head_seq_num,
556 struct sk_buff_head *frames)
557 {
558 int index;
559
560 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
561 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
562 tid_agg_rx->buf_size;
563 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
564 }
565 }
566
/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, j;

	/* release the buffer until next missing frame */
	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
						tid_agg_rx->buf_size;
	if (!tid_agg_rx->reorder_buf[index] &&
	    tid_agg_rx->stored_mpdu_num > 1) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!tid_agg_rx->reorder_buf[j]) {
				skipped++;
				continue;
			}
			/* oldest pending frame has not timed out yet; stop
			 * releasing (the goto target just returns, since the
			 * release timer is disabled — see comment below) */
			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				wiphy_debug(hw->wiphy,
					    "release an RX reorder frame due to timeout on earlier frames\n");
#endif
			ieee80211_release_reorder_frame(hw, tid_agg_rx,
							j, frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
			skipped = 0;
		}
	} else while (tid_agg_rx->reorder_buf[index]) {
		/* release in-order frames starting from the head */
		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
							tid_agg_rx->buf_size;
	}

	/*
	 * Disable the reorder release timer for now.
	 *
	 * The current implementation lacks a proper locking scheme
	 * which would protect vital statistic and debug counters
	 * from being updated by two different but concurrent BHs.
	 *
	 * More information about the topic is available from:
	 * - thread: http://marc.info/?t=128635927000001
	 *
	 * What was wrong:
	 * => http://marc.info/?l=linux-wireless&m=128636170811964
	 * "Basically the thing is that until your patch, the data
	 *  in the struct didn't actually need locking because it
	 *  was accessed by the RX path only which is not concurrent."
	 *
	 * List of what needs to be fixed:
	 * => http://marc.info/?l=linux-wireless&m=128656352920957
	 *

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = seq_sub(tid_agg_rx->head_seq_num,
				    tid_agg_rx->ssn) % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (tid_agg_rx->reorder_buf[j])
				break;
		}

 set_release_timer:

		mod_timer(&tid_agg_rx->reorder_timer,
			  tid_agg_rx->reorder_time[j] +
			  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
	*/

 set_release_timer:
	return;
}
668
/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed
 * (buffered in the reorder ring or freed as stale/duplicate).
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	spin_lock(&tid_agg_rx->reorder_lock);
	/* frame with out of date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If frame the sequence number exceeds our buffering window
	 * size release some previous frames to make room for this one.
	 */
	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
						 frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (tid_agg_rx->reorder_buf[index]) {
		/* duplicate — drop it, keep the stored one */
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
		ret = false;	/* caller processes the frame directly */
		goto out;
	}

	/* put the frame in the reordering buffer */
	tid_agg_rx->reorder_buf[index] = skb;
	tid_agg_rx->reorder_time[index] = jiffies;
	tid_agg_rx->stored_mpdu_num++;
	/* try to release any frames that are now in order */
	ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}
738
/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that
 * are ready for processing are appended to the given frames queue;
 * out-of-order frames are held in the reordering buffer (fragmented
 * MPDUs are instead handed off to terminate the aggregation session).
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	int tid;

	/* only QoS data frames participate in reordering */
	if (!ieee80211_is_data_qos(hdr->frame_control))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		goto dont_reorder;

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		mod_timer(&tid_agg_rx->session_timer,
			  TU_TO_EXP_TIME(tid_agg_rx->timeout));

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		/* hand the frame to the interface work queue, which will
		 * tear down the aggregation session */
		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
		return;

 dont_reorder:
	/* not subject to reordering: process immediately */
	__skb_queue_tail(frames, skb);
}
805
/*
 * RX handler: basic validity checks — duplicate-retransmission
 * filtering, minimum length, and frame-class filtering based on the
 * station's association state.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
			     rx->sta->last_seq_ctrl[rx->queue] ==
			     hdr->seq_ctrl)) {
			if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
				rx->local->dot11FrameDuplicateCount++;
				rx->sta->num_duplicates++;
			}
			return RX_DROP_MONITOR;
		} else
			rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
	}

	if (unlikely(rx->skb->len < 16)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
		return RX_DROP_MONITOR;
	}

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
		/* NOTE(review): both branches below return the same value;
		 * the inner if only distinguishes the (documented) reason
		 * for dropping, not the action taken. */
		if ((!ieee80211_has_fromds(hdr->frame_control) &&
		     !ieee80211_has_tods(hdr->frame_control) &&
		     ieee80211_is_data(hdr->frame_control)) ||
		    !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
			/* Drop IBSS frames and frames for other hosts
			 * silently. */
			return RX_DROP_MONITOR;
		}

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}
862
863
/*
 * RX handler: select the key for this frame and decrypt it (or verify
 * that the hardware already did). Sets rx->key; returns RX_CONTINUE
 * for frames that need no (further) decryption, RX_DROP_MONITOR /
 * RX_DROP_UNUSABLE for undecryptable frames, or the result of the
 * cipher-specific decrypt routine.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *sta_ptk = NULL;
	int mmie_keyidx = -1;
	__le16 fc;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 * - GTK (group keys)
	 * - IGTK (group keys for management frames)
	 * - PTK (pairwise keys)
	 * - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/*
	 * No point in finding a key and decrypting if the frame is neither
	 * addressed to us nor a multicast frame.
	 */
	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	/* start without a key */
	rx->key = NULL;

	if (rx->sta)
		sta_ptk = rcu_dereference(rx->sta->ptk);

	fc = hdr->frame_control;

	if (!ieee80211_has_protected(fc))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
		rx->key = sta_ptk;
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(fc))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		if (rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
		if (!rx->key)
			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(fc)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		if (ieee80211_is_mgmt(fc) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else if ((key = rcu_dereference(rx->sdata->default_key)))
			rx->key = key;
		return RX_CONTINUE;
	} else {
		u8 keyid;
		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(fc);

		/* need at least the 8-byte IV/ExtIV after the header */
		if (rx->skb->len < 8 + hdrlen)
			return RX_DROP_UNUSABLE; /* TODO: count this? */

		/*
		 * no need to call ieee80211_wep_get_keyidx,
		 * it verifies a bunch of things we've done already
		 */
		skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
		keyidx = keyid >> 6;

		/* check per-station GTK first, if multicast packet */
		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);

		/* if not found, try default key */
		if (!rx->key) {
			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

			/*
			 * RSNA-protected unicast frames should always be
			 * sent with pairwise or station-to-station keys,
			 * but for WEP we allow using a key index as well.
			 */
			if (rx->key &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
			    !is_multicast_ether_addr(hdr->addr1))
				rx->key = NULL;
		}
	}

	if (rx->key) {
		rx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */
	} else {
		return RX_DROP_MONITOR;
	}

	/* decryption routines need contiguous data */
	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;
	/* the hdr variable is invalid now! */

	switch (rx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* Check for weak IVs if possible */
		if (rx->sta && ieee80211_is_data(fc) &&
		    (!(status->flag & RX_FLAG_IV_STRIPPED) ||
		     !(status->flag & RX_FLAG_DECRYPTED)) &&
		    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
			rx->sta->wep_weak_iv_count++;

		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	default:
		/*
		 * We can reach here only with HW-only algorithms
		 * but why didn't it decrypt the frame?!
		 */
		return RX_DROP_UNUSABLE;
	}

	/* either the frame has been decrypted or will be dropped */
	status->flag |= RX_FLAG_DECRYPTED;

	return result;
}
1051
1052 static ieee80211_rx_result debug_noinline
1053 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1054 {
1055 struct ieee80211_local *local;
1056 struct ieee80211_hdr *hdr;
1057 struct sk_buff *skb;
1058
1059 local = rx->local;
1060 skb = rx->skb;
1061 hdr = (struct ieee80211_hdr *) skb->data;
1062
1063 if (!local->pspolling)
1064 return RX_CONTINUE;
1065
1066 if (!ieee80211_has_fromds(hdr->frame_control))
1067 /* this is not from AP */
1068 return RX_CONTINUE;
1069
1070 if (!ieee80211_is_data(hdr->frame_control))
1071 return RX_CONTINUE;
1072
1073 if (!ieee80211_has_moredata(hdr->frame_control)) {
1074 /* AP has no more frames buffered for us */
1075 local->pspolling = false;
1076 return RX_CONTINUE;
1077 }
1078
1079 /* more data bit is set, let's request a new frame from the AP */
1080 ieee80211_send_pspoll(local, rx->sdata);
1081
1082 return RX_CONTINUE;
1083 }
1084
1085 static void ap_sta_ps_start(struct sta_info *sta)
1086 {
1087 struct ieee80211_sub_if_data *sdata = sta->sdata;
1088 struct ieee80211_local *local = sdata->local;
1089
1090 atomic_inc(&sdata->bss->num_sta_ps);
1091 set_sta_flags(sta, WLAN_STA_PS_STA);
1092 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1093 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1094 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1095 sdata->name, sta->sta.addr, sta->sta.aid);
1096 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1097 }
1098
1099 static void ap_sta_ps_end(struct sta_info *sta)
1100 {
1101 struct ieee80211_sub_if_data *sdata = sta->sdata;
1102
1103 atomic_dec(&sdata->bss->num_sta_ps);
1104
1105 clear_sta_flags(sta, WLAN_STA_PS_STA);
1106
1107 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1108 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1109 sdata->name, sta->sta.addr, sta->sta.aid);
1110 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1111
1112 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1113 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1114 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1115 sdata->name, sta->sta.addr, sta->sta.aid);
1116 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1117 return;
1118 }
1119
1120 ieee80211_sta_ps_deliver_wakeup(sta);
1121 }
1122
/*
 * Per-station RX bookkeeping: refresh activity timestamps and RX
 * counters for rx->sta, follow AP-side power-save transitions as
 * signalled by the frame's PM bit, and consume nullfunc frames.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	/* nothing to account for without a known station */
	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID to avoid keeping the current IBSS network alive in cases
	 * where other STAs start using different BSSID.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
			sta->last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when if they are found to
		 * match the current local configuration when processed.
		 */
		sta->last_rx = jiffies;
	}

	/* frames not addressed to us only update the timestamp above */
	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_fragments++;
	sta->rx_bytes += rx->skb->len;
	sta->last_signal = status->signal;

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
			/*
			 * Ignore doze->wake transitions that are
			 * indicated by non-data frames, the standard
			 * is unclear here, but for example going to
			 * PS mode and then scanning would cause a
			 * doze->wake transition for the probe request,
			 * and that is clearly undesirable.
			 */
			if (ieee80211_is_data(hdr->frame_control) &&
			    !ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_start(sta);
		}
	}

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet, drop
		 * the frame to the monitor interface, to make sure
		 * that hostapd sees it
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta)))
			return RX_DROP_MONITOR;
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packed.
		 */
		sta->rx_packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */
1217
1218 static inline struct ieee80211_fragment_entry *
1219 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1220 unsigned int frag, unsigned int seq, int rx_queue,
1221 struct sk_buff **skb)
1222 {
1223 struct ieee80211_fragment_entry *entry;
1224 int idx;
1225
1226 idx = sdata->fragment_next;
1227 entry = &sdata->fragments[sdata->fragment_next++];
1228 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1229 sdata->fragment_next = 0;
1230
1231 if (!skb_queue_empty(&entry->skb_list)) {
1232 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1233 struct ieee80211_hdr *hdr =
1234 (struct ieee80211_hdr *) entry->skb_list.next->data;
1235 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1236 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1237 "addr1=%pM addr2=%pM\n",
1238 sdata->name, idx,
1239 jiffies - entry->first_frag_time, entry->seq,
1240 entry->last_frag, hdr->addr1, hdr->addr2);
1241 #endif
1242 __skb_queue_purge(&entry->skb_list);
1243 }
1244
1245 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1246 *skb = NULL;
1247 entry->first_frag_time = jiffies;
1248 entry->seq = seq;
1249 entry->rx_queue = rx_queue;
1250 entry->last_frag = frag;
1251 entry->ccmp = 0;
1252 entry->extra_len = 0;
1253
1254 return entry;
1255 }
1256
/*
 * Find the pending reassembly entry that fragment (frag, seq, rx_queue)
 * continues: searches the circular cache newest-first, requiring the
 * same frame type and both addresses, and expecting exactly the next
 * fragment number. Entries older than two seconds are purged on the
 * way. Returns NULL if no matching entry exists.
 */
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	/* walk backwards from the most recently allocated slot */
	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		/* first fragment's header identifies the exchange */
		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
		    compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
			continue;

		/* stale entry (older than 2s): drop its fragments */
		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}
1299
/*
 * Defragmentation handler: passes non-fragmented frames straight
 * through, queues first fragments into the reassembly cache, appends
 * continuation fragments (verifying sequential CCMP PNs per IEEE
 * 802.11i 8.3.3.4.5), and rebuilds the full frame when the last
 * fragment arrives. Takes ownership of rx->skb for intermediate
 * fragments (sets it to NULL and returns RX_QUEUED).
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;
	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
		   (rx->skb)->len < 24 ||
		   is_multicast_ether_addr(hdr->addr1))) {
		/* not fragmented */
		goto out;
	}
	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;

	/*
	 * skb_linearize() might change the skb->data and
	 * previously cached variables (in this case, hdr) need to
	 * be refreshed with the new data.
	 */
	hdr = (struct ieee80211_hdr *)rx->skb->data;
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->queue, &(rx->skb));
		if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
		    ieee80211_has_protected(fc)) {
			/* management frames use the extra PN queue slot */
			int queue = ieee80211_is_mgmt(fc) ?
				NUM_RX_DATA_QUEUES : rx->queue;
			/* Store CCMP PN so that we can verify that the next
			 * fragment has a sequential PN value. */
			entry->ccmp = 1;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[queue],
			       CCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* Verify that MPDUs within one MSDU have sequential PN values.
	 * (IEEE 802.11i, 8.3.3.4.5) */
	if (entry->ccmp) {
		int i;
		u8 pn[CCMP_PN_LEN], *rpn;
		int queue;
		if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
			return RX_DROP_UNUSABLE;
		/* increment the stored PN (big-endian byte array) by one */
		memcpy(pn, entry->last_pn, CCMP_PN_LEN);
		for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
			pn[i]++;
			if (pn[i])
				break;
		}
		queue = ieee80211_is_mgmt(fc) ?
			NUM_RX_DATA_QUEUES : rx->queue;
		rpn = rx->key->u.ccmp.rx_pn[queue];
		if (memcmp(pn, rpn, CCMP_PN_LEN))
			return RX_DROP_UNUSABLE;
		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
	}

	/* strip the 802.11 header; only the payload is accumulated */
	skb_pull(rx->skb, ieee80211_hdrlen(fc));
	__skb_queue_tail(&entry->skb_list, rx->skb);
	entry->last_frag = frag;
	entry->extra_len += rx->skb->len;
	if (ieee80211_has_morefrags(fc)) {
		/* ownership transferred to the fragment cache */
		rx->skb = NULL;
		return RX_QUEUED;
	}

	/* last fragment received: concatenate everything onto the first */
	rx->skb = __skb_dequeue(&entry->skb_list);
	if (skb_tailroom(rx->skb) < entry->extra_len) {
		I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
					      GFP_ATOMIC))) {
			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
			__skb_queue_purge(&entry->skb_list);
			return RX_DROP_UNUSABLE;
		}
	}
	while ((skb = __skb_dequeue(&entry->skb_list))) {
		memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
		dev_kfree_skb(skb);
	}

	/* Complete frame has been reassembled - process it now */
	status = IEEE80211_SKB_RXCB(rx->skb);
	status->rx_flags |= IEEE80211_RX_FRAGMENTED;

 out:
	if (rx->sta)
		rx->sta->rx_packets++;
	if (is_multicast_ether_addr(hdr->addr1))
		rx->local->dot11MulticastReceivedFrameCount++;
	else
		ieee80211_led_rx(rx->local);
	return RX_CONTINUE;
}
1421
1422 static ieee80211_rx_result debug_noinline
1423 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1424 {
1425 struct ieee80211_sub_if_data *sdata = rx->sdata;
1426 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1427 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1428
1429 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1430 !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
1431 return RX_CONTINUE;
1432
1433 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1434 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1435 return RX_DROP_UNUSABLE;
1436
1437 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1438 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1439 else
1440 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1441
1442 /* Free PS Poll skb here instead of returning RX_DROP that would
1443 * count as an dropped frame. */
1444 dev_kfree_skb(rx->skb);
1445
1446 return RX_QUEUED;
1447 }
1448
1449 static ieee80211_rx_result debug_noinline
1450 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1451 {
1452 u8 *data = rx->skb->data;
1453 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1454
1455 if (!ieee80211_is_data_qos(hdr->frame_control))
1456 return RX_CONTINUE;
1457
1458 /* remove the qos control field, update frame type and meta-data */
1459 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1460 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1461 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1462 /* change frame type to non QOS */
1463 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1464
1465 return RX_CONTINUE;
1466 }
1467
1468 static int
1469 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1470 {
1471 if (unlikely(!rx->sta ||
1472 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1473 return -EACCES;
1474
1475 return 0;
1476 }
1477
1478 static int
1479 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1480 {
1481 struct sk_buff *skb = rx->skb;
1482 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1483
1484 /*
1485 * Pass through unencrypted frames if the hardware has
1486 * decrypted them already.
1487 */
1488 if (status->flag & RX_FLAG_DECRYPTED)
1489 return 0;
1490
1491 /* Drop unencrypted frames if key is set. */
1492 if (unlikely(!ieee80211_has_protected(fc) &&
1493 !ieee80211_is_nullfunc(fc) &&
1494 ieee80211_is_data(fc) &&
1495 (rx->key || rx->sdata->drop_unencrypted)))
1496 return -EACCES;
1497
1498 return 0;
1499 }
1500
/*
 * Enforce management frame protection (MFP, IEEE 802.11w) for robust
 * management frames when the peer negotiated it. Returns 0 to accept
 * the frame, -EACCES to drop it.
 */
static int
ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	__le16 fc = hdr->frame_control;

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	/* checks below only apply when the STA negotiated MFP */
	if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
		/* unicast robust mgmt frames must be protected once keyed */
		if (unlikely(!ieee80211_has_protected(fc) &&
			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
			     rx->key))
			return -EACCES;
		/* BIP does not use Protected field, so need to check MMIE */
		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
			     ieee80211_get_mmie_keyidx(rx->skb) < 0))
			return -EACCES;
		/*
		 * When using MFP, Action frames are not allowed prior to
		 * having configured keys.
		 */
		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
			     ieee80211_is_robust_mgmt_frame(
				     (struct ieee80211_hdr *) rx->skb->data)))
			return -EACCES;
	}

	return 0;
}
1536
1537 static int
1538 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1539 {
1540 struct ieee80211_sub_if_data *sdata = rx->sdata;
1541 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1542
1543 if (ieee80211_has_a4(hdr->frame_control) &&
1544 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1545 return -1;
1546
1547 if (is_multicast_ether_addr(hdr->addr1) &&
1548 ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
1549 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1550 return -1;
1551
1552 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1553 }
1554
1555 /*
1556 * requires that rx->skb is a frame with ethernet header
1557 */
1558 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1559 {
1560 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1561 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1562 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1563
1564 /*
1565 * Allow EAPOL frames to us/the PAE group address regardless
1566 * of whether the frame was encrypted or not.
1567 */
1568 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1569 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1570 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1571 return true;
1572
1573 if (ieee80211_802_1x_port_control(rx) ||
1574 ieee80211_drop_unencrypted(rx, fc))
1575 return false;
1576
1577 return true;
1578 }
1579
1580 /*
1581 * requires that rx->skb is a frame with ethernet header
1582 */
1583 static void
1584 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1585 {
1586 struct ieee80211_sub_if_data *sdata = rx->sdata;
1587 struct net_device *dev = sdata->dev;
1588 struct sk_buff *skb, *xmit_skb;
1589 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1590 struct sta_info *dsta;
1591 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1592
1593 skb = rx->skb;
1594 xmit_skb = NULL;
1595
1596 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1597 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1598 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1599 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1600 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1601 if (is_multicast_ether_addr(ehdr->h_dest)) {
1602 /*
1603 * send multicast frames both to higher layers in
1604 * local net stack and back to the wireless medium
1605 */
1606 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1607 if (!xmit_skb && net_ratelimit())
1608 printk(KERN_DEBUG "%s: failed to clone "
1609 "multicast frame\n", dev->name);
1610 } else {
1611 dsta = sta_info_get(sdata, skb->data);
1612 if (dsta) {
1613 /*
1614 * The destination station is associated to
1615 * this AP (in this VLAN), so send the frame
1616 * directly to it and do not pass it to local
1617 * net stack.
1618 */
1619 xmit_skb = skb;
1620 skb = NULL;
1621 }
1622 }
1623 }
1624
1625 if (skb) {
1626 int align __maybe_unused;
1627
1628 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1629 /*
1630 * 'align' will only take the values 0 or 2 here
1631 * since all frames are required to be aligned
1632 * to 2-byte boundaries when being passed to
1633 * mac80211. That also explains the __skb_push()
1634 * below.
1635 */
1636 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1637 if (align) {
1638 if (WARN_ON(skb_headroom(skb) < 3)) {
1639 dev_kfree_skb(skb);
1640 skb = NULL;
1641 } else {
1642 u8 *data = skb->data;
1643 size_t len = skb_headlen(skb);
1644 skb->data -= align;
1645 memmove(skb->data, data, len);
1646 skb_set_tail_pointer(skb, len);
1647 }
1648 }
1649 #endif
1650
1651 if (skb) {
1652 /* deliver to local stack */
1653 skb->protocol = eth_type_trans(skb, dev);
1654 memset(skb->cb, 0, sizeof(skb->cb));
1655 netif_receive_skb(skb);
1656 }
1657 }
1658
1659 if (xmit_skb) {
1660 /* send to wireless media */
1661 xmit_skb->protocol = htons(ETH_P_802_3);
1662 skb_reset_network_header(xmit_skb);
1663 skb_reset_mac_header(xmit_skb);
1664 dev_queue_xmit(xmit_skb);
1665 }
1666 }
1667
/*
 * A-MSDU handler: split an aggregated MSDU into individual 802.3
 * frames and deliver each one that passes the access/encryption
 * checks. Consumes rx->skb (returns RX_QUEUED) once it takes over.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
	struct net_device *dev = rx->sdata->dev;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	struct sk_buff_head frame_list;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (unlikely(!ieee80211_is_data(fc)))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data_present(fc)))
		return RX_DROP_MONITOR;

	/* only frames the driver flagged as A-MSDU are handled here */
	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
		return RX_CONTINUE;

	/* same addressing-mode restrictions as __ieee80211_data_to_8023() */
	if (ieee80211_has_a4(hdr->frame_control) &&
	    rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	    !rx->sdata->u.vlan.sta)
		return RX_DROP_UNUSABLE;

	if (is_multicast_ether_addr(hdr->addr1) &&
	    ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	      rx->sdata->u.vlan.sta) ||
	     (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
	      rx->sdata->u.mgd.use_4addr)))
		return RX_DROP_UNUSABLE;

	skb->dev = dev;
	__skb_queue_head_init(&frame_list);

	if (skb_linearize(skb))
		return RX_DROP_UNUSABLE;

	/* split the A-MSDU into per-subframe 802.3 skbs */
	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
				 rx->sdata->vif.type,
				 rx->local->hw.extra_tx_headroom);

	while (!skb_queue_empty(&frame_list)) {
		rx->skb = __skb_dequeue(&frame_list);

		if (!ieee80211_frame_allowed(rx, fc)) {
			dev_kfree_skb(rx->skb);
			continue;
		}
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += rx->skb->len;

		ieee80211_deliver_skb(rx);
	}

	return RX_QUEUED;
}
1724
1725 #ifdef CONFIG_MAC80211_MESH
1726 static ieee80211_rx_result
1727 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1728 {
1729 struct ieee80211_hdr *hdr;
1730 struct ieee80211s_hdr *mesh_hdr;
1731 unsigned int hdrlen;
1732 struct sk_buff *skb = rx->skb, *fwd_skb;
1733 struct ieee80211_local *local = rx->local;
1734 struct ieee80211_sub_if_data *sdata = rx->sdata;
1735 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1736
1737 hdr = (struct ieee80211_hdr *) skb->data;
1738 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1739 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1740
1741 if (!ieee80211_is_data(hdr->frame_control))
1742 return RX_CONTINUE;
1743
1744 if (!mesh_hdr->ttl)
1745 /* illegal frame */
1746 return RX_DROP_MONITOR;
1747
1748 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1749 struct mesh_path *mppath;
1750 char *proxied_addr;
1751 char *mpp_addr;
1752
1753 if (is_multicast_ether_addr(hdr->addr1)) {
1754 mpp_addr = hdr->addr3;
1755 proxied_addr = mesh_hdr->eaddr1;
1756 } else {
1757 mpp_addr = hdr->addr4;
1758 proxied_addr = mesh_hdr->eaddr2;
1759 }
1760
1761 rcu_read_lock();
1762 mppath = mpp_path_lookup(proxied_addr, sdata);
1763 if (!mppath) {
1764 mpp_path_add(proxied_addr, mpp_addr, sdata);
1765 } else {
1766 spin_lock_bh(&mppath->state_lock);
1767 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1768 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1769 spin_unlock_bh(&mppath->state_lock);
1770 }
1771 rcu_read_unlock();
1772 }
1773
1774 /* Frame has reached destination. Don't forward */
1775 if (!is_multicast_ether_addr(hdr->addr1) &&
1776 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1777 return RX_CONTINUE;
1778
1779 mesh_hdr->ttl--;
1780
1781 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1782 if (!mesh_hdr->ttl)
1783 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1784 dropped_frames_ttl);
1785 else {
1786 struct ieee80211_hdr *fwd_hdr;
1787 struct ieee80211_tx_info *info;
1788
1789 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1790
1791 if (!fwd_skb && net_ratelimit())
1792 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1793 sdata->name);
1794
1795 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1796 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1797 info = IEEE80211_SKB_CB(fwd_skb);
1798 memset(info, 0, sizeof(*info));
1799 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1800 info->control.vif = &rx->sdata->vif;
1801 skb_set_queue_mapping(skb,
1802 ieee80211_select_queue(rx->sdata, fwd_skb));
1803 ieee80211_set_qos_hdr(local, skb);
1804 if (is_multicast_ether_addr(fwd_hdr->addr1))
1805 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1806 fwded_mcast);
1807 else {
1808 int err;
1809 /*
1810 * Save TA to addr1 to send TA a path error if a
1811 * suitable next hop is not found
1812 */
1813 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1814 ETH_ALEN);
1815 err = mesh_nexthop_lookup(fwd_skb, sdata);
1816 /* Failed to immediately resolve next hop:
1817 * fwded frame was dropped or will be added
1818 * later to the pending skb queue. */
1819 if (err)
1820 return RX_DROP_MONITOR;
1821
1822 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1823 fwded_unicast);
1824 }
1825 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1826 fwded_frames);
1827 ieee80211_add_pending_skb(local, fwd_skb);
1828 }
1829 }
1830
1831 if (is_multicast_ether_addr(hdr->addr1) ||
1832 sdata->dev->flags & IFF_PROMISC)
1833 return RX_CONTINUE;
1834 else
1835 return RX_DROP_MONITOR;
1836 }
1837 #endif
1838
1839 static ieee80211_rx_result debug_noinline
1840 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1841 {
1842 struct ieee80211_sub_if_data *sdata = rx->sdata;
1843 struct ieee80211_local *local = rx->local;
1844 struct net_device *dev = sdata->dev;
1845 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1846 __le16 fc = hdr->frame_control;
1847 int err;
1848
1849 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1850 return RX_CONTINUE;
1851
1852 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1853 return RX_DROP_MONITOR;
1854
1855 /*
1856 * Allow the cooked monitor interface of an AP to see 4-addr frames so
1857 * that a 4-addr station can be detected and moved into a separate VLAN
1858 */
1859 if (ieee80211_has_a4(hdr->frame_control) &&
1860 sdata->vif.type == NL80211_IFTYPE_AP)
1861 return RX_DROP_MONITOR;
1862
1863 err = __ieee80211_data_to_8023(rx);
1864 if (unlikely(err))
1865 return RX_DROP_UNUSABLE;
1866
1867 if (!ieee80211_frame_allowed(rx, fc))
1868 return RX_DROP_MONITOR;
1869
1870 rx->skb->dev = dev;
1871
1872 dev->stats.rx_packets++;
1873 dev->stats.rx_bytes += rx->skb->len;
1874
1875 if (ieee80211_is_data(hdr->frame_control) &&
1876 !is_multicast_ether_addr(hdr->addr1) &&
1877 local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
1878 mod_timer(&local->dynamic_ps_timer, jiffies +
1879 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1880 }
1881
1882 ieee80211_deliver_skb(rx);
1883
1884 return RX_QUEUED;
1885 }
1886
/*
 * Control frame handler: process BlockAck Request frames by releasing
 * buffered reorder frames up to the BAR's starting sequence number;
 * all other control frames are dropped to cooked monitors.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 start_seq_num;
	u16 tid;

	if (likely(!ieee80211_is_ctl(bar->frame_control)))
		return RX_CONTINUE;

	if (ieee80211_is_back_req(bar->frame_control)) {
		struct {
			__le16 control, start_seq_num;
		} __packed bar_data;

		if (!rx->sta)
			return RX_DROP_MONITOR;

		/* copy out the BAR fields; the skb may be non-linear */
		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
				  &bar_data, sizeof(bar_data)))
			return RX_DROP_MONITOR;

		/* TID lives in the top 4 bits of the BAR control field */
		tid = le16_to_cpu(bar_data.control) >> 12;

		tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
		if (!tid_agg_rx)
			return RX_DROP_MONITOR;

		/* sequence number is in bits 4-15 of the SSC field */
		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;

		/* reset session timer */
		if (tid_agg_rx->timeout)
			mod_timer(&tid_agg_rx->session_timer,
				  TU_TO_EXP_TIME(tid_agg_rx->timeout));

		/* release stored frames up to start of BAR */
		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
						 frames);
		kfree_skb(skb);
		return RX_QUEUED;
	}

	/*
	 * After this point, we only want management frames,
	 * so we can drop all remaining control frames to
	 * cooked monitor interfaces.
	 */
	return RX_DROP_MONITOR;
}
1940
/*
 * Answer an SA Query request (IEEE 802.11w) from the current AP by
 * building and transmitting an SA Query response echoing the request's
 * transaction ID. Silently ignores frames that are not addressed to
 * us, not from our AP, or too short.
 */
static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
					   struct ieee80211_mgmt *mgmt,
					   size_t len)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *resp;

	if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
		/* Not to own unicast address */
		return;
	}

	if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
	    compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
		/* Not from the current AP or not associated yet. */
		return;
	}

	if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
		/* Too short SA Query request frame */
		return;
	}

	skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
	if (skb == NULL)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* build the 24-byte management header first */
	resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(resp, 0, 24);
	memcpy(resp->da, mgmt->sa, ETH_ALEN);
	memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
	memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);
	/* then the action category/body with the echoed transaction ID */
	skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
	resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
	resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
	memcpy(resp->u.action.u.sa_query.trans_id,
	       mgmt->u.action.u.sa_query.trans_id,
	       WLAN_SA_QUERY_TR_ID_LEN);

	ieee80211_tx_skb(sdata, skb);
}
1986
1987 static ieee80211_rx_result debug_noinline
1988 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
1989 {
1990 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1991 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1992
1993 /*
1994 * From here on, look only at management frames.
1995 * Data and control frames are already handled,
1996 * and unknown (reserved) frames are useless.
1997 */
1998 if (rx->skb->len < 24)
1999 return RX_DROP_MONITOR;
2000
2001 if (!ieee80211_is_mgmt(mgmt->frame_control))
2002 return RX_DROP_MONITOR;
2003
2004 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2005 return RX_DROP_MONITOR;
2006
2007 if (ieee80211_drop_unencrypted_mgmt(rx))
2008 return RX_DROP_UNUSABLE;
2009
2010 return RX_CONTINUE;
2011 }
2012
/*
 * Handle Action frames the kernel processes itself: block-ack (BA)
 * session management, spectrum management, SA Query and mesh
 * peering/path selection.  Malformed frames are flagged with
 * IEEE80211_RX_MALFORMED_ACTION_FRM and passed on so a later handler
 * can return them to the sender.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int len = rx->skb->len;

	if (!ieee80211_is_action(mgmt->frame_control))
		return RX_CONTINUE;

	/* drop too small frames */
	if (len < IEEE80211_MIN_ACTION_SIZE)
		return RX_DROP_UNUSABLE;

	/* only public action frames may come from an unknown station */
	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
		return RX_DROP_UNUSABLE;

	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_DROP_UNUSABLE;

	switch (mgmt->u.action.category) {
	case WLAN_CATEGORY_BACK:
		/*
		 * The aggregation code is not prepared to handle
		 * anything but STA/AP due to the BSSID handling;
		 * IBSS could work in the code but isn't supported
		 * by drivers or the standard.
		 */
		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_AP)
			break;

		/* verify action_code is present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			break;

		/* length-check each BA action before queueing it for work */
		switch (mgmt->u.action.u.addba_req.action_code) {
		case WLAN_ACTION_ADDBA_REQ:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.addba_req)))
				goto invalid;
			break;
		case WLAN_ACTION_ADDBA_RESP:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.addba_resp)))
				goto invalid;
			break;
		case WLAN_ACTION_DELBA:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.delba)))
				goto invalid;
			break;
		default:
			goto invalid;
		}

		goto queue;
	case WLAN_CATEGORY_SPECTRUM_MGMT:
		/* spectrum management is only handled on the 5 GHz band */
		if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
			break;

		if (sdata->vif.type != NL80211_IFTYPE_STATION)
			break;

		/* verify action_code is present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			break;

		switch (mgmt->u.action.u.measurement.action_code) {
		case WLAN_ACTION_SPCT_MSR_REQ:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.measurement)))
				break;
			/* measurement requests are answered synchronously */
			ieee80211_process_measurement_req(sdata, mgmt, len);
			goto handled;
		case WLAN_ACTION_SPCT_CHL_SWITCH:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.chan_switch)))
				break;

			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				break;

			/* only accept a channel switch from our own AP */
			if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
				break;

			goto queue;
		}
		break;
	case WLAN_CATEGORY_SA_QUERY:
		if (len < (IEEE80211_MIN_ACTION_SIZE +
			   sizeof(mgmt->u.action.u.sa_query)))
			break;

		switch (mgmt->u.action.u.sa_query.action) {
		case WLAN_ACTION_SA_QUERY_REQUEST:
			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				break;
			ieee80211_process_sa_query_req(sdata, mgmt, len);
			goto handled;
		}
		break;
	case WLAN_CATEGORY_MESH_PLINK:
	case WLAN_CATEGORY_MESH_PATH_SEL:
		if (!ieee80211_vif_is_mesh(&sdata->vif))
			break;
		goto queue;
	}

	return RX_CONTINUE;

invalid:
	status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
	/* will return in the next handlers */
	return RX_CONTINUE;

handled:
	/* frame fully processed above; account for it and free it */
	if (rx->sta)
		rx->sta->rx_packets++;
	dev_kfree_skb(rx->skb);
	return RX_QUEUED;

queue:
	/* defer to the interface's work queue for process context */
	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
	skb_queue_tail(&sdata->skb_queue, rx->skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
	if (rx->sta)
		rx->sta->rx_packets++;
	return RX_QUEUED;
}
2146
2147 static ieee80211_rx_result debug_noinline
2148 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2149 {
2150 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2151
2152 /* skip known-bad action frames and return them in the next handler */
2153 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2154 return RX_CONTINUE;
2155
2156 /*
2157 * Getting here means the kernel doesn't know how to handle
2158 * it, but maybe userspace does ... include returned frames
2159 * so userspace can register for those to know whether ones
2160 * it transmitted were processed or returned.
2161 */
2162
2163 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2164 rx->skb->data, rx->skb->len,
2165 GFP_ATOMIC)) {
2166 if (rx->sta)
2167 rx->sta->rx_packets++;
2168 dev_kfree_skb(rx->skb);
2169 return RX_QUEUED;
2170 }
2171
2172
2173 return RX_CONTINUE;
2174 }
2175
/*
 * Return unhandled action frames to their sender by echoing the frame
 * back with the 0x80 "error" bit set in the category field, per
 * 802.11-2007 7.3.1.11.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
	struct sk_buff *nskb;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (!ieee80211_is_action(mgmt->frame_control))
		return RX_CONTINUE;

	/*
	 * For AP mode, hostapd is responsible for handling any action
	 * frames that we didn't handle, including returning unknown
	 * ones. For all other modes we will return them to the sender,
	 * setting the 0x80 bit in the action category, as required by
	 * 802.11-2007 7.3.1.11.
	 * Newer versions of hostapd shall also use the management frame
	 * registration mechanisms, but older ones still use cooked
	 * monitor interfaces so push all frames there.
	 */
	if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
	    (sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
		return RX_DROP_MONITOR;

	/* do not return rejected action frames */
	if (mgmt->u.action.category & 0x80)
		return RX_DROP_UNUSABLE;

	/* copy with headroom for the TX path; on failure just drop */
	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
			       GFP_ATOMIC);
	if (nskb) {
		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;

		/* mark as rejected and swap addresses to send it back */
		nmgmt->u.action.category |= 0x80;
		memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
		memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);

		/* wipe the RX status so the TX path starts clean */
		memset(nskb->cb, 0, sizeof(nskb->cb));

		ieee80211_tx_skb(rx->sdata, nskb);
	}
	dev_kfree_skb(rx->skb);
	return RX_QUEUED;
}
2223
/*
 * Final management-frame handler: give the work-frame code first
 * refusal, filter subtypes by interface type, then queue the frame
 * for the interface work (MLME/IBSS/mesh) in process context.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	ieee80211_rx_result rxs;
	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
	__le16 stype;

	/* frames belonging to off-channel/auth work are consumed there */
	rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
	if (rxs != RX_CONTINUE)
		return rxs;

	/* compare subtype in little-endian to avoid per-frame swaps */
	stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);

	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
	    sdata->vif.type != NL80211_IFTYPE_STATION)
		return RX_DROP_MONITOR;

	switch (stype) {
	case cpu_to_le16(IEEE80211_STYPE_BEACON):
	case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
		/* process for all: mesh, mlme, ibss */
		break;
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
		/* process only for station */
		if (sdata->vif.type != NL80211_IFTYPE_STATION)
			return RX_DROP_MONITOR;
		break;
	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
		/* process only for ibss */
		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
			return RX_DROP_MONITOR;
		break;
	default:
		return RX_DROP_MONITOR;
	}

	/* queue up frame and kick off work to process it */
	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
	skb_queue_tail(&sdata->skb_queue, rx->skb);
	ieee80211_queue_work(&rx->local->hw, &sdata->work);
	if (rx->sta)
		rx->sta->rx_packets++;

	return RX_QUEUED;
}
2273
2274 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2275 struct ieee80211_rx_data *rx)
2276 {
2277 int keyidx;
2278 unsigned int hdrlen;
2279
2280 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2281 if (rx->skb->len >= hdrlen + 4)
2282 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2283 else
2284 keyidx = -1;
2285
2286 if (!rx->sta) {
2287 /*
2288 * Some hardware seem to generate incorrect Michael MIC
2289 * reports; ignore them to avoid triggering countermeasures.
2290 */
2291 return;
2292 }
2293
2294 if (!ieee80211_has_protected(hdr->frame_control))
2295 return;
2296
2297 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2298 /*
2299 * APs with pairwise keys should never receive Michael MIC
2300 * errors for non-zero keyidx because these are reserved for
2301 * group keys and only the AP is sending real multicast
2302 * frames in the BSS.
2303 */
2304 return;
2305 }
2306
2307 if (!ieee80211_is_data(hdr->frame_control) &&
2308 !ieee80211_is_auth(hdr->frame_control))
2309 return;
2310
2311 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2312 GFP_ATOMIC);
2313 }
2314
/* TODO: use IEEE80211_RX_FRAGMENTED */
/*
 * Deliver a frame that fell out of the RX handlers to every monitor
 * interface configured for "cooked" frames, prefixed with a minimal
 * radiotap header (flags, optional rate, channel).
 */
static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
					struct ieee80211_rate *rate)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_local *local = rx->local;
	/* minimal radiotap header built in front of the frame */
	struct ieee80211_rtap_hdr {
		struct ieee80211_radiotap_header hdr;
		u8 flags;
		u8 rate_or_pad;
		__le16 chan_freq;
		__le16 chan_flags;
	} __packed *rthdr;
	struct sk_buff *skb = rx->skb, *skb2;
	struct net_device *prev_dev = NULL;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/*
	 * If cooked monitor has been processed already, then
	 * don't do it again. If not, set the flag.
	 */
	if (rx->flags & IEEE80211_RX_CMNTR)
		goto out_free_skb;
	rx->flags |= IEEE80211_RX_CMNTR;

	/* ensure enough headroom to push the radiotap header */
	if (skb_headroom(skb) < sizeof(*rthdr) &&
	    pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
		goto out_free_skb;

	rthdr = (void *)skb_push(skb, sizeof(*rthdr));
	memset(rthdr, 0, sizeof(*rthdr));
	rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
	rthdr->hdr.it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_CHANNEL));

	/* rate field is only valid (and advertised) for non-HT rates */
	if (rate) {
		rthdr->rate_or_pad = rate->bitrate / 5;
		rthdr->hdr.it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
	}
	rthdr->chan_freq = cpu_to_le16(status->freq);

	if (status->band == IEEE80211_BAND_5GHZ)
		rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
						IEEE80211_CHAN_5GHZ);
	else
		rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
						IEEE80211_CHAN_2GHZ);

	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	/*
	 * Hand a clone to every cooked monitor interface except the
	 * last; the last one receives the original skb below so we
	 * avoid one clone.
	 */
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
		    !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
		return;
	}

 out_free_skb:
	dev_kfree_skb(skb);
}
2400
/*
 * Dispose of an RX handler chain's final verdict: hand unconsumed
 * frames to cooked monitor, free unusable ones, and account frames a
 * handler took ownership of.
 */
static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
					 ieee80211_rx_result res)
{
	switch (res) {
	case RX_DROP_MONITOR:
		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
		if (rx->sta)
			rx->sta->rx_dropped++;
		/* fall through */
	case RX_CONTINUE: {
		/* frame was not consumed -- give cooked monitor a shot */
		struct ieee80211_rate *rate = NULL;
		struct ieee80211_supported_band *sband;
		struct ieee80211_rx_status *status;

		status = IEEE80211_SKB_RXCB((rx->skb));

		sband = rx->local->hw.wiphy->bands[status->band];
		/* rate stays NULL for HT frames: rate_idx is an MCS
		 * index there, not a bitrate table index */
		if (!(status->flag & RX_FLAG_HT))
			rate = &sband->bitrates[status->rate_idx];

		ieee80211_rx_cooked_monitor(rx, rate);
		break;
	}
	case RX_DROP_UNUSABLE:
		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
		if (rx->sta)
			rx->sta->rx_dropped++;
		dev_kfree_skb(rx->skb);
		break;
	case RX_QUEUED:
		/* a handler took ownership of the skb; just count it */
		I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
		break;
	}
}
2435
2436 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
2437 struct sk_buff_head *frames)
2438 {
2439 ieee80211_rx_result res = RX_DROP_MONITOR;
2440 struct sk_buff *skb;
2441
2442 #define CALL_RXH(rxh) \
2443 do { \
2444 res = rxh(rx); \
2445 if (res != RX_CONTINUE) \
2446 goto rxh_next; \
2447 } while (0);
2448
2449 while ((skb = __skb_dequeue(frames))) {
2450 /*
2451 * all the other fields are valid across frames
2452 * that belong to an aMPDU since they are on the
2453 * same TID from the same station
2454 */
2455 rx->skb = skb;
2456 rx->flags = 0;
2457
2458 CALL_RXH(ieee80211_rx_h_decrypt)
2459 CALL_RXH(ieee80211_rx_h_check_more_data)
2460 CALL_RXH(ieee80211_rx_h_sta_process)
2461 CALL_RXH(ieee80211_rx_h_defragment)
2462 CALL_RXH(ieee80211_rx_h_ps_poll)
2463 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2464 /* must be after MMIC verify so header is counted in MPDU mic */
2465 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2466 CALL_RXH(ieee80211_rx_h_amsdu)
2467 #ifdef CONFIG_MAC80211_MESH
2468 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2469 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2470 #endif
2471 CALL_RXH(ieee80211_rx_h_data)
2472
2473 /* special treatment -- needs the queue */
2474 res = ieee80211_rx_h_ctrl(rx, frames);
2475 if (res != RX_CONTINUE)
2476 goto rxh_next;
2477
2478 CALL_RXH(ieee80211_rx_h_mgmt_check)
2479 CALL_RXH(ieee80211_rx_h_action)
2480 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2481 CALL_RXH(ieee80211_rx_h_action_return)
2482 CALL_RXH(ieee80211_rx_h_mgmt)
2483
2484 rxh_next:
2485 ieee80211_rx_handlers_result(rx, res);
2486
2487 #undef CALL_RXH
2488 }
2489 }
2490
2491 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2492 {
2493 struct sk_buff_head reorder_release;
2494 ieee80211_rx_result res = RX_DROP_MONITOR;
2495
2496 __skb_queue_head_init(&reorder_release);
2497
2498 #define CALL_RXH(rxh) \
2499 do { \
2500 res = rxh(rx); \
2501 if (res != RX_CONTINUE) \
2502 goto rxh_next; \
2503 } while (0);
2504
2505 CALL_RXH(ieee80211_rx_h_passive_scan)
2506 CALL_RXH(ieee80211_rx_h_check)
2507
2508 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2509
2510 ieee80211_rx_handlers(rx, &reorder_release);
2511 return;
2512
2513 rxh_next:
2514 ieee80211_rx_handlers_result(rx, res);
2515
2516 #undef CALL_RXH
2517 }
2518
/*
 * This function makes calls into the RX path. Therefore the
 * caller must hold the sta_info->lock and everything has to
 * be under rcu_read_lock protection as well.
 */
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
{
	struct sk_buff_head frames;
	/* minimal rx context: only the fields the handlers need */
	struct ieee80211_rx_data rx = {
		.sta = sta,
		.sdata = sta->sdata,
		.local = sta->local,
		.queue = tid,
	};
	struct tid_ampdu_rx *tid_agg_rx;

	/* session may have been torn down before the timer fired */
	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		return;

	__skb_queue_head_init(&frames);

	/* flush expired frames out of the reorder buffer under its lock */
	spin_lock(&tid_agg_rx->reorder_lock);
	ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
	spin_unlock(&tid_agg_rx->reorder_lock);

	ieee80211_rx_handlers(&rx, &frames);
}
2547
2548 /* main receive path */
2549
/*
 * Decide whether this interface should process the frame at all.
 * Returns 1 to process, 0 to skip the interface.  For frames only
 * accepted because the device is promiscuous or we are scanning,
 * IEEE80211_RX_RA_MATCH is cleared in status->rx_flags so later
 * handlers know the frame was not addressed to us.
 */
static int prepare_for_handlers(struct ieee80211_rx_data *rx,
				struct ieee80211_hdr *hdr)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
	int multicast = is_multicast_ether_addr(hdr->addr1);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		if (!bssid && !sdata->u.mgd.use_4addr)
			return 0;
		if (!multicast &&
		    compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
			/* not for us -- only keep it if promiscuous */
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;
			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
		}
		break;
	case NL80211_IFTYPE_ADHOC:
		if (!bssid)
			return 0;
		if (ieee80211_is_beacon(hdr->frame_control)) {
			/* beacons are always interesting in IBSS */
			return 1;
		}
		else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
			/* other BSS -- only while scanning */
			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
				return 0;
			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
		} else if (!multicast &&
			   compare_ether_addr(sdata->vif.addr,
					      hdr->addr1) != 0) {
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;
			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
		} else if (!rx->sta) {
			/* create a station entry for a new IBSS peer */
			int rate_idx;
			if (status->flag & RX_FLAG_HT)
				rate_idx = 0; /* TODO: HT rates */
			else
				rate_idx = status->rate_idx;
			rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
					hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
		}
		break;
	case NL80211_IFTYPE_MESH_POINT:
		if (!multicast &&
		    compare_ether_addr(sdata->vif.addr,
				       hdr->addr1) != 0) {
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;

			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
		}
		break;
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_AP:
		if (!bssid) {
			/* frames without a BSSID must be for us directly */
			if (compare_ether_addr(sdata->vif.addr,
					       hdr->addr1))
				return 0;
		} else if (!ieee80211_bssid_match(bssid,
					sdata->vif.addr)) {
			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
				return 0;
			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
		}
		break;
	case NL80211_IFTYPE_WDS:
		/* WDS only carries data frames from the peer */
		if (bssid || !ieee80211_is_data(hdr->frame_control))
			return 0;
		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
			return 0;
		break;
	default:
		/* should never get here */
		WARN_ON(1);
		break;
	}

	return 1;
}
2633
/*
 * This function returns whether or not the SKB
 * was destined for RX processing or not, which,
 * if consume is true, is equivalent to whether
 * or not the skb was consumed.
 */
static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
					    struct sk_buff *skb, bool consume)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int prepares;

	rx->skb = skb;
	/* assume a direct match; prepare_for_handlers() clears it if not */
	status->rx_flags |= IEEE80211_RX_RA_MATCH;
	prepares = prepare_for_handlers(rx, hdr);

	if (!prepares)
		return false;

	/* hardware MIC failure: report it, but do not process the frame */
	if (status->flag & RX_FLAG_MMIC_ERROR) {
		if (status->rx_flags & IEEE80211_RX_RA_MATCH)
			ieee80211_rx_michael_mic_report(hdr, rx);
		return false;
	}

	/*
	 * Not allowed to consume the caller's skb: work on a copy.
	 * If the copy fails we still report the frame as handled so
	 * the caller doesn't process it again.
	 */
	if (!consume) {
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit())
				wiphy_debug(local->hw.wiphy,
					"failed to copy multicast frame for %s\n",
					sdata->name);
			return true;
		}

		rx->skb = skb;
	}

	ieee80211_invoke_rx_handlers(rx);
	return true;
}
2678
/*
 * This is the actual Rx frames handler. As it belongs to the Rx path
 * it must be called with rcu_read_lock protection.
 *
 * Finds every station/interface the frame may be destined for; all
 * candidates but the last get a copy, the last one consumes the skb.
 */
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
					 struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_rx_data rx;
	struct ieee80211_sub_if_data *prev;
	struct sta_info *sta, *tmp, *prev_sta;
	int err = 0;

	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
	memset(&rx, 0, sizeof(rx));
	rx.skb = skb;
	rx.local = local;

	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
		local->dot11ReceivedFragmentCount++;

	if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
		     test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
		status->rx_flags |= IEEE80211_RX_IN_SCAN;

	/* management frames are parsed in full, so linearize them;
	 * for others only the header needs to be directly accessible */
	if (ieee80211_is_mgmt(fc))
		err = skb_linearize(skb);
	else
		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));

	if (err) {
		dev_kfree_skb(skb);
		return;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	ieee80211_parse_qos(&rx);
	ieee80211_verify_alignment(&rx);

	/*
	 * Data frames: deliver per matching station entry for the
	 * transmitter address.  Each candidate but the last gets a
	 * copy of the skb; the last consumes it.
	 */
	if (ieee80211_is_data(fc)) {
		prev_sta = NULL;

		for_each_sta_info(local, hdr->addr2, sta, tmp) {
			if (!prev_sta) {
				prev_sta = sta;
				continue;
			}

			rx.sta = prev_sta;
			rx.sdata = prev_sta->sdata;
			ieee80211_prepare_and_rx_handle(&rx, skb, false);

			prev_sta = sta;
		}

		if (prev_sta) {
			rx.sta = prev_sta;
			rx.sdata = prev_sta->sdata;

			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
				return;
		}
	}

	/* otherwise try every running interface (monitor and AP_VLAN
	 * are handled elsewhere) */
	prev = NULL;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			continue;

		/*
		 * frame is destined for this interface, but if it's
		 * not also for the previous one we handle that after
		 * the loop to avoid copying the SKB once too much
		 */

		if (!prev) {
			prev = sdata;
			continue;
		}

		rx.sta = sta_info_get_bss(prev, hdr->addr2);
		rx.sdata = prev;
		ieee80211_prepare_and_rx_handle(&rx, skb, false);

		prev = sdata;
	}

	if (prev) {
		rx.sta = sta_info_get_bss(prev, hdr->addr2);
		rx.sdata = prev;

		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
			return;
	}

	/* nobody wanted the frame */
	dev_kfree_skb(skb);
}
2785
/*
 * This is the receive path handler. It is called by a low level driver
 * when an 802.11 MPDU is received from the hardware.
 *
 * Validates the driver-reported band and rate, handles monitor-mode
 * delivery, then dispatches the frame into the RX handler chain under
 * rcu_read_lock.  Must be called from softirq context.
 */
void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	WARN_ON_ONCE(softirq_count() == 0);

	/* the driver must report a valid band it actually registered */
	if (WARN_ON(status->band < 0 ||
		    status->band >= IEEE80211_NUM_BANDS))
		goto drop;

	sband = local->hw.wiphy->bands[status->band];
	if (WARN_ON(!sband))
		goto drop;

	/*
	 * If we're suspending, it is possible although not too likely
	 * that we'd be receiving frames after having already partially
	 * quiesced the stack. We can't process such frames then since
	 * that might, for example, cause stations to be added or other
	 * driver callbacks be invoked.
	 */
	if (unlikely(local->quiescing || local->suspended))
		goto drop;

	/*
	 * The same happens when we're not even started,
	 * but that's worth a warning.
	 */
	if (WARN_ON(!local->started))
		goto drop;

	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
		/*
		 * Validate the rate, unless a PLCP error means that
		 * we probably can't have a valid rate here anyway.
		 */

		if (status->flag & RX_FLAG_HT) {
			/*
			 * rate_idx is MCS index, which can be [0-76]
			 * as documented on:
			 *
			 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
			 *
			 * Anything else would be some sort of driver or
			 * hardware error. The driver should catch hardware
			 * errors.
			 */
			if (WARN((status->rate_idx < 0 ||
				  status->rate_idx > 76),
				 "Rate marked as an HT rate but passed "
				 "status->rate_idx is not "
				 "an MCS index [0-76]: %d (0x%02x)\n",
				 status->rate_idx,
				 status->rate_idx))
				goto drop;
		} else {
			if (WARN_ON(status->rate_idx < 0 ||
				    status->rate_idx >= sband->n_bitrates))
				goto drop;
			rate = &sband->bitrates[status->rate_idx];
		}
	}

	/* start with a clean set of internal RX flags */
	status->rx_flags = 0;

	/*
	 * key references and virtual interfaces are protected using RCU
	 * and this requires that we are in a read-side RCU section during
	 * receive processing
	 */
	rcu_read_lock();

	/*
	 * Frames with failed FCS/PLCP checksum are not returned,
	 * all other frames are returned without radiotap header
	 * if it was previously present.
	 * Also, frames with less than 16 bytes are dropped.
	 */
	skb = ieee80211_rx_monitor(local, skb, rate);
	if (!skb) {
		rcu_read_unlock();
		return;
	}

	__ieee80211_rx_handle_packet(hw, skb);

	rcu_read_unlock();

	return;
 drop:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_rx);
2887
2888 /* This is a version of the rx handler that can be called from hard irq
2889 * context. Post the skb on the queue and schedule the tasklet */
2890 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
2891 {
2892 struct ieee80211_local *local = hw_to_local(hw);
2893
2894 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2895
2896 skb->pkt_type = IEEE80211_RX_MSG;
2897 skb_queue_tail(&local->skb_queue, skb);
2898 tasklet_schedule(&local->tasklet);
2899 }
2900 EXPORT_SYMBOL(ieee80211_rx_irqsafe);