/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
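/*
 * The ath_buf that owns an RX skb is stashed in the skb control buffer
 * (skb->cb) via SKB_CB_ATHBUF(), so it can be recovered when the skb is
 * dequeued from the EDMA RX FIFO.
 */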
static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) &&
	       (pkt_count > 50);
}
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];

		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1,
				       aphy->hw->wiphy->perm_addr) == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);

	return hw;
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
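	/*
	 * sc->rx.rxlink now points at this descriptor's ds_link word; the
	 * next call chains its buffer by writing the new DMA address through
	 * this pointer instead of self-linking the last descriptor.
	 */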
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);

	rx_edma->rx_fifo_hwsize = size;
}
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	}

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				     min(common->cachelsz, (u16)64));

	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "failed to allocate rx descriptors: %d\n",
			  error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
	}

	if (sc->rx.rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->is_monitoring))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
	     AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->nvifs > 1) ||
	    (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);

	sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	ATH_DBG_WARN(!stopped, "Could not stop RX, we could be "
		     "confusing the DMA engine when we start RX up\n");
	return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;

			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
->bitmap_ctrl
& 0x01;
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}
}
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rxs)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;

			if (aphy == NULL)
				continue;

			nskb = skb_copy(skb, GFP_ATOMIC);
			if (nskb)
				ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else {
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
	}
}
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */
		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * Reject the frame: we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway.
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    test_bit(rx_stats->rs_keyix, common->tkip_keymap))
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);

	return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if (ah->opmode != NL80211_IFTYPE_STATION)
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);

	last_rssi = aphy->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * Everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return 0;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
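		/*
		 * Example: for a QoS data frame padpos is 26, so padsize is 2;
		 * the header is shifted over the two pad bytes and the skb is
		 * pulled by 2, leaving header and payload contiguous.
		 */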
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case (0x10): /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x20): /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case (0x21): /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x12): /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x13): /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x23): /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf *div_ant_conf,
				      int main_rssi_avg, int alt_rssi_avg,
				      int alt_ratio)
{
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
	/* Adjust the fast_div_bias based on main and alt lna conf */
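	/*
	 * The switch key below packs main_lna_conf into the high nibble and
	 * alt_lna_conf into the low nibble (A-B = 0, LNA2 = 1, LNA1 = 2,
	 * A+B = 3), so e.g. 0x21 means main = LNA1, alt = LNA2.
	 */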
	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
	case (0x01): /* A-B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x02): /* A-B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	case (0x03): /* A-B A+B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x10): /* LNA2 A-B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x12): /* LNA2 LNA1 */
		ant_conf->fast_div_bias = 0x2;
		break;
	case (0x13): /* LNA2 A+B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x20): /* LNA1 A-B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x21): /* LNA1 LNA2 */
		ant_conf->fast_div_bias = 0x0;
		break;
	case (0x23): /* LNA1 A+B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x30): /* A+B A-B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x31): /* A+B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x32): /* A+B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	default:
		break;
	}
}
/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set, curr_bias;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when alt_rssi is positive */
	if (alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else if (antcomb->total_pkt_count ==
			 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
			alt_ratio = ((antcomb->alt_recv_cnt * 100) /
				     antcomb->total_pkt_count);
			if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
				short_scan = true;
		}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;
	curr_bias = div_ant_conf.fast_div_bias;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue;

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
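		/*
		 * rs_tstamp holds only the low 32 bits of the TSF; if it has
		 * wrapped relative to tsf_lower (in either direction), adjust
		 * the reconstructed 64-bit mactime by one 2^32 period.
		 */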
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_rx_skb_postprocess(common, skb, &rs,
					 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if (unlikely(ath9k_check_auto_sleep(sc) ||
			     (sc->ps_flags & (PS_WAIT_FOR_BEACON |
					      PS_WAIT_FOR_CAB |
					      PS_WAIT_FOR_PSPOLL_DATA))))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}