/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "ar9003_mac.h"
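/*
 * The skb control block (skb->cb) is used to stash the ath_buf that owns
 * an RX skb while it sits in an EDMA FIFO, so the buffer can be recovered
 * once the hardware has filled it.
 */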
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) &&
	       (pkt_count > 50);
}
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
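/* Program the hardware default antenna and reset the diversity counter. */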
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
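/*
 * Move one free buffer from sc->rx.rxbuf into the hardware RX FIFO of the
 * given queue type. Returns false when the FIFO is already full.
 */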
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
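/* Fill the RX FIFO of the given queue with up to 'size' free buffers. */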
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}
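/* Drain a hardware RX FIFO, returning every buffer to sc->rx.rxbuf. */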
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}
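/* Initialize the software state of one EDMA RX queue. */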
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}
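/*
 * Allocate and DMA-map the RX buffers for the two EDMA queues (high and
 * low priority) and tell the hardware the usable buffer size.
 */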
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
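/* Re-arm both EDMA RX queues and start the PCU receive engine. */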
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
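/*
 * Top-level RX initialization: EDMA chips get per-queue FIFOs, older
 * chips get a classic descriptor ring with one skb per descriptor.
 */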
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
	     AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}
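/* (Re)start receive: relink all RX buffers and enable the PCU. */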
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}
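/*
 * Stop receive: abort the PCU, clear the RX filter and stop RX DMA.
 * Returns true only if DMA stopped cleanly without needing a reset.
 */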
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}
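/* Flush and drop everything currently pending in the RX queues. */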
void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	/* Walk the information elements looking for the TIM element */
	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
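/* Handle a beacon received from our AP while waiting in powersave state. */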
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return; /* not from our current AP */
	}

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
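/*
 * Pull completed frames out of the hardware RX FIFO onto the local
 * rx_buffers queue, requeueing corrupt descriptors back to the hardware.
 */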
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
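/* Return the next completed EDMA RX buffer, or NULL if none is ready. */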
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
#define is_mc_or_valid_tkip_keyix ((is_mc ||			\
		(rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
		 test_bit(rx_stats->rs_keyix, common->tkip_keymap))))

	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			bool is_mc;
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			is_mc = !!is_multicast_ether_addr(hdr->addr1);

			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    is_mc_or_valid_tkip_keyix)
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ATH_DBG_XMIT,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if (ah->opmode != NL80211_IFTYPE_STATION)
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid)) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return;
	}

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This lets us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case (0x10): /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x20): /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case (0x21): /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x12): /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x13): /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x23): /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA2)) {
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				} else {
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
				}
			} else if ((antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
			}
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->main_conf;
			}
		}
		break;
	default:
		break;
	}
}
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
	/* Adjust the fast_div_bias based on main and alt lna conf */
	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
	case (0x01): /* A-B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x02): /* A-B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	case (0x03): /* A-B A+B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x10): /* LNA2 A-B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x12): /* LNA2 LNA1 */
		ant_conf->fast_div_bias = 0x2;
		break;
	case (0x13): /* LNA2 A+B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x20): /* LNA1 A-B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x21): /* LNA1 LNA2 */
		ant_conf->fast_div_bias = 0x0;
		break;
	case (0x23): /* LNA1 A+B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x30): /* A+B A-B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x31): /* A+B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x32): /* A+B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	default:
		break;
	}
}
/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set, curr_bias;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when alt_rssi is positive */
	if (alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else if (antcomb->total_pkt_count ==
			 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
			alt_ratio = ((antcomb->alt_recv_cnt * 100) /
				     antcomb->total_pkt_count);
			if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
				short_scan = true;
		}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	/* Save current div configuration */
	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;
	curr_bias = div_ant_conf.fast_div_bias;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}
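/*
 * Main RX processing loop, run from the RX tasklet. With flush != 0 the
 * queues are drained without handing frames to mac80211.
 */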
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue_drop_frag;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			sc->rx.frag = NULL;

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    unlikely(ath9k_check_auto_sleep(sc)))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}