struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
unsigned long flags;
int q, mapping;
- u8 hlid = 0;
+ u8 hlid;
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
- if (wlvif->bss_type == BSS_TYPE_AP_BSS)
- hlid = wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
spin_lock_irqsave(&wl->wl_lock, flags);
/* queue the packet */
- if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
- if (!test_bit(hlid, wlvif->links_map)) {
- wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
- hlid, q);
- dev_kfree_skb(skb);
- goto out;
- }
-
- wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
- skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
- } else {
- skb_queue_tail(&wl->tx_queue[q], skb);
+ if (hlid == WL12XX_INVALID_LINK_ID ||
+ !test_bit(hlid, wlvif->links_map)) {
+ wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+ dev_kfree_skb(skb);
+ goto out;
}
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
wl->tx_queue_count[q]++;
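The core of the hunk above: with the global STA tx_queue gone, a frame either lands on a valid per-link queue or is dropped on the spot. A minimal userspace sketch of that decision, assuming invented scaffolding (MAX_LINKS, tx_queue_len and queue_packet are stand-ins; links_map and the invalid-hlid test mirror the patch):

#include <stdbool.h>
#include <stdio.h>

#define MAX_LINKS    8        /* demo value */
#define INVALID_LINK 0xff

static unsigned long links_map;         /* bit n set => link n is active */
static int tx_queue_len[MAX_LINKS];     /* stand-in for per-link skb queues */

/* Decision modeled on the patched wl1271_op_tx(): no global fallback
 * queue; either the packet lands on a mapped link or it is dropped. */
static bool queue_packet(unsigned char hlid)
{
	if (hlid == INVALID_LINK || !(links_map & (1UL << hlid))) {
		printf("DROP hlid %u\n", hlid);  /* dev_kfree_skb() in the driver */
		return false;
	}
	tx_queue_len[hlid]++;                    /* skb_queue_tail() in the driver */
	return true;
}

int main(void)
{
	links_map = 1UL << 2;           /* only link 2 is mapped */
	queue_packet(2);                /* queued */
	queue_packet(5);                /* dropped: link not in links_map */
	queue_packet(INVALID_LINK);     /* dropped: no link resolved */
	return 0;
}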
mutex_lock(&wl->mutex);
/* let's notify MAC80211 about the remaining pending TX frames */
- wl1271_tx_reset(wl, reset_tx_queues);
+ wl12xx_tx_reset_wlvif(wl, wlvif);
+ wl12xx_tx_reset(wl, reset_tx_queues);
wl1271_power_off(wl);
wl->band = IEEE80211_BAND_2GHZ;
return 0;
}
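The interface-removal path now resets in two stages: a per-vif pass that flushes the vif's link queues (in the driver this hands pending skbs back to mac80211 and adjusts the shared counters), then a global pass that zeroes the device-wide bookkeeping, as the last hunk of this patch shows. A sketch of that ordering, with all names below invented:

#include <stdio.h>

#define NUM_TX_QUEUES 4

static int tx_queue_count[NUM_TX_QUEUES];

/* Per-vif pass (wl12xx_tx_reset_wlvif in the patch): flush each of the
 * vif's link queues, decrementing the shared counters as packets go. */
static void tx_reset_vif(void)
{
	printf("vif link queues flushed\n");
}

/* Global pass (wl12xx_tx_reset): with every vif flushed, zero the
 * device-wide bookkeeping. */
static void tx_reset_global(void)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		tx_queue_count[i] = 0;
}

int main(void)
{
	tx_reset_vif();      /* per-vif state first... */
	tx_reset_global();   /* ...then the global counters */
	return 0;
}

The order matters: the per-vif pass still reads and adjusts the shared counters that the global pass wipes.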
-/* TODO: change wl1271_tx_reset(), so we can get sta as param */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
wl->hw = hw;
wl->plat_dev = plat_dev;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- skb_queue_head_init(&wl->tx_queue[i]);
-
for (i = 0; i < NUM_TX_QUEUES; i++)
for (j = 0; j < WL12XX_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
}
}
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct ieee80211_vif *vif,
- struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-
if (wl12xx_is_dummy_packet(wl, skb))
return wl->system_hlid;
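This hunk only shows the dummy-packet fast path of the now-unified helper; judging from the rest of the patch, wl12xx_tx_get_hlid() dispatches on bss_type, using wl12xx_tx_get_hlid_ap() for AP links. A toy model of that shape (every helper below is an invented stand-in; the real STA branch inspects the 802.11 header, collapsed here to a stored link id):

#include <stdio.h>

#define SYSTEM_HLID 0

enum bss_type { BSS_TYPE_AP_BSS, BSS_TYPE_STA_BSS };

/* Invented stand-ins for driver state. */
static int is_dummy_packet(int pkt)     { return pkt == 0; }
static unsigned char get_hlid_ap(void)  { return 3; }  /* per-station link */
static unsigned char sta_hlid = 1;                     /* link to our AP */

/* Rough shape of the unified lookup: one entry point for every bss type. */
static unsigned char tx_get_hlid(enum bss_type type, int pkt)
{
	if (is_dummy_packet(pkt))
		return SYSTEM_HLID;
	if (type == BSS_TYPE_AP_BSS)
		return get_hlid_ap();
	return sta_hlid;
}

int main(void)
{
	printf("ap=%u sta=%u dummy=%u\n",
	       tx_get_hlid(BSS_TYPE_AP_BSS, 1),
	       tx_get_hlid(BSS_TYPE_STA_BSS, 1),
	       tx_get_hlid(BSS_TYPE_STA_BSS, 0));
	return 0;
}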
wlvif->default_key = idx;
}
}
- hlid = wl1271_tx_get_hlid(wl, vif, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
if (hlid == WL12XX_INVALID_LINK_ID) {
wl1271_error("invalid hlid. dropping skb 0x%p", skb);
return -EINVAL;
return &queues[q];
}
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+ struct wl1271_link *lnk)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
unsigned long flags;
struct sk_buff_head *queue;
- queue = wl1271_select_queue(wl, wl->tx_queue);
+ queue = wl1271_select_queue(wl, lnk->tx_queue);
if (!queue)
- goto out;
+ return NULL;
skb = skb_dequeue(queue);
-
-out:
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
return skb;
}
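wl12xx_lnk_skb_dequeue() is the per-link half of the dequeue path: pick a non-empty queue on the link, take one packet, and decrement the shared per-AC counter under wl_lock. A simplified model, with queue-selection priority reduced to index order and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

#define NUM_TX_QUEUES 4

/* Toy per-link queue: one depth counter per access category (AC). */
struct link { int q_len[NUM_TX_QUEUES]; };

static int tx_queue_count[NUM_TX_QUEUES];   /* shared, lock-protected */
static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;

/* Modeled on wl12xx_lnk_skb_dequeue(): take one packet from the first
 * non-empty queue and fix up the shared counter under the lock. */
static int lnk_dequeue(struct link *lnk)
{
	int q;

	for (q = 0; q < NUM_TX_QUEUES; q++) {
		if (!lnk->q_len[q])
			continue;
		lnk->q_len[q]--;
		pthread_mutex_lock(&wl_lock);
		tx_queue_count[q]--;
		pthread_mutex_unlock(&wl_lock);
		return q;
	}
	return -1;   /* every queue on this link is empty */
}

int main(void)
{
	struct link l = { .q_len = { 0, 2, 0, 0 } };

	tx_queue_count[1] = 2;
	printf("dequeued from AC %d\n", lnk_dequeue(&l));
	return 0;
}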
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
- unsigned long flags;
int i, h, start_hlid;
- struct sk_buff_head *queue;
/* start from the link after the last one */
start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
if (!test_bit(h, wlvif->links_map))
continue;
- queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
- if (!queue)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+ if (!skb)
continue;
- skb = skb_dequeue(queue);
- if (skb)
- break;
+ wlvif->last_tx_hlid = h;
+ break;
}
- if (skb) {
- int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- wlvif->last_tx_hlid = h;
- spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count[q]--;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- } else {
+ if (!skb)
wlvif->last_tx_hlid = 0;
- }
return skb;
}
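wl12xx_vif_skb_dequeue() is the scheduling half: it round-robins over the vif's links, resuming one past last_tx_hlid so a busy link cannot starve the rest. A runnable model of that walk (MAX_LINKS and pending[] are demo stand-ins):

#include <stdio.h>

#define MAX_LINKS 8   /* demo value */

static unsigned long links_map;
static int pending[MAX_LINKS];
static int last_tx_hlid;

/* Modeled on wl12xx_vif_skb_dequeue(): scan every link once, starting
 * one past the link served last time. */
static int vif_dequeue(void)
{
	int start = (last_tx_hlid + 1) % MAX_LINKS;
	int i, h;

	for (i = 0; i < MAX_LINKS; i++) {
		h = (start + i) % MAX_LINKS;
		if (!(links_map & (1UL << h)) || !pending[h])
			continue;
		pending[h]--;
		last_tx_hlid = h;
		return h;
	}
	last_tx_hlid = 0;
	return -1;   /* nothing queued on any link */
}

int main(void)
{
	int i;

	links_map = (1UL << 1) | (1UL << 4);
	pending[1] = 2;
	pending[4] = 1;
	for (i = 0; i < 4; i++)
		printf("%d ", vif_dequeue());  /* prints: 1 4 1 -1 */
	printf("\n");
	return 0;
}

Note how the two queued packets on link 1 are interleaved with link 4's single packet rather than drained first.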
unsigned long flags;
struct sk_buff *skb = NULL;
- if (wlvif->bss_type == BSS_TYPE_AP_BSS)
- skb = wl1271_ap_skb_dequeue(wl, wlvif);
- else
- skb = wl1271_sta_skb_dequeue(wl);
-
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
int q;
return skb;
}
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct ieee80211_vif *vif,
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl12xx_is_dummy_packet(wl, skb)) {
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
- } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
- u8 hlid = wl1271_tx_get_hlid(wl, vif, skb);
+ } else {
+ u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
- WL12XX_MAX_LINKS;
- } else {
- skb_queue_head(&wl->tx_queue[q], skb);
+ WL12XX_MAX_LINKS;
}
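When a frame has to be pushed back (see the tx_work hunks below), last_tx_hlid is rewound by one with a modular decrement, so the next dequeue pass, which starts at last_tx_hlid + 1, picks the same link and hence the same packet first. The arithmetic in isolation (MAX_LINKS is a demo value):

#include <stdio.h>

#define MAX_LINKS 24   /* demo value */

/* The modular decrement from the hunk above: rewinding to one link
 * *before* hlid makes hlid the first candidate on the next pass. */
static int rewind_hlid(int hlid)
{
	return (hlid + MAX_LINKS - 1) % MAX_LINKS;
}

int main(void)
{
	printf("%d %d %d\n", rewind_hlid(5), rewind_hlid(0), rewind_hlid(23));
	/* prints: 4 23 22 */
	return 0;
}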
spin_lock_irqsave(&wl->wl_lock, flags);
* Aggregation buffer is full.
* Flush buffer and try again.
*/
- wl1271_skb_queue_head(wl, vif, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
buf_offset, true);
sent_packets = true;
* Firmware buffer is full.
* Queue back last skb, and stop aggregating.
*/
- wl1271_skb_queue_head(wl, vif, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
/* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack;
}
/* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- struct ieee80211_vif *vif = wl->vif; /* TODO: get as param */
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int i;
- struct sk_buff *skb;
- struct ieee80211_tx_info *info;
/* TX failure */
- if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
- for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+ for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
wl1271_free_sta(wl, wlvif, i);
- wl1271_tx_reset_link_queues(wl, i);
- wl->links[i].allocated_pkts = 0;
- wl->links[i].prev_freed_pkts = 0;
- }
-
- wlvif->last_tx_hlid = 0;
- } else {
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
- skb);
-
- if (!wl12xx_is_dummy_packet(wl, skb)) {
- info = IEEE80211_SKB_CB(skb);
- info->status.rates[0].idx = -1;
- info->status.rates[0].count = 0;
- ieee80211_tx_status_ni(wl->hw, skb);
- }
- }
- }
+ else
+ wlvif->sta.ba_rx_bitmap = 0;
- wlvif->sta.ba_rx_bitmap = 0;
+ wl1271_tx_reset_link_queues(wl, i);
+ wl->links[i].allocated_pkts = 0;
+ wl->links[i].prev_freed_pkts = 0;
}
+ wlvif->last_tx_hlid = 0;
+}
+
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+ int i;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_queue_count[i] = 0;
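The per-vif reset walks only the links the vif actually owns, via for_each_set_bit() over links_map. A plain-C stand-in for that bitmap iteration (for_each_set_bit_demo is invented; the kernel macro is word-at-a-time and faster):

#include <stdio.h>

#define MAX_LINKS 24   /* demo value */

/* Visit only the link ids whose bit is set in the vif's links_map. */
#define for_each_set_bit_demo(bit, map, size)      \
	for ((bit) = 0; (bit) < (size); (bit)++)   \
		if (!((map) & (1UL << (bit)))) continue; else

int main(void)
{
	unsigned long links_map = (1UL << 0) | (1UL << 3) | (1UL << 7);
	int i;

	for_each_set_bit_demo(i, links_map, MAX_LINKS)
		printf("resetting link %d\n", i);
	return 0;
}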