u16 len = le16_to_cpu(rx_hdr->len);
struct sk_buff *skb;
int ret;
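+ /* Cache frame_control up front: hdr points into the rx page, which
+  * may be freed by the skb_linearize() below. */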
+ __le16 fc = hdr->frame_control;
/* We received data from the HW, so stop the watchdog */
if (unlikely(len + IWL39_RX_FRAME_SIZE >
PAGE_SIZE << priv->hw_params.rx_page_order)) {
IWL_DEBUG_DROP(priv, "Corruption detected!\n");
return;
}
/* mac80211 currently doesn't support paged SKBs. Convert to a linear
 * SKB for management frames and for data frames that require
 * software decryption or software defragmentation. */
- if (ieee80211_is_mgmt(hdr->frame_control) ||
- ieee80211_has_protected(hdr->frame_control) ||
- ieee80211_has_morefrags(hdr->frame_control) ||
+ if (ieee80211_is_mgmt(fc) ||
+ ieee80211_has_protected(fc) ||
+ ieee80211_has_morefrags(fc) ||
le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
ret = skb_linearize(skb);
else
goto out;
}
- iwl_update_stats(priv, false, hdr->frame_control, len);
+ /*
+ * XXX: We cannot touch the page and its virtual memory (pkt) after
+ * here. It might have already been freed by the above skb change.
+ */
+ iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
out:
priv->alloc_rxb_page--;
rxb->page = NULL;
if (priv->rx_handlers[pkt->hdr.cmd]) {
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
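+ /* Call the handler last: it may take or free rxb->page, after which
+  * pkt must not be dereferenced. */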
+ priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
} else {
/* No handling needed */
IWL_DEBUG_RX(priv,
"r %d i %d No handler needed for %s, 0x%02x\n",
r, i, get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
if (reclaim) {
/* Invoke any callbacks, transfer the buffer to caller,
* and fire off the (possibly) blocking iwl_send_cmd()
* as we reclaim the driver command queue */
- if (rxb && rxb->page)
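+ /* rxb itself cannot be NULL here; only its page may have been taken
+  * by an rx_handler. */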
+ if (rxb->page)
iwl_tx_cmd_complete(priv, rxb);
else
IWL_WARN(priv, "Claim null rxb?\n");
struct iwl_rx_mem_buffer *rxb;
struct page *page;
unsigned long flags;
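+ /* Build the allocation mask in a local instead of clobbering the
+  * caller-supplied priority. */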
+ gfp_t gfp_mask = priority;
while (1) {
spin_lock_irqsave(&rxq->lock, flags);
spin_unlock_irqrestore(&rxq->lock, flags);
if (rxq->free_count > RX_LOW_WATERMARK)
- priority |= __GFP_NOWARN;
+ gfp_mask |= __GFP_NOWARN;
if (priv->hw_params.rx_page_order > 0)
- priority |= __GFP_COMP;
+ gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
- page = alloc_pages(priority, priv->hw_params.rx_page_order);
+ page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(priv, "alloc_pages failed, "
"order: %d\n",
priv->hw_params.rx_page_order);
{
struct sk_buff *skb;
int ret = 0;
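+ /* Cache frame_control up front: hdr points into the rx page, which
+  * may be freed by the skb_linearize() below. */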
+ __le16 fc = hdr->frame_control;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
/* mac80211 currently doesn't support paged SKBs. Convert to a linear
 * SKB for management frames and for data frames that require
 * software decryption or software defragmentation. */
- if (ieee80211_is_mgmt(hdr->frame_control) ||
- ieee80211_has_protected(hdr->frame_control) ||
- ieee80211_has_morefrags(hdr->frame_control) ||
+ if (ieee80211_is_mgmt(fc) ||
+ ieee80211_has_protected(fc) ||
+ ieee80211_has_morefrags(fc) ||
le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
ret = skb_linearize(skb);
else
goto out;
}
- iwl_update_stats(priv, false, hdr->frame_control, len);
+ /*
+ * XXX: We cannot touch the page and its virtual memory (hdr) after
+ * here. It might have already been freed by the above skb change.
+ */
+
+ iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
struct iwl_rx_mem_buffer *rxb;
struct page *page;
unsigned long flags;
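+ /* Build the allocation mask in a local instead of clobbering the
+  * caller-supplied priority. */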
+ gfp_t gfp_mask = priority;
while (1) {
spin_lock_irqsave(&rxq->lock, flags);
spin_unlock_irqrestore(&rxq->lock, flags);
if (rxq->free_count > RX_LOW_WATERMARK)
- priority |= __GFP_NOWARN;
+ gfp_mask |= __GFP_NOWARN;
if (priv->hw_params.rx_page_order > 0)
- priority |= __GFP_COMP;
+ gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
- page = alloc_pages(priority, priv->hw_params.rx_page_order);
+ page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
if (priv->rx_handlers[pkt->hdr.cmd]) {
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
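+ /* Call the handler last: it may take or free rxb->page, after which
+  * pkt must not be dereferenced. */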
+ priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
} else {
/* No handling needed */
IWL_DEBUG_RX(priv,
"r %d i %d No handler needed for %s, 0x%02x\n",
r, i, get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
if (reclaim) {
/* Invoke any callbacks, transfer the buffer to caller,
* and fire off the (possibly) blocking iwl_send_cmd()
* as we reclaim the driver command queue */
- if (rxb && rxb->page)
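+ /* rxb itself cannot be NULL here; only its page may have been taken
+  * by an rx_handler. */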
+ if (rxb->page)
iwl_tx_cmd_complete(priv, rxb);
else
IWL_WARN(priv, "Claim null rxb?\n");