#define MAX_NO_RECLAIM_CMDS 6
-/*
- * The first entry in driver_data array in ieee80211_tx_info
- * that can be used by the transport.
- */
-#define IWL_TRANS_FIRST_DRIVER_DATA 2
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
/*
* @command_groups_size: number of command groups, to avoid illegal access
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
* we get the ALIVE from the uCode
+ * @cb_data_offs: offset inside skb->cb at which the transport stores its
+ *	data; there must be room for at least two pointers
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
int command_groups_size;
u32 sdio_adma_addr;
+
+ u8 cb_data_offs;
};
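The new field is only half of the contract; the other half lives at call sites
outside this excerpt. A rough sketch of both sides, assuming the mac80211 op
mode and the PCIe transport's configure path (page_offs/dev_cmd_offs are the
private fields the hunks below dereference; the surrounding code is
illustrative):

    /* op-mode side (illustrative): hand the transport a spot in skb->cb
     * that mac80211 leaves to the driver
     */
    trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
				      driver_data[2]);

    /* transport side (illustrative): carve the reserved area into the
     * two pointer slots used below
     */
    trans_pcie->page_offs = trans_cfg->cb_data_offs;
    trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);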
struct iwl_trans_dump_data {
return 0;
}
-static void iwl_pcie_free_tso_page(struct sk_buff *skb)
+static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
+ struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct page **page_ptr;
- if (info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]) {
- struct page *page =
- info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA];
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- __free_page(page);
- info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = NULL;
+ if (*page_ptr) {
+ __free_page(*page_ptr);
+ *page_ptr = NULL;
}
}
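The (void *)((u8 *)skb->cb + offs) arithmetic is now repeated at every touch
point. A pair of hypothetical inline helpers (not part of this patch; both
names are invented here) would keep the casts in one place:

    static inline struct page **
    iwl_pcie_skb_page_ptr(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb)
    {
	    /* same arithmetic as iwl_pcie_free_tso_page() above */
	    return (void *)((u8 *)skb->cb + trans_pcie->page_offs);
    }

    static inline struct iwl_device_cmd **
    iwl_pcie_skb_dev_cmd_ptr(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb)
    {
	    /* second pointer slot, one void * past the first */
	    return (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs);
    }

With these, iwl_pcie_free_tso_page() reduces to test/free/clear on
iwl_pcie_skb_page_ptr(), and the open-coded casts in the hunks below disappear.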
if (WARN_ON_ONCE(!skb))
continue;
- iwl_pcie_free_tso_page(skb);
+ iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
if (WARN_ON_ONCE(!skb))
continue;
- iwl_pcie_free_tso_page(skb);
+ iwl_pcie_free_tso_page(trans_pcie, skb);
__skb_queue_tail(skbs, skb);
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
- struct iwl_device_cmd *dev_cmd =
- info->driver_data[dev_cmd_idx];
+ struct iwl_device_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+ trans_pcie->dev_cmd_offs);
/*
* Note that we can very well be overflowing again.
* In that case, iwl_queue_space will be small again
* and we won't wake mac80211's queue.
*/
- iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id);
+ iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
}
spin_lock_bh(&txq->lock);
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
+ struct page **page_ptr;
int ret;
struct tso_t tso;
get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = hdr_page->page;
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ *page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
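One subtlety in this hunk: get_page() takes a per-skb reference on the shared
TSO header page before the pointer is stashed, since several frames can be
carved out of the same page. A lifetime sketch in comment form (illustrative,
not part of the patch):

    /*
     * get_page(hdr_page->page);    +1: this skb now pins the page
     * *page_ptr = hdr_page->page;  remembered in skb->cb
     *   ...frame is DMA-mapped, sent, completed...
     * __free_page(*page_ptr);      -1: dropped by iwl_pcie_free_tso_page()
     * *page_ptr = NULL;            guard against a double free
     */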
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(q) < 3)) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_device_cmd **dev_cmd_ptr;
+
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans_pcie->dev_cmd_offs);
- info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] =
- dev_cmd;
+ *dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
spin_unlock(&txq->lock);
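Stripped of driver detail, everything above is one idiom: park two pointers at
a byte offset inside an opaque control buffer and fish them out later. A
minimal, self-contained userspace sketch of that idiom (all names invented;
48 is the size of skb->cb in struct sk_buff):

    #include <assert.h>
    #include <stddef.h>

    /* stand-in for struct sk_buff; the real cb is suitably aligned */
    struct fake_skb { _Alignas(void *) char cb[48]; };

    int main(void)
    {
	    struct fake_skb skb = { { 0 } };
	    size_t cb_data_offs = 2 * sizeof(void *);	/* picked by the "op mode" */
	    size_t page_offs = cb_data_offs;		/* first pointer slot */
	    size_t dev_cmd_offs = page_offs + sizeof(void *); /* second slot */
	    int page, dev_cmd;				/* dummy pointees */

	    /* store, as the tx path does */
	    *(void **)(skb.cb + page_offs) = &page;
	    *(void **)(skb.cb + dev_cmd_offs) = &dev_cmd;

	    /* retrieve, as reclaim/unmap do */
	    assert(*(void **)(skb.cb + page_offs) == &page);
	    assert(*(void **)(skb.cb + dev_cmd_offs) == &dev_cmd);
	    return 0;
    }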