#endif
}
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
static void iwl_set_hw_params(struct iwl_priv *priv)
{
if (cfg(priv)->ht_params)
hw_params(priv).use_rts_for_aggregation =
cfg(priv)->ht_params->use_rts_for_aggregation;
- if (iwlagn_mod_params.amsdu_size_8K)
- hw_params(priv).rx_page_order =
- get_order(IWL_RX_BUF_SIZE_8K);
- else
- hw_params(priv).rx_page_order =
- get_order(IWL_RX_BUF_SIZE_4K);
-
if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
trans_cfg.op_mode = op_mode;
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
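+ /* let the transport know whether the amsdu_size_8K module parameter requests 8 kB RX buffers */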
+ trans_cfg.rx_buf_size_8k = iwlagn_mod_params.amsdu_size_8K;
ucode_flags = fw->ucode_capa.flags;
* @valid_rx_ant: usable antennas for RX
* @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
* @sku: sku read from EEPROM
- * @rx_page_order: Rx buffer page order
* @ct_kill_threshold: temperature threshold - in hw dependent unit
 * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
* relevant for 1000, 6000 and up
u8 ht40_channel;
bool use_rts_for_aggregation;
u16 sku;
- u32 rx_page_order;
u32 ct_kill_threshold;
u32 ct_kill_exit_threshold;
unsigned int wd_timeout;
* @ucode_write_waitq: wait queue for uCode load
* @status - transport specific status flags
* @cmd_queue - command queue number
+ * @rx_buf_size_8k: true if 8 kB RX buffers are in use (4 kB otherwise)
+ * @rx_page_order: allocation page order of the RX buffer
*/
struct iwl_trans_pcie {
struct iwl_rx_queue rxq;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
u8 n_q_to_fifo;
+
+ bool rx_buf_size_8k;
+ u32 rx_page_order;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
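+ /* multi-page RX buffers are allocated as compound pages */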
- if (hw_params(trans).rx_page_order > 0)
+ if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask,
- hw_params(trans).rx_page_order);
+ trans_pcie->rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(trans, "alloc_pages failed, "
"order: %d\n",
- hw_params(trans).rx_page_order);
+ trans_pcie->rx_page_order);
if ((rxq->free_count <= RX_LOW_WATERMARK) &&
net_ratelimit())
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, hw_params(trans).rx_page_order);
+ __free_pages(page, trans_pcie->rx_page_order);
return;
}
element = rxq->rx_used.next;
rxb->page = page;
/* Get physical address of the RB */
rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << hw_params(trans).rx_page_order,
+ PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
unsigned long flags;
bool page_stolen = false;
- int max_len = PAGE_SIZE << hw_params(trans).rx_page_order;
+ int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
if (WARN_ON(!rxb))
/* page was stolen from us -- free our reference */
if (page_stolen) {
- __free_pages(rxb->page, hw_params(trans).rx_page_order);
+ __free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
}
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << hw_params(trans).rx_page_order,
+ PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
meta->source->resp_pkt = pkt;
meta->source->_rx_page_addr = (unsigned long)page_address(p);
- meta->source->_rx_page_order = hw_params(trans).rx_page_order;
+ meta->source->_rx_page_order = trans_pcie->rx_page_order;
meta->source->handler_status = handler_status;
}
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << hw_params(trans).rx_page_order,
+ PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
__free_pages(rxq->pool[i].page,
- hw_params(trans).rx_page_order);
+ trans_pcie->rx_page_order);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
struct iwl_rx_queue *rxq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
- if (iwlagn_mod_params.amsdu_size_8K)
+ if (trans_pcie->rx_buf_size_8k)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
else
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
trans_pcie->n_q_to_fifo * sizeof(u8));
+
+ trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
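+ /* with 4 kB pages, get_order(8 * 1024) is 1 and get_order(4 * 1024) is 0 */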
+ if (trans_pcie->rx_buf_size_8k)
+ trans_pcie->rx_page_order = get_order(8 * 1024);
+ else
+ trans_pcie->rx_page_order = get_order(4 * 1024);
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
* list of such notifications to filter. Max length is
* %MAX_NO_RECLAIM_CMDS.
* @n_no_reclaim_cmds: # of commands in list
+ * @rx_buf_size_8k: use 8 kB RX buffers, needed for A-MSDU reception;
+ * if unset, 4 kB RX buffers are used
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
u8 cmd_queue;
const u8 *no_reclaim_cmds;
int n_no_reclaim_cmds;
+
+ bool rx_buf_size_8k;
};
/**