*/
trans_cfg.op_mode = op_mode;
+ if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
+ priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
+ trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+ } else {
+ priv->sta_key_max_num = STA_KEY_MAX_NUM;
+ trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ }
+
/* Configure transport layer */
iwl_trans_configure(trans(priv), &trans_cfg);
priv->new_scan_threshold_behaviour =
!!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
- priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
- priv->shrd->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
- } else {
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
- priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
- }
-
priv->phy_calib_chain_noise_reset_cmd =
fw->ucode_capa.standard_phy_calibration_size;
priv->phy_calib_chain_noise_gain_cmd =
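The point of this hunk is ordering: the transport caches cmd_queue when iwl_trans_configure() runs, so the op_mode must choose between the default and the PAN command queue before that call, which is why the assignment block moves above it. For reference, a sketch of the two queue selectors; the values 4 and 9 are assumptions inferred from the "(#4/#9)" comment in the queue-setup hunks further down:

/* Assumed values, matching the "(#4/#9)" comment below. */
#define IWL_DEFAULT_CMD_QUEUE_NUM	4
#define IWL_IPAN_CMD_QUEUE_NUM		9	/* PAN-capable uCode */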
/**
* struct iwl_shared - shared fields for all the layers of the driver
*
- * @cmd_queue: command queue number
* @status: STATUS_*
* @wowlan: are we running wowlan uCode
* @valid_contexts: microcode/device supports multiple contexts
* @device_pointers: pointers to ucode event tables
*/
struct iwl_shared {
- u8 cmd_queue;
unsigned long status;
u8 valid_contexts;
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
* @status - transport specific status flags
+ * @cmd_queue - command queue number
*/
struct iwl_trans_pcie {
struct iwl_rx_queue rxq;
bool ucode_write_complete;
wait_queue_head_t ucode_write_waitq;
unsigned long status;
+ u8 cmd_queue;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
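With the field cached in struct iwl_trans_pcie, every transport-internal user now goes through IWL_TRANS_GET_PCIE_TRANS (the accessor whose definition is cut off above) instead of chasing trans->shrd. A minimal sketch of the pattern; iwl_pcie_get_cmd_queue is a hypothetical helper, not part of the patch:

static inline u8 iwl_pcie_get_cmd_queue(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* only valid once iwl_trans_configure() has run */
	return trans_pcie->cmd_queue;
}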
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_device_cmd *cmd;
unsigned long flags;
int len, err;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans->shrd->cmd_queue)
+ if (txq_id != trans_pcie->cmd_queue)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
out_cmd->hdr.cmd = cmd->id;
out_cmd->hdr.flags = 0;
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
INDEX_TO_SEQ(q->write_ptr));
/* and copy the data that needs to be copied */
get_cmd_string(out_cmd->hdr.cmd),
out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
- q->write_ptr, idx, trans->shrd->cmd_queue);
+ q->write_ptr, idx, trans_pcie->cmd_queue);
phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
DMA_BIDIRECTIONAL);
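The sequence field packs the queue number and the write index into a single u16, and only the source of the queue number changes here. A worked example, assuming the QUEUE_TO_SEQ()/INDEX_TO_SEQ() definitions from iwl-commands.h of this era:

/* Assuming QUEUE_TO_SEQ(q) = ((q) & 0x1f) << 8 and
 * INDEX_TO_SEQ(i) = (i) & 0xff, with the PAN command queue (9)
 * and write_ptr = 5:
 *
 *   QUEUE_TO_SEQ(9) = 0x0900
 *   INDEX_TO_SEQ(5) = 0x0005
 *   sequence        = 0x0905
 *
 * SEQ_TO_QUEUE()/SEQ_TO_INDEX() recover both fields on completion. */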
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
 * command queue then a command routing bug has been introduced
 * in the queue management code. */
- if (WARN(txq_id != trans->shrd->cmd_queue,
+ if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans->shrd->cmd_queue, sequence,
- trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
+ txq_id, trans_pcie->cmd_queue, sequence,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
struct iwl_tx_queue *txq =
- &trans_pcie->txq[trans->shrd->cmd_queue];
+ &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
IWL_ERR(trans,
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
- trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
+ trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
int freed = 0;
/* This function is not meant to release cmd queue */
- if (WARN_ON(txq_id == trans->shrd->cmd_queue))
+ if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return 0;
lockdep_assert_held(&txq->lock);
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
+ (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
+ (~(1<<(trans_pcie)->cmd_queue)))
+
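The macro grows a trans_pcie argument because the command queue number now lives there. It builds a bitmap of every scheduler queue with the command queue bit cleared, so chain mode is enabled everywhere except on the command queue. A worked example with assumed values (20 queues, as on iwlagn-era hardware, and command queue 9):

/* num_of_queues = 20, cmd_queue = 9 (assumed example values):
 *   (1 << 20) - 1 = 0x000fffff   every queue selected
 *   ~(1 << 9)     = 0xfffffdff   drop the command queue bit
 *   result        = 0x000ffdff
 */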
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie =
{
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
int i;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
return -EINVAL;
if (!txq->meta || !txq->cmd)
goto error;
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++) {
txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
GFP_KERNEL);
/* Alloc driver data array and TFD circular buffer */
/* Driver private data, only for Tx (not command) queues,
* not shared with device. */
- if (txq_id != trans->shrd->cmd_queue) {
+ if (txq_id != trans_pcie->cmd_queue) {
txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
GFP_KERNEL);
if (!txq->skbs) {
txq->skbs = NULL;
/* since txq->cmd has been zeroed,
* all non-allocated cmd[i] will be NULL */
- if (txq->cmd && txq_id == trans->shrd->cmd_queue)
+ if (txq->cmd && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
kfree(txq->cmd[i]);
kfree(txq->meta);
/* In the command queue, all the TBs are mapped as BIDI
* so unmap them as such.
*/
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
dma_dir = DMA_BIDIRECTIONAL;
else
dma_dir = DMA_TO_DEVICE;
/* De-alloc array of command/tx buffers */
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < txq->q.n_window; i++)
kfree(txq->cmd[i]);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
- slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
slots_num, txq_id);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
- slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
slots_num, txq_id);
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(trans));
+ SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
iwl_write_prph(trans, SCD_AGGR_SEL, 0);
/* initiate the queues */
else
queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
- iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
+ iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
/* make sure all queue are not stopped */
memset(&trans_pcie->queue_stopped[0], 0,
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+}
+
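The new handler is deliberately minimal: it only snapshots the op_mode's choice into the PCIe-private struct. The resulting call flow, using only names from this patch:

/*
 * op_mode (iwlagn):  trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
 *                    iwl_trans_configure(trans(priv), &trans_cfg);
 * iwl-trans inline:  trans->ops->configure(trans, trans_cfg);
 * PCIe transport:    trans_pcie->cmd_queue = trans_cfg->cmd_queue;
 */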
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie =
/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
- if (cnt == trans->shrd->cmd_queue)
+ if (cnt == trans_pcie->cmd_queue)
continue;
txq = &trans_pcie->txq[cnt];
q = &txq->q;
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
.read32 = iwl_trans_pcie_read32,
+ .configure = iwl_trans_pcie_configure,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
*
* @op_mode: pointer to the upper layer.
* Must be set before any other call.
+ * @cmd_queue: the index of the command queue.
+ * Must be set before start_fw.
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
+ u8 cmd_queue;
};
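For an op_mode that never runs PAN uCode, the configuration collapses to a two-field initializer. A hypothetical minimal caller (the surrounding op_mode and trans variables are assumed):

struct iwl_trans_config trans_cfg = {
	.op_mode   = op_mode,
	.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM, /* before start_fw */
};

iwl_trans_configure(trans, &trans_cfg);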
/**
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
+ * @configure: configure parameters required by the transport layer from
+ * the op_mode.
*/
struct iwl_trans_ops {
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
+ void (*configure)(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
};
/**
* more
*/
trans->op_mode = trans_cfg->op_mode;
+
+ trans->ops->configure(trans, trans_cfg);
}
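Since the inline calls trans->ops->configure unconditionally, every transport backend must now provide the hook even when it has nothing to cache. A hypothetical stub for some other bus:

static void iwl_trans_foo_configure(struct iwl_trans *trans,
				    const struct iwl_trans_config *trans_cfg)
{
	/* this bus keeps no private copy of cmd_queue */
}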
static inline int iwl_trans_start_hw(struct iwl_trans *trans)