tx_info = &txq->txb[txq->q.read_ptr];
ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
tx_info->skb[0] = NULL;
- iwl3945_hw_txq_free_tfd(priv, txq);
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
}
if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
}
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
- dma_addr_t addr, u16 len)
+int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
int count;
- u32 pad;
- struct iwl3945_tfd *tfd = (struct iwl3945_tfd *)ptr;
+ struct iwl_queue *q;
+ struct iwl3945_tfd *tfd;
+
+ q = &txq->q;
+ tfd = &txq->tfds39[q->write_ptr];
+
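+ /* reset is non-zero only for the first buffer attached to a TFD,
+  * so the descriptor is zeroed exactly once. */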
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
- pad = TFD_CTL_PAD_GET(le32_to_cpu(tfd->control_flags));
if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
IWL_ERR(priv, "Error can not send more than %d chunks\n",
*
* Does NOT advance any indexes
*/
-int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)&txq->tfds39[0];
struct iwl3945_tfd *tfd = &tfd_tmp[txq->q.read_ptr];
/* classify bd */
if (txq->q.id == IWL_CMD_QUEUE_NUM)
/* nothing to cleanup after for host commands */
- return 0;
+ return;
/* sanity check */
counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
if (counter > NUM_TFD_CHUNKS) {
IWL_ERR(priv, "Too many chunks: %i\n", counter);
/* @todo issue fatal error, it is quite serious situation */
- return 0;
+ return;
}
/* unmap chunks if any */
}
}
}
- return 0;
+ return;
}
u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *addr)
}
static struct iwl_lib_ops iwl3945_lib = {
+ .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl3945_hw_txq_free_tfd,
.load_ucode = iwl3945_load_bsm,
.apm_ops = {
.init = iwl3945_apm_init,
extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
- dma_addr_t addr, u16 len);
-extern int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len,
+ u8 reset, u8 pad);
+extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
.txq_set_sched = iwl4965_txq_set_sched,
.txq_agg_enable = iwl4965_txq_agg_enable,
.txq_agg_disable = iwl4965_txq_agg_disable,
+ .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl_hw_txq_free_tfd,
.rx_handler_setup = iwl4965_rx_handler_setup,
.setup_deferred_work = iwl4965_setup_deferred_work,
.cancel_deferred_work = iwl4965_cancel_deferred_work,
.txq_set_sched = iwl5000_txq_set_sched,
.txq_agg_enable = iwl5000_txq_agg_enable,
.txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl_hw_txq_free_tfd,
.rx_handler_setup = iwl5000_rx_handler_setup,
.setup_deferred_work = iwl5000_setup_deferred_work,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
return rc;
}
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
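+ /* Address bits 35:32 sit in the low nibble of hi_n_len; shifting
+  * by 16 twice avoids a shift-count warning when dma_addr_t is
+  * only 32 bits wide. */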
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+ return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
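+ /* lo carries address bits 31:0; hi_n_len packs the 12-bit length
+  * into bits 15:4 and address bits 35:32 into bits 3:0. */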
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
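+/* Only the low five bits of num_tbs carry the TB count. */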
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+ struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
+ struct iwl_tfd *tfd;
+ struct pci_dev *dev = priv->pci_dev;
+ int index = txq->q.read_ptr;
+ int i;
+ int num_tbs;
+
+ tfd = &tfd_tmp[index];
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (num_tbs)
+ pci_unmap_single(dev,
+ pci_unmap_addr(&txq->cmd[index]->meta, mapping),
+ pci_unmap_len(&txq->cmd[index]->meta, len),
+ PCI_DMA_TODEVICE);
+
+ /* Unmap chunks, if any. */
+ for (i = 1; i < num_tbs; i++) {
+ pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+ iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+
+ if (txq->txb) {
+ dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
+ txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
+ }
+ }
+}
+
+int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len,
+ u8 reset, u8 pad)
+{
+ struct iwl_queue *q;
+ struct iwl_tfd *tfd;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd = &txq->tfds[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+ /* Each TFD can point to a maximum of 20 Tx buffers */
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(priv, "Error can not send more than %d chunks\n",
+ IWL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
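+ /* TB addresses must fit in 36 bits; an address outside
+  * IWL_TX_DMA_MASK is only reported, not rejected. */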
+ BUG_ON(addr & ~DMA_BIT_MASK(36));
+ if (unlikely(addr & ~IWL_TX_DMA_MASK))
+ IWL_ERR(priv, "Unaligned address = %llx\n",
+ (unsigned long long)addr);
+
+ iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
/******************************************************************************
*
* Misc. internal state and helper functions
void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
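+ /* attach one DMA buffer to, or free all buffers of, a TFD */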
+ int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr,
+ u16 len, u8 reset, u8 pad);
+ void (*txq_free_tfd)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
/* aggregations */
int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo,
int sta_id, int tid, u16 ssn_idx);
* TX
******************************************************/
int iwl_txq_ctx_reset(struct iwl_priv *priv);
+void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
memset(ptr, 0, sizeof(*ptr));
}
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
- return addr;
-}
-
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
- struct iwl_tfd *tfd;
- struct pci_dev *dev = priv->pci_dev;
- int index = txq->q.read_ptr;
- int i;
- int num_tbs;
-
- tfd = &tfd_tmp[index];
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_tfd_get_num_tbs(tfd);
-
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
- /* @todo issue fatal error, it is quite serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (num_tbs)
- pci_unmap_single(dev,
- pci_unmap_addr(&txq->cmd[index]->meta, mapping),
- pci_unmap_len(&txq->cmd[index]->meta, len),
- PCI_DMA_TODEVICE);
-
- /* Unmap chunks, if any. */
- for (i = 1; i < num_tbs; i++) {
- pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
- iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
-
- if (txq->txb) {
- dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
- txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
- }
- }
-}
-
-static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tfd *tfd,
- dma_addr_t addr, u16 len)
-{
-
- u32 num_tbs = iwl_tfd_get_num_tbs(tfd);
-
- /* Each TFD can point to a maximum 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
- return -EINVAL;
- }
-
- BUG_ON(addr & ~DMA_BIT_MASK(36));
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(priv, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
- iwl_tfd_set_tb(tfd, num_tbs, addr, len);
-
- return 0;
-}
-
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
*/
/* first, empty all BD's */
for (; q->write_ptr != q->read_ptr;
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
- iwl_hw_txq_free_tfd(priv, txq);
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
len = sizeof(struct iwl_cmd) * q->n_window;
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_tfd *tfd;
struct iwl_tx_queue *txq;
struct iwl_queue *q;
struct iwl_cmd *out_cmd;
spin_lock_irqsave(&priv->lock, flags);
- /* Set up first empty TFD within this queue's circular TFD buffer */
- tfd = &txq->tfds[q->write_ptr];
- memset(tfd, 0, sizeof(*tfd));
-
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
txq->txb[q->write_ptr].skb[0] = skb;
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
txcmd_phys += offsetof(struct iwl_cmd, hdr);
- iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
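+ /* reset=1 zeroes the TFD inside the attach call, replacing the
+  * explicit memset that used to precede this point. */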
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, len, 1, 0);
if (info->control.hw_key)
iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
if (len) {
phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
len, PCI_DMA_TODEVICE);
- iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, len,
+ 0, 0);
}
/* Tell NIC about any 2-byte padding after MAC header */
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
- struct iwl_tfd *tfd;
struct iwl_cmd *out_cmd;
dma_addr_t phys_addr;
unsigned long flags;
spin_lock_irqsave(&priv->hcmd_lock, flags);
- tfd = &txq->tfds[q->write_ptr];
- memset(tfd, 0, sizeof(*tfd));
-
-
idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
out_cmd = txq->cmd[idx];
pci_unmap_len_set(&out_cmd->meta, len, len);
phys_addr += offsetof(struct iwl_cmd, hdr);
- iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, fix_size, 1, 0);
#ifdef CONFIG_IWLWIFI_DEBUG
switch (out_cmd->hdr.cmd) {
if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
- iwl_hw_txq_free_tfd(priv, txq);
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
nfreed++;
}
return nfreed;
/* first, empty all BD's */
for (; q->write_ptr != q->read_ptr;
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
- iwl3945_hw_txq_free_tfd(priv, txq);
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
len = sizeof(struct iwl_cmd) * q->n_window;
if (q->id == IWL_CMD_QUEUE_NUM)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
- struct iwl3945_tfd *tfd;
struct iwl_cmd *out_cmd;
u32 idx;
u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
dma_addr_t phys_addr;
- int pad;
int ret, len;
unsigned long flags;
spin_lock_irqsave(&priv->hcmd_lock, flags);
- tfd = &txq->tfds39[q->write_ptr];
- memset(tfd, 0, sizeof(*tfd));
-
idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
out_cmd = txq->cmd[idx];
pci_unmap_len_set(&out_cmd->meta, len, len);
phys_addr += offsetof(struct iwl_cmd, hdr);
- iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
-
- pad = U32_PAD(cmd->len);
- tfd->control_flags |= cpu_to_le32(TFD_CTL_PAD_SET(pad));
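+ /* Pass U32_PAD(cmd->len) down so the padding can be applied
+  * inside the attach routine instead of via TFD_CTL_PAD_SET here. */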
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, fix_size,
+ 1, U32_PAD(cmd->len));
IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
"%d bytes at %d[%d]:%d\n",
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl3945_tfd *tfd;
struct iwl3945_tx_cmd *tx;
struct iwl_tx_queue *txq = NULL;
struct iwl_queue *q = NULL;
spin_lock_irqsave(&priv->lock, flags);
- /* Set up first empty TFD within this queue's circular TFD buffer */
- tfd = &txq->tfds39[q->write_ptr];
- memset(tfd, 0, sizeof(*tfd));
idx = get_cmd_index(q, q->write_ptr, 0);
/* Set up driver data for this TFD */
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
- iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, len, 1, 0);
if (info->control.hw_key)
iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
if (len) {
phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
len, PCI_DMA_TODEVICE);
- iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
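+ /* Buffer count and MAC-header padding are now handled through the
+  * attach call, replacing the control_flags setup removed below. */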
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, len,
+ 0, U32_PAD(len));
}
- if (!len)
- /* If there is no payload, then we use only one Tx buffer */
- tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(1));
- else
- /* Else use 2 buffers.
- * Tell 3945 about any padding after MAC header */
- tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(2) |
- TFD_CTL_PAD_SET(U32_PAD(len)));
-
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx->len = cpu_to_le16(len);