/*
 * iwlwifi: move NIC init and Tx queues init to iwlcore
 *
 * drivers/net/wireless/iwlwifi/iwl-tx.c
 */
/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

30#include <net/mac80211.h>
31#include "iwl-eeprom.h"
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-io.h"
36#include "iwl-helpers.h"
37
38/**
39 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
40 *
41 * Does NOT advance any TFD circular buffer read/write indexes
42 * Does NOT free the TFD itself (which is within circular buffer)
43 */
44int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
45{
46 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
47 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
48 struct pci_dev *dev = priv->pci_dev;
49 int i;
50 int counter = 0;
51 int index, is_odd;
52
53 /* Host command buffers stay mapped in memory, nothing to clean */
54 if (txq->q.id == IWL_CMD_QUEUE_NUM)
55 return 0;
56
57 /* Sanity check on number of chunks */
58 counter = IWL_GET_BITS(*bd, num_tbs);
59 if (counter > MAX_NUM_OF_TBS) {
60 IWL_ERROR("Too many chunks: %i\n", counter);
61 /* @todo issue fatal error, it is quite serious situation */
62 return 0;
63 }
64
65 /* Unmap chunks, if any.
66 * TFD info for odd chunks is different format than for even chunks. */
67 for (i = 0; i < counter; i++) {
68 index = i / 2;
69 is_odd = i & 0x1;
70
71 if (is_odd)
72 pci_unmap_single(
73 dev,
74 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
75 (IWL_GET_BITS(bd->pa[index],
76 tb2_addr_hi20) << 16),
77 IWL_GET_BITS(bd->pa[index], tb2_len),
78 PCI_DMA_TODEVICE);
79
80 else if (i > 0)
81 pci_unmap_single(dev,
82 le32_to_cpu(bd->pa[index].tb1_addr),
83 IWL_GET_BITS(bd->pa[index], tb1_len),
84 PCI_DMA_TODEVICE);
85
86 /* Free SKB, if any, for this chunk */
87 if (txq->txb[txq->q.read_ptr].skb[i]) {
88 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
89
90 dev_kfree_skb(skb);
91 txq->txb[txq->q.read_ptr].skb[i] = NULL;
92 }
93 }
94 return 0;
95}
96EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
97
98/**
99 * iwl_tx_queue_free - Deallocate DMA queue.
100 * @txq: Transmit queue to deallocate.
101 *
102 * Empty queue by removing and destroying all BD's.
103 * Free all buffers.
104 * 0-fill, but do not free "txq" descriptor structure.
105 */
106static void iwl_tx_queue_free(struct iwl_priv *priv,
107 struct iwl4965_tx_queue *txq)
108{
109 struct iwl4965_queue *q = &txq->q;
110 struct pci_dev *dev = priv->pci_dev;
111 int len;
112
113 if (q->n_bd == 0)
114 return;
115
116 /* first, empty all BD's */
117 for (; q->write_ptr != q->read_ptr;
118 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
119 iwl_hw_txq_free_tfd(priv, txq);
120
121 len = sizeof(struct iwl_cmd) * q->n_window;
122 if (q->id == IWL_CMD_QUEUE_NUM)
123 len += IWL_MAX_SCAN_SIZE;
124
125 /* De-alloc array of command/tx buffers */
126 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
127
128 /* De-alloc circular buffer of TFDs */
129 if (txq->q.n_bd)
130 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
131 txq->q.n_bd, txq->bd, txq->q.dma_addr);
132
133 /* De-alloc array of per-TFD driver data */
134 kfree(txq->txb);
135 txq->txb = NULL;
136
137 /* 0-fill queue descriptor structure */
138 memset(txq, 0, sizeof(*txq));
139}
140
141/**
142 * iwl_hw_txq_ctx_free - Free TXQ Context
143 *
144 * Destroy all TX DMA queues and structures
145 */
146void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
147{
148 int txq_id;
149
150 /* Tx queues */
151 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
152 iwl_tx_queue_free(priv, &priv->txq[txq_id]);
153
154 /* Keep-warm buffer */
155 iwl_kw_free(priv);
156}
157EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
158
159/**
160 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
161 */
162static int iwl_queue_init(struct iwl_priv *priv, struct iwl4965_queue *q,
163 int count, int slots_num, u32 id)
164{
165 q->n_bd = count;
166 q->n_window = slots_num;
167 q->id = id;
168
169 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
170 * and iwl_queue_dec_wrap are broken. */
171 BUG_ON(!is_power_of_2(count));
172
173 /* slots_num must be power-of-two size, otherwise
174 * get_cmd_index is broken. */
175 BUG_ON(!is_power_of_2(slots_num));
176
177 q->low_mark = q->n_window / 4;
178 if (q->low_mark < 4)
179 q->low_mark = 4;
180
181 q->high_mark = q->n_window / 8;
182 if (q->high_mark < 2)
183 q->high_mark = 2;
184
185 q->write_ptr = q->read_ptr = 0;
186
187 return 0;
188}
189
190/**
191 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
192 */
193static int iwl_tx_queue_alloc(struct iwl_priv *priv,
194 struct iwl4965_tx_queue *txq, u32 id)
195{
196 struct pci_dev *dev = priv->pci_dev;
197
198 /* Driver private data, only for Tx (not command) queues,
199 * not shared with device. */
200 if (id != IWL_CMD_QUEUE_NUM) {
201 txq->txb = kmalloc(sizeof(txq->txb[0]) *
202 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
203 if (!txq->txb) {
204 IWL_ERROR("kmalloc for auxiliary BD "
205 "structures failed\n");
206 goto error;
207 }
208 } else
209 txq->txb = NULL;
210
211 /* Circular buffer of transmit frame descriptors (TFDs),
212 * shared with device */
213 txq->bd = pci_alloc_consistent(dev,
214 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
215 &txq->q.dma_addr);
216
217 if (!txq->bd) {
218 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
219 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
220 goto error;
221 }
222 txq->q.id = id;
223
224 return 0;
225
226 error:
227 kfree(txq->txb);
228 txq->txb = NULL;
229
230 return -ENOMEM;
231}
232
233/*
234 * Tell nic where to find circular buffer of Tx Frame Descriptors for
235 * given Tx queue, and enable the DMA channel used for that queue.
236 *
237 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
238 * channels supported in hardware.
239 */
240static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
241 struct iwl4965_tx_queue *txq)
242{
243 int rc;
244 unsigned long flags;
245 int txq_id = txq->q.id;
246
247 spin_lock_irqsave(&priv->lock, flags);
248 rc = iwl_grab_nic_access(priv);
249 if (rc) {
250 spin_unlock_irqrestore(&priv->lock, flags);
251 return rc;
252 }
253
254 /* Circular buffer (TFD queue in DRAM) physical base address */
255 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
256 txq->q.dma_addr >> 8);
257
258 /* Enable DMA channel, using same id as for TFD queue */
259 iwl_write_direct32(
260 priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
261 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
262 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
263 iwl_release_nic_access(priv);
264 spin_unlock_irqrestore(&priv->lock, flags);
265
266 return 0;
267}
268
269/**
270 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
271 */
272static int iwl_tx_queue_init(struct iwl_priv *priv,
273 struct iwl4965_tx_queue *txq,
274 int slots_num, u32 txq_id)
275{
276 struct pci_dev *dev = priv->pci_dev;
277 int len;
278 int rc = 0;
279
280 /*
281 * Alloc buffer array for commands (Tx or other types of commands).
282 * For the command queue (#4), allocate command space + one big
283 * command for scan, since scan command is very huge; the system will
284 * not have two scans at the same time, so only one is needed.
285 * For normal Tx queues (all other queues), no super-size command
286 * space is needed.
287 */
288 len = sizeof(struct iwl_cmd) * slots_num;
289 if (txq_id == IWL_CMD_QUEUE_NUM)
290 len += IWL_MAX_SCAN_SIZE;
291 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
292 if (!txq->cmd)
293 return -ENOMEM;
294
295 /* Alloc driver data array and TFD circular buffer */
296 rc = iwl_tx_queue_alloc(priv, txq, txq_id);
297 if (rc) {
298 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
299
300 return -ENOMEM;
301 }
302 txq->need_update = 0;
303
304 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
305 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
306 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
307
308 /* Initialize queue's high/low-water marks, and head/tail indexes */
309 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
310
311 /* Tell device where to find queue */
312 iwl_hw_tx_queue_init(priv, txq);
313
314 return 0;
315}
316
317/**
318 * iwl_txq_ctx_reset - Reset TX queue context
319 * Destroys all DMA structures and initialise them again
320 *
321 * @param priv
322 * @return error code
323 */
324int iwl_txq_ctx_reset(struct iwl_priv *priv)
325{
326 int ret = 0;
327 int txq_id, slots_num;
328
329 iwl_kw_free(priv);
330
331 /* Free all tx/cmd queues and keep-warm buffer */
332 iwl_hw_txq_ctx_free(priv);
333
334 /* Alloc keep-warm buffer */
335 ret = iwl_kw_alloc(priv);
336 if (ret) {
337 IWL_ERROR("Keep Warm allocation failed");
338 goto error_kw;
339 }
340
341 /* Turn off all Tx DMA fifos */
342 ret = priv->cfg->ops->lib->disable_tx_fifo(priv);
343 if (unlikely(ret))
344 goto error_reset;
345
346 /* Tell nic where to find the keep-warm buffer */
347 ret = iwl_kw_init(priv);
348 if (ret) {
349 IWL_ERROR("kw_init failed\n");
350 goto error_reset;
351 }
352
353 /* Alloc and init all (default 16) Tx queues,
354 * including the command queue (#4) */
355 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
356 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
357 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
358 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
359 txq_id);
360 if (ret) {
361 IWL_ERROR("Tx %d queue init failed\n", txq_id);
362 goto error;
363 }
364 }
365
366 return ret;
367
368 error:
369 iwl_hw_txq_ctx_free(priv);
370 error_reset:
371 iwl_kw_free(priv);
372 error_kw:
373 return ret;
374}