/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot in which the firmware can place
 * a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from the pool up to the
 *                            READ INDEX, detaching the SKB from each.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */

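/*
 * Illustrative sketch (not part of the driver): the empty/full tests from
 * the theory of operation above, written out as code. The helper names are
 * hypothetical and exist only for this example; RX_QUEUE_MASK is
 * RX_QUEUE_SIZE - 1.
 */
static inline bool iwl_rxq_example_empty(u32 read, u32 write)
{
        /* no good data: WRITE trails READ by exactly one slot */
        return write == ((read - 1) & RX_QUEUE_MASK);
}

static inline bool iwl_rxq_example_full(u32 read, u32 write)
{
        /* every usable slot is owned by the firmware */
        return write == read;
}
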
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *q)
{
        int s = q->read - q->write;
        if (s <= 0)
                s += RX_QUEUE_SIZE;
        /* keep some buffer to not confuse full and empty queue */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}

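/*
 * Illustrative sketch (not part of the driver): iwl_rxq_space() above in
 * numbers, assuming RX_QUEUE_SIZE = 256. With read = 10 and write = 250,
 * s = 10 - 250 = -240, which wraps to 16; minus the 2 guard slots, 14
 * slots are reported free.
 */
static inline void iwl_rxq_space_example(void)
{
        const struct iwl_rxq q = { .read = 10, .write = 250 };

        WARN_ON(iwl_rxq_space(&q) != 14);
}
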
/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}

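/*
 * Illustrative sketch (not part of the driver): receive buffers are
 * 256-byte aligned, so the low 8 bits of the DMA address are always zero
 * and the shift above loses nothing; e.g. 0x12345600 becomes RBD pointer
 * 0x00123456.
 */
static inline void iwl_pcie_rbd_ptr_example(void)
{
        dma_addr_t addr = 0x12345600;

        WARN_ON(iwl_pcie_dma_addr2rbd_ptr(addr) != cpu_to_le32(0x00123456));
}
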
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&q->lock, flags);

        if (q->need_update == 0)
                goto exit_unlock;

        if (trans->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                /* Device expects a multiple of 8 */
                q->write_actual = (q->write & ~0x7);
                iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
        } else {
                struct iwl_trans_pcie *trans_pcie =
                        IWL_TRANS_GET_PCIE_TRANS(trans);

                /* If power-saving is in use, make sure device is awake */
                if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
                        reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(trans,
                                        "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                        reg);
                                iwl_set_bit(trans, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                goto exit_unlock;
                        }

                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           q->write_actual);

                /* Else device is assumed to be awake */
                } else {
                        /* Device expects a multiple of 8 */
                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           q->write_actual);
                }
        }
        q->need_update = 0;

exit_unlock:
        spin_unlock_irqrestore(&q->lock, flags);
}

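/*
 * Illustrative note (not part of the driver): the device only accepts
 * write pointers that are multiples of 8, which is why the function above
 * rounds down with "write & ~0x7". A driver index of 13, for instance, is
 * reported to the hardware as 8; the remaining buffers are reported on a
 * later update.
 */
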
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        unsigned long flags;

        /*
         * If the device isn't enabled - no need to try to add buffers...
         * This can happen when we stop the device and still have an interrupt
         * pending. We stop the APM before we sync the interrupts / tasklets
         * because we have to (see comment there). On the other hand, since
         * the APM is stopped, we cannot access the HW (in particular not prph).
         * So don't try to restock if the APM has been already stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
                return;

        spin_lock_irqsave(&rxq->lock, flags);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);

                /* Point to Rx buffer via next RBD in circular buffer */
                rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock_irqrestore(&rxq->lock, flags);
        /* If the pre-allocated buffer pool is dropping low, schedule to
         * refill it */
        if (rxq->free_count <= RX_LOW_WATERMARK)
                schedule_work(&trans_pcie->rx_replenish);

        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock_irqsave(&rxq->lock, flags);
                rxq->need_update = 1;
                spin_unlock_irqrestore(&rxq->lock, flags);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        }
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
        unsigned long flags;
        gfp_t gfp_mask = priority;

        while (1) {
                spin_lock_irqsave(&rxq->lock, flags);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&rxq->lock, flags);

                if (rxq->free_count > RX_LOW_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                if (trans_pcie->rx_page_order > 0)
                        gfp_mask |= __GFP_COMP;

                /* Alloc a new receive buffer */
                page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
                if (!page) {
                        if (net_ratelimit())
                                IWL_DEBUG_INFO(trans,
                                               "alloc_pages failed, order: %d\n",
                                               trans_pcie->rx_page_order);

                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
                            net_ratelimit())
                                IWL_CRIT(trans,
                                         "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
                                         priority == GFP_ATOMIC ?
                                         "GFP_ATOMIC" : "GFP_KERNEL",
                                         rxq->free_count);
                        /* We don't reschedule replenish work here -- we will
                         * call the restock method and if it still needs
                         * more buffers it will schedule replenish */
                        return;
                }

                spin_lock_irqsave(&rxq->lock, flags);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                spin_unlock_irqrestore(&rxq->lock, flags);

                BUG_ON(rxb->page);
                rxb->page = page;
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                /* dma address must be no more than 36 bits */
                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                /* and also 256 byte aligned! */
                BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

                spin_lock_irqsave(&rxq->lock, flags);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock_irqrestore(&rxq->lock, flags);
        }
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
                if (rxq->pool[i].page != NULL) {
                        dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
                                       PAGE_SIZE << trans_pcie->rx_page_order,
                                       DMA_FROM_DEVICE);
                        __free_pages(rxq->pool[i].page,
                                     trans_pcie->rx_page_order);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_pcie_rxq_restock(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
        iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

        iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
        struct iwl_trans_pcie *trans_pcie =
            container_of(data, struct iwl_trans_pcie, rx_replenish);

        iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct device *dev = trans->dev;

        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

        spin_lock_init(&rxq->lock);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                      &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
                                           &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

        if (trans_pcie->rx_buf_size_8k)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *   the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           rb_size |
                           (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;

        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        INIT_WORK(&trans_pcie->rx_replenish,
                  iwl_pcie_rx_replenish_work);

        iwl_pcie_rxq_free_rbs(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwl_pcie_rx_replenish(trans);

        iwl_pcie_rx_hw_init(trans, rxq);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        rxq->need_update = 1;
        iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        unsigned long flags;

        /* if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_pcie_rxq_free_rbs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(trans->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
        memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
        rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                  struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        unsigned long flags;
        bool page_stolen = false;
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
        u32 offset = 0;

        if (WARN_ON(!rxb))
                return;

        dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

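        /*
         * One receive buffer page can carry several back-to-back responses
         * from the firmware; walk them all until an invalid-frame marker is
         * seen or the next header could no longer fit in the page.
         */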
        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
                struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
                int index, cmd_index, err, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._page = rxb->page,
                        ._page_stolen = false,
                        .truesize = max_len,
                };

                pkt = rxb_addr(&rxcb);

                if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
                        break;

                IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
                        rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
                        pkt->hdr.cmd);

                len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
                len += sizeof(u32); /* account for status word */
                trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
                trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

                /* Reclaim a command buffer only if this packet is a response
                 *   to a (driver-originated) command.
                 * If the packet (e.g. Rx frame) originated from uCode,
                 *   there is no command buffer to reclaim.
                 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
                 *   but apparently a few don't get set; catch them here. */
                reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
                if (reclaim) {
                        int i;

                        for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
                                if (trans_pcie->no_reclaim_cmds[i] ==
                                                        pkt->hdr.cmd) {
                                        reclaim = false;
                                        break;
                                }
                        }
                }

                sequence = le16_to_cpu(pkt->hdr.sequence);
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);

                if (reclaim) {
                        struct iwl_pcie_txq_entry *ent;
                        ent = &txq->entries[cmd_index];
                        cmd = ent->copy_cmd;
                        WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
                } else {
                        cmd = NULL;
                }

                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

                if (reclaim) {
                        /* The original command isn't needed any more */
                        kfree(txq->entries[cmd_index].copy_cmd);
                        txq->entries[cmd_index].copy_cmd = NULL;
                        /* nor is the duplicated part of the command */
                        kfree(txq->entries[cmd_index].free_buf);
                        txq->entries[cmd_index].free_buf = NULL;
                }

                /*
                 * After here, we should always check rxcb._page_stolen,
                 * if it is true then one of the handlers took the page.
                 */

                if (reclaim) {
                        /* Invoke any callbacks, transfer the buffer to caller,
                         * and fire off the (possibly) blocking
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
                                iwl_pcie_hcmd_complete(trans, &rxcb, err);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }

                page_stolen |= rxcb._page_stolen;
                offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
        }

        /* page was stolen from us -- free our reference */
        if (page_stolen) {
                __free_pages(rxb->page, trans_pcie->rx_page_order);
                rxb->page = NULL;
        }

        /* Reuse the page if possible. For notification packets and
         * SKBs that fail to Rx correctly, add them back into the
         * rx_free list for reuse later. */
        spin_lock_irqsave(&rxq->lock, flags);
        if (rxb->page != NULL) {
                rxb->page_dma =
                        dma_map_page(trans->dev, rxb->page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;
        } else
                list_add_tail(&rxb->list, &rxq->rx_used);
        spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 r, i;
        u8 fill_rx = 0;
        u32 count = 8;
        int total_empty;

        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
        r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
        i = rxq->read;

        /* Rx interrupt, but nothing sent from uCode */
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

        /* calculate total frames that need to be restocked after handling RX */
        total_empty = r - rxq->write_actual;
        if (total_empty < 0)
                total_empty += RX_QUEUE_SIZE;

        if (total_empty > (RX_QUEUE_SIZE / 2))
                fill_rx = 1;

        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;

                rxb = rxq->queue[i];
                rxq->queue[i] = NULL;

                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                             r, i, rxb);
                iwl_pcie_rx_handle_rb(trans, rxb);

                i = (i + 1) & RX_QUEUE_MASK;
                /* If there are a lot of unused frames,
                 * restock the Rx queue so ucode won't assert. */
                if (fill_rx) {
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
                                iwl_pcie_rx_replenish_now(trans);
                                count = 0;
                        }
                }
        }

        /* Backtrack one entry */
        rxq->read = i;
        if (fill_rx)
                iwl_pcie_rx_replenish_now(trans);
        else
                iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
            (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
                             APMS_CLK_VAL_MRB_FUNC_MODE) ||
             (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
                            APMG_PS_CTRL_VAL_RESET_REQ))) {
                clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
                iwl_op_mode_wimax_active(trans->op_mode);
                wake_up(&trans_pcie->wait_command_queue);
                return;
        }

        iwl_pcie_dump_csr(trans);
        iwl_pcie_dump_fh(trans, NULL);

        set_bit(STATUS_FW_ERROR, &trans_pcie->status);
        clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
        wake_up(&trans_pcie->wait_command_queue);

        iwl_op_mode_nic_error(trans->op_mode);
}

void iwl_pcie_tasklet(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;
        unsigned long flags;
        u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
        u32 inta_mask;
#endif

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* Ack/clear/reset pending uCode interrupts.
         * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
         */
        /* There is a hardware bug in the interrupt mask function that some
         * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
         * they are disabled in the CSR_INT_MASK register. Furthermore the
         * ICT interrupt handling mechanism has another bug that might cause
         * these unmasked interrupts to fail to be detected. We work around
         * these hardware bugs here by ACKing all the possible interrupts so
         * that interrupt coalescing can still be achieved.
         */
        iwl_write32(trans, CSR_INT,
                    trans_pcie->inta | ~trans_pcie->inta_mask);

        inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* just for debug */
                inta_mask = iwl_read32(trans, CSR_INT_MASK);
                IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
                              inta, inta_mask);
        }
#endif

        /* the interrupt was saved in the inta variable;
         * now we can reset trans_pcie->inta */
        trans_pcie->inta = 0;

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
                IWL_ERR(trans, "Hardware error detected. Restarting.\n");

                /* Tell the device to stop sending interrupts */
                iwl_disable_interrupts(trans);

                isr_stats->hw++;
                iwl_pcie_irq_handle_error(trans);

                handled |= CSR_INT_BIT_HW_ERR;

                return;
        }

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
                        IWL_DEBUG_ISR(trans,
                                      "Scheduler finished to transmit the frame/frames.\n");
                        isr_stats->sch++;
                }

                /* Alive notification via Rx interrupt will do the real work */
                if (inta & CSR_INT_BIT_ALIVE) {
                        IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                        isr_stats->alive++;
                }
        }
#endif
        /* Safely ignore these bits for debug checks below */
        inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

        /* HW RF KILL switch toggled */
        if (inta & CSR_INT_BIT_RF_KILL) {
                bool hw_rfkill;

                hw_rfkill = iwl_is_rfkill_set(trans);
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");

                isr_stats->rfkill++;

                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans_pcie->status);
                        if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
                                               &trans_pcie->status))
                                IWL_DEBUG_RF_KILL(trans,
                                                  "Rfkill while SYNC HCMD in flight\n");
                        wake_up(&trans_pcie->wait_command_queue);
                } else {
                        clear_bit(STATUS_RFKILL, &trans_pcie->status);
                }

                handled |= CSR_INT_BIT_RF_KILL;
        }

        /* Chip got too hot and stopped itself */
        if (inta & CSR_INT_BIT_CT_KILL) {
                IWL_ERR(trans, "Microcode CT kill error detected.\n");
                isr_stats->ctkill++;
                handled |= CSR_INT_BIT_CT_KILL;
        }

        /* Error detected by uCode */
        if (inta & CSR_INT_BIT_SW_ERR) {
                IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n",
                        inta);
                isr_stats->sw++;
                iwl_pcie_irq_handle_error(trans);
                handled |= CSR_INT_BIT_SW_ERR;
        }

        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
                iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
                for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
                        iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

                isr_stats->wakeup++;

                handled |= CSR_INT_BIT_WAKEUP;
        }

        /* All uCode command responses, including Tx command responses,
         * Rx "responses" (frame-received notification), and other
         * notifications from uCode come through here */
        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
                    CSR_INT_BIT_RX_PERIODIC)) {
                IWL_DEBUG_ISR(trans, "Rx interrupt\n");
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
                        handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
                        iwl_write32(trans, CSR_FH_INT_STATUS,
                                    CSR_FH_INT_RX_MASK);
                }
                if (inta & CSR_INT_BIT_RX_PERIODIC) {
                        handled |= CSR_INT_BIT_RX_PERIODIC;
                        iwl_write32(trans,
                                    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
                }
                /* Sending an RX interrupt requires many steps to be done in
                 * the device:
                 * 1- write interrupt to current index in ICT table.
                 * 2- dma RX frame.
                 * 3- update RX shared data to indicate last write index.
                 * 4- send interrupt.
                 * This could lead to an RX race: the driver could receive the
                 * RX interrupt before the shared data changes reflect it;
                 * the periodic interrupt will detect any dangling Rx activity.
                 */

                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                           CSR_INT_PERIODIC_DIS);

                iwl_pcie_rx_handle(trans);

                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
                 * any dangling Rx interrupt. If it was just the periodic
                 * interrupt, there was no dangling Rx activity, and no need
                 * to extend the periodic interrupt; one-shot is enough.
                 */
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
                        iwl_write8(trans, CSR_INT_PERIODIC_REG,
                                   CSR_INT_PERIODIC_ENA);

                isr_stats->rx++;
        }

        /* This "Tx" DMA channel is used only for loading uCode */
        if (inta & CSR_INT_BIT_FH_TX) {
                iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
                IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
                isr_stats->tx++;
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
        }

        if (inta & ~handled) {
                IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
                isr_stats->unhandled++;
        }

        if (inta & ~(trans_pcie->inta_mask)) {
                IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
                         inta & ~trans_pcie->inta_mask);
        }

        /* Re-enable all interrupts */
        /* only re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
                iwl_enable_interrupts(trans);
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(trans);
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

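/*
 * Illustrative note (not part of the driver): with ICT_SHIFT = 12,
 * ICT_SIZE is 1 << 12 = 4096 bytes (one PCI-E page) and ICT_COUNT is
 * 4096 / sizeof(u32) = 1024 table entries.
 */
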
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->ict_tbl) {
                dma_free_coherent(trans->dev, ICT_SIZE,
                                  trans_pcie->ict_tbl,
                                  trans_pcie->ict_tbl_dma);
                trans_pcie->ict_tbl = NULL;
                trans_pcie->ict_tbl_dma = 0;
        }
}

/*
 * Allocate the DRAM-shared table: an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans_pcie->ict_tbl =
                dma_alloc_coherent(trans->dev, ICT_SIZE,
                                   &trans_pcie->ict_tbl_dma,
                                   GFP_KERNEL);
        if (!trans_pcie->ict_tbl)
                return -ENOMEM;

        /* just an API sanity check ... it is guaranteed to be aligned */
        if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
                iwl_pcie_free_ict(trans);
                return -EINVAL;
        }

        IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
                      (unsigned long long)trans_pcie->ict_tbl_dma);

        IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

        /* reset table and index to all 0 */
        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
        trans_pcie->ict_index = 0;

        /* add periodic RX interrupt */
        trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
        return 0;
}

/* Device is going up: inform it that it should use the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 val;
        unsigned long flags;

        if (!trans_pcie->ict_tbl)
                return;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_disable_interrupts(trans);

        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

        val |= CSR_DRAM_INT_TBL_ENABLE;
        val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

        iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
        trans_pcie->use_ict = true;
        trans_pcie->ict_index = 0;
        iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
        iwl_enable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down; disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        trans_pcie->use_ict = false;
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
        u32 inta_fh;
#endif

        lockdep_assert_held(&trans_pcie->irq_lock);

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here. */
        inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Discover which interrupts are active/pending */
        inta = iwl_read32(trans, CSR_INT);

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        if (!inta) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                /* Hardware disappeared. It might have already raised
                 * an interrupt */
                IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
                return IRQ_HANDLED;
        }

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
                IWL_DEBUG_ISR(trans,
                              "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
                              inta, inta_mask, inta_fh);
        }
#endif

        trans_pcie->inta |= inta;
        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
        if (likely(inta))
                tasklet_schedule(&trans_pcie->irq_tasklet);
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta)
                iwl_enable_interrupts(trans);

none:
        /* re-enable interrupts here since we don't have anything to service. */
        /* only re-enable if disabled by irq and no tasklet was scheduled. */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        return IRQ_NONE;
}

/*
 * Interrupt handler using the ICT table. With this handler the driver stops
 * using the INTA register to learn about the device's interrupts (reading
 * that register is expensive): the device writes interrupts into the ICT
 * DRAM table, increments its index and then fires an interrupt to the
 * driver. The driver ORs all ICT table entries from the current index up to
 * the first entry with a 0 value; the result is the interrupt set that needs
 * to be serviced. The driver then sets the entries back to 0 and updates the
 * index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie;
        u32 inta, inta_mask;
        u32 val = 0;
        u32 read;
        unsigned long flags;

        if (!trans)
                return IRQ_NONE;

        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* dram interrupt table not set yet,
         * use legacy interrupt.
         */
        if (unlikely(!trans_pcie->use_ict)) {
                irqreturn_t ret = iwl_pcie_isr(irq, data);
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return ret;
        }

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here.
         */
        inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
        trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
        if (!read) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        /*
         * Collect all entries up to the first 0, starting from ict_index;
         * note we already read at ict_index.
         */
        do {
                val |= read;
                IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
                              trans_pcie->ict_index, read);
                trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
                trans_pcie->ict_index =
                        iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

                read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
                trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
                                           read);
        } while (read);

        /* We should not get this value, just ignore it. */
        if (val == 0xffffffff)
                val = 0;

        /*
         * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
         * (bit 15 before shifting it to 31) to clear when using interrupt
         * coalescing. fortunately, bits 18 and 19 stay set when this happens
         * so we use them to decide on the real state of the Rx bit.
         * In other words, bit 15 is set if bit 18 or bit 19 are set.
         */
        if (val & 0xC0000)
                val |= 0x8000;

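        /*
         * Each ICT entry packs the 32 CSR_INT bits into 16: bits 0-7 map
         * straight through, and bits 8-15 correspond to CSR_INT bits 24-31.
         * Expand the packed value back into the CSR_INT layout below.
         */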
        inta = (0xff & val) | ((0xff00 & val) << 16);
        IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
                      inta, inta_mask, val);

        inta &= trans_pcie->inta_mask;
        trans_pcie->inta |= inta;

        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
        if (likely(inta))
                tasklet_schedule(&trans_pcie->irq_tasklet);
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta) {
                /* Re-enable interrupts here: they were disabled by this
                 * handler and no tasklet was scheduled. If a tasklet had
                 * been scheduled, it would re-enable them itself.
                 */
                iwl_enable_interrupts(trans);
        }

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return IRQ_HANDLED;

none:
        /* re-enable interrupts here since we don't have anything to service.
         * only re-enable if disabled by irq.
         */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return IRQ_NONE;
}