/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1, wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */
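/*
 * A quick numeric illustration of the convention above, assuming the
 * 256-entry queue used by this driver: with READ = 10, the queue is
 * empty when WRITE = 9 and full when WRITE = 10. All index arithmetic
 * wraps modulo RX_QUEUE_SIZE, so "READ - 1" for READ = 0 is 255.
 */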
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *q)
{
        int s = q->read - q->write;
        if (s <= 0)
                s += RX_QUEUE_SIZE;
        /* keep some buffer to not confuse full and empty queue */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}

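/*
 * Example, assuming RX_QUEUE_SIZE = 256: read = 10, write = 200 gives
 * s = 10 - 200 = -190, wrapped to 66, minus the 2-slot guard = 64 free
 * slots. The guard keeps "full" (write == read) distinguishable from
 * "empty" (write == read - 1) per the convention described above.
 */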
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}

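/*
 * The RBD stores the buffer's bus address shifted right by 8 bits, which
 * is why the allocation path below insists the address is 256-byte
 * aligned (and fits in 36 bits): e.g. 0x1a2b3c00 is stored as 0x001a2b3c.
 */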
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&q->lock, flags);

        if (q->need_update == 0)
                goto exit_unlock;

        if (trans->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                /* Device expects a multiple of 8 */
                q->write_actual = (q->write & ~0x7);
                iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
        } else {
                struct iwl_trans_pcie *trans_pcie =
                        IWL_TRANS_GET_PCIE_TRANS(trans);

                /* If power-saving is in use, make sure device is awake */
                if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
                        reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(trans,
                                               "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                               reg);
                                iwl_set_bit(trans, CSR_GP_CNTRL,
                                            CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                goto exit_unlock;
                        }

                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           q->write_actual);

                /* Else device is assumed to be awake */
                } else {
                        /* Device expects a multiple of 8 */
                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           q->write_actual);
                }
        }
        q->need_update = 0;

exit_unlock:
        spin_unlock_irqrestore(&q->lock, flags);
}

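/*
 * The "& ~0x7" above rounds the write index down to a multiple of 8
 * before it is handed to the device: write = 13 is reported as 8, and
 * write = 7 as 0. The remaining slots are simply picked up by a later
 * update once write crosses the next multiple of 8.
 */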
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        unsigned long flags;

        /*
         * If the device isn't enabled - there is no need to try to add
         * buffers... This can happen when we stop the device and still have
         * an interrupt pending. We stop the APM before we sync the
         * interrupts / tasklets because we have to (see comment there). On
         * the other hand, since the APM is stopped, we cannot access the HW
         * (in particular not prph). So don't try to restock if the APM has
         * already been stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
                return;

        spin_lock_irqsave(&rxq->lock, flags);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);

                /* Point to Rx buffer via next RBD in circular buffer */
                rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock_irqrestore(&rxq->lock, flags);
        /* If the pre-allocated buffer pool is dropping low, schedule to
         * refill it */
        if (rxq->free_count <= RX_LOW_WATERMARK)
                schedule_work(&trans_pcie->rx_replenish);

        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock_irqsave(&rxq->lock, flags);
                rxq->need_update = 1;
                spin_unlock_irqrestore(&rxq->lock, flags);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        }
}

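/*
 * Note the wrap in the loop above: with RX_QUEUE_MASK = RX_QUEUE_SIZE - 1
 * = 255, "(rxq->write + 1) & RX_QUEUE_MASK" takes write from 255 back to
 * 0, which is what makes the RBD array behave as a circular buffer.
 */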
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
        unsigned long flags;
        gfp_t gfp_mask = priority;

        while (1) {
                spin_lock_irqsave(&rxq->lock, flags);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&rxq->lock, flags);

                if (rxq->free_count > RX_LOW_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                if (trans_pcie->rx_page_order > 0)
                        gfp_mask |= __GFP_COMP;

                /* Alloc a new receive buffer */
                page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
                if (!page) {
                        if (net_ratelimit())
                                IWL_DEBUG_INFO(trans,
                                               "alloc_pages failed, order: %d\n",
                                               trans_pcie->rx_page_order);

                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
                            net_ratelimit())
                                IWL_CRIT(trans,
                                         "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
                                         priority == GFP_ATOMIC ?
                                         "GFP_ATOMIC" : "GFP_KERNEL",
                                         rxq->free_count);
                        /* We don't reschedule replenish work here -- we will
                         * call the restock method and if it still needs
                         * more buffers it will schedule replenish */
                        return;
                }

                spin_lock_irqsave(&rxq->lock, flags);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                spin_unlock_irqrestore(&rxq->lock, flags);

                BUG_ON(rxb->page);
                rxb->page = page;
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
                        spin_lock_irqsave(&rxq->lock, flags);
                        list_add(&rxb->list, &rxq->rx_used);
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                /* dma address must be no more than 36 bits */
                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                /* and also 256 byte aligned! */
                BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

                spin_lock_irqsave(&rxq->lock, flags);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock_irqrestore(&rxq->lock, flags);
        }
}

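/*
 * The two BUG_ONs above encode the RBD format contract: DMA_BIT_MASK(36)
 * is 0xFFFFFFFFF, so the address must fit in 36 bits, and DMA_BIT_MASK(8)
 * is 0xFF, so the low 8 bits must be zero (256-byte alignment) -- exactly
 * what iwl_pcie_dma_addr2rbd_ptr's ">> 8" assumes.
 */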
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
                if (rxq->pool[i].page != NULL) {
                        dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
                                       PAGE_SIZE << trans_pcie->rx_page_order,
                                       DMA_FROM_DEVICE);
                        __free_pages(rxq->pool[i].page,
                                     trans_pcie->rx_page_order);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_pcie_rxq_restock(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
        iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

        iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
        struct iwl_trans_pcie *trans_pcie =
                container_of(data, struct iwl_trans_pcie, rx_replenish);

        iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct device *dev = trans->dev;

        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

        spin_lock_init(&rxq->lock);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                      &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
                                           &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

        if (trans_pcie->rx_buf_size_8k)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         * the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           rb_size |
                           (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

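/*
 * The two shifts above mirror the device's address encodings: the RBD
 * base is programmed as bd_dma >> 8 (the same 256-byte-aligned form the
 * RBDs themselves use), and the status write pointer as rb_stts_dma >> 4,
 * which assumes the status area is at least 16-byte aligned. The coherent
 * DMA allocations in iwl_pcie_rx_alloc comfortably satisfy both.
 */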
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;

        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        INIT_WORK(&trans_pcie->rx_replenish,
                  iwl_pcie_rx_replenish_work);

        iwl_pcie_rxq_free_rbs(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us up so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwl_pcie_rx_replenish(trans);

        iwl_pcie_rx_hw_init(trans, rxq);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        rxq->need_update = 1;
        iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        unsigned long flags;

        /* if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_pcie_rxq_free_rbs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(trans->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
        memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
        rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                  struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        unsigned long flags;
        bool page_stolen = false;
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
        u32 offset = 0;

        if (WARN_ON(!rxb))
                return;

        dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
                struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
                int index, cmd_index, err, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._page = rxb->page,
                        ._page_stolen = false,
                        .truesize = max_len,
                };

                pkt = rxb_addr(&rxcb);

                if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
                        break;

                IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
                             rxcb._offset,
                             get_cmd_string(trans_pcie, pkt->hdr.cmd),
                             pkt->hdr.cmd);

                len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
                len += sizeof(u32); /* account for status word */
                trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
                trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

                /* Reclaim a command buffer only if this packet is a response
                 * to a (driver-originated) command.
                 * If the packet (e.g. Rx frame) originated from uCode,
                 * there is no command buffer to reclaim.
                 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
                 * but apparently a few don't get set; catch them here. */
                reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
                if (reclaim) {
                        int i;

                        for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
                                if (trans_pcie->no_reclaim_cmds[i] ==
                                    pkt->hdr.cmd) {
                                        reclaim = false;
                                        break;
                                }
                        }
                }

                sequence = le16_to_cpu(pkt->hdr.sequence);
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);

                if (reclaim) {
                        struct iwl_pcie_txq_entry *ent;
                        ent = &txq->entries[cmd_index];
                        cmd = ent->copy_cmd;
                        WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
                } else {
                        cmd = NULL;
                }

                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

                if (reclaim) {
                        /* The original command isn't needed any more */
                        kfree(txq->entries[cmd_index].copy_cmd);
                        txq->entries[cmd_index].copy_cmd = NULL;
                        /* nor is the duplicated part of the command */
                        kfree(txq->entries[cmd_index].free_buf);
                        txq->entries[cmd_index].free_buf = NULL;
                }

                /*
                 * After here, we should always check rxcb._page_stolen,
                 * if it is true then one of the handlers took the page.
                 */

                if (reclaim) {
                        /* Invoke any callbacks, transfer the buffer to caller,
                         * and fire off the (possibly) blocking
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
                                iwl_pcie_hcmd_complete(trans, &rxcb, err);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }

                page_stolen |= rxcb._page_stolen;
                offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
        }

        /* page was stolen from us -- free our reference */
        if (page_stolen) {
                __free_pages(rxb->page, trans_pcie->rx_page_order);
                rxb->page = NULL;
        }

        /* Reuse the page if possible. For notification packets and
         * SKBs that fail to Rx correctly, add them back into the
         * rx_free list for reuse later. */
        spin_lock_irqsave(&rxq->lock, flags);
        if (rxb->page != NULL) {
                rxb->page_dma =
                        dma_map_page(trans->dev, rxb->page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        /*
                         * free the page(s) as well to not break
                         * the invariant that the items on the used
                         * list have no page(s)
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
                        list_add_tail(&rxb->list, &rxq->rx_used);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
                list_add_tail(&rxb->list, &rxq->rx_used);
        spin_unlock_irqrestore(&rxq->lock, flags);
}

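/*
 * Each iteration of the loop above consumes one packet and then advances
 * by ALIGN(len, FH_RSCSR_FRAME_ALIGN), since the firmware packs several
 * packets into one receive buffer on aligned boundaries. Assuming the
 * usual 64-byte frame alignment, a 100-byte packet advances offset by 128.
 */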
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 r, i;
        u8 fill_rx = 0;
        u32 count = 8;
        int total_empty;

        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
        r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
        i = rxq->read;

        /* Rx interrupt, but nothing sent from uCode */
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

        /* calculate the total number of frames that need to be restocked
         * after handling RX */
        total_empty = r - rxq->write_actual;
        if (total_empty < 0)
                total_empty += RX_QUEUE_SIZE;

        if (total_empty > (RX_QUEUE_SIZE / 2))
                fill_rx = 1;

        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;

                rxb = rxq->queue[i];
                rxq->queue[i] = NULL;

                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                             r, i, rxb);
                iwl_pcie_rx_handle_rb(trans, rxb);

                i = (i + 1) & RX_QUEUE_MASK;
                /* If there are a lot of unused frames,
                 * restock the Rx queue so ucode won't assert. */
                if (fill_rx) {
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
                                iwl_pcie_rx_replenish_now(trans);
                                count = 0;
                        }
                }
        }

        /* Backtrack one entry */
        rxq->read = i;
        if (fill_rx)
                iwl_pcie_rx_replenish_now(trans);
        else
                iwl_pcie_rxq_restock(trans);
}

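/*
 * Example of the total_empty computation above: closed_rb_num (r) = 10
 * and write_actual = 250 give 10 - 250 = -240, wrapped to 16 empty
 * slots. Only when more than half the queue (128 slots) is empty does
 * fill_rx force an eager replenish every 8 handled packets.
 */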
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
            (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
               APMS_CLK_VAL_MRB_FUNC_MODE) ||
             (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
              APMG_PS_CTRL_VAL_RESET_REQ))) {
                clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
                iwl_op_mode_wimax_active(trans->op_mode);
                wake_up(&trans_pcie->wait_command_queue);
                return;
        }

        iwl_pcie_dump_csr(trans);
        iwl_pcie_dump_fh(trans, NULL);

        set_bit(STATUS_FW_ERROR, &trans_pcie->status);
        clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
        wake_up(&trans_pcie->wait_command_queue);

        iwl_op_mode_nic_error(trans->op_mode);
}

void iwl_pcie_tasklet(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;
        unsigned long flags;
        u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
        u32 inta_mask;
#endif

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* Ack/clear/reset pending uCode interrupts.
         * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
         */
        /* There is a hardware bug in the interrupt mask function that some
         * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
         * they are disabled in the CSR_INT_MASK register. Furthermore the
         * ICT interrupt handling mechanism has another bug that might cause
         * these unmasked interrupts to fail to be detected. We work around
         * both hardware bugs here by ACKing all the possible interrupts so
         * that interrupt coalescing can still be achieved.
         */
        iwl_write32(trans, CSR_INT,
                    trans_pcie->inta | ~trans_pcie->inta_mask);

        inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* just for debug */
                inta_mask = iwl_read32(trans, CSR_INT_MASK);
                IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
                              inta, inta_mask);
        }
#endif

        /* the interrupts are saved in the inta variable; now we can reset
         * trans_pcie->inta */
        trans_pcie->inta = 0;

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
                IWL_ERR(trans, "Hardware error detected. Restarting.\n");

                /* Tell the device to stop sending interrupts */
                iwl_disable_interrupts(trans);

                isr_stats->hw++;
                iwl_pcie_irq_handle_error(trans);

                handled |= CSR_INT_BIT_HW_ERR;

                return;
        }

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
                        IWL_DEBUG_ISR(trans,
                                      "Scheduler finished transmitting the frame/frames.\n");
                        isr_stats->sch++;
                }

                /* Alive notification via Rx interrupt will do the real work */
                if (inta & CSR_INT_BIT_ALIVE) {
                        IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                        isr_stats->alive++;
                }
        }
#endif
        /* Safely ignore these bits for debug checks below */
        inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

        /* HW RF KILL switch toggled */
        if (inta & CSR_INT_BIT_RF_KILL) {
                bool hw_rfkill;

                hw_rfkill = iwl_is_rfkill_set(trans);
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");

                isr_stats->rfkill++;

                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans_pcie->status);
                        if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
                                               &trans_pcie->status))
                                IWL_DEBUG_RF_KILL(trans,
                                                  "Rfkill while SYNC HCMD in flight\n");
                        wake_up(&trans_pcie->wait_command_queue);
                } else {
                        clear_bit(STATUS_RFKILL, &trans_pcie->status);
                }

                handled |= CSR_INT_BIT_RF_KILL;
        }

        /* Chip got too hot and stopped itself */
        if (inta & CSR_INT_BIT_CT_KILL) {
                IWL_ERR(trans, "Microcode CT kill error detected.\n");
                isr_stats->ctkill++;
                handled |= CSR_INT_BIT_CT_KILL;
        }

        /* Error detected by uCode */
        if (inta & CSR_INT_BIT_SW_ERR) {
                IWL_ERR(trans,
                        "Microcode SW error detected. Restarting 0x%X.\n",
                        inta);
                isr_stats->sw++;
                iwl_pcie_irq_handle_error(trans);
                handled |= CSR_INT_BIT_SW_ERR;
        }

        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
                iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
                for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
                        iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

                isr_stats->wakeup++;

                handled |= CSR_INT_BIT_WAKEUP;
        }

        /* All uCode command responses, including Tx command responses,
         * Rx "responses" (frame-received notification), and other
         * notifications from uCode come through here */
        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
                    CSR_INT_BIT_RX_PERIODIC)) {
                IWL_DEBUG_ISR(trans, "Rx interrupt\n");
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
                        handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
                        iwl_write32(trans, CSR_FH_INT_STATUS,
                                    CSR_FH_INT_RX_MASK);
                }
                if (inta & CSR_INT_BIT_RX_PERIODIC) {
                        handled |= CSR_INT_BIT_RX_PERIODIC;
                        iwl_write32(trans,
                                    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
                }
                /* Sending an RX interrupt requires many steps to be done in
                 * the device:
                 * 1- write interrupt to current index in ICT table.
                 * 2- dma RX frame.
                 * 3- update RX shared data to indicate last write index.
                 * 4- send interrupt.
                 * This could lead to an RX race: the driver could receive the
                 * RX interrupt before the shared data changes reflect it;
                 * the periodic interrupt will detect any dangling Rx activity.
                 */

                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                           CSR_INT_PERIODIC_DIS);

                iwl_pcie_rx_handle(trans);

                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
                 * any dangling Rx interrupt. If it was just the periodic
                 * interrupt, there was no dangling Rx activity, and no need
                 * to extend the periodic interrupt; one-shot is enough.
                 */
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
                        iwl_write8(trans, CSR_INT_PERIODIC_REG,
                                   CSR_INT_PERIODIC_ENA);

                isr_stats->rx++;
        }

        /* This "Tx" DMA channel is used only for loading uCode */
        if (inta & CSR_INT_BIT_FH_TX) {
                iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
                IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
                isr_stats->tx++;
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
        }

        if (inta & ~handled) {
                IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
                isr_stats->unhandled++;
        }

        if (inta & ~(trans_pcie->inta_mask)) {
                IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
                         inta & ~trans_pcie->inta_mask);
        }

        /* Re-enable all interrupts */
        /* only re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
                iwl_enable_interrupts(trans);
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(trans);
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT       12
#define ICT_SIZE        (1 << ICT_SHIFT)
#define ICT_COUNT       (ICT_SIZE / sizeof(u32))

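/*
 * So ICT_SIZE is 1 << 12 = 4096 bytes (one device page), and ICT_COUNT
 * is 4096 / sizeof(u32) = 1024 table entries, each holding one 32-bit
 * interrupt vector written by the device.
 */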
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->ict_tbl) {
                dma_free_coherent(trans->dev, ICT_SIZE,
                                  trans_pcie->ict_tbl,
                                  trans_pcie->ict_tbl_dma);
                trans_pcie->ict_tbl = NULL;
                trans_pcie->ict_tbl_dma = 0;
        }
}

/*
 * Allocate the DRAM shared table: an aligned memory block of ICT_SIZE.
 * Also reset all data related to ICT table interrupts.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans_pcie->ict_tbl =
                dma_alloc_coherent(trans->dev, ICT_SIZE,
                                   &trans_pcie->ict_tbl_dma,
                                   GFP_KERNEL);
        if (!trans_pcie->ict_tbl)
                return -ENOMEM;

        /* just an API sanity check ... it is guaranteed to be aligned */
        if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
                iwl_pcie_free_ict(trans);
                return -EINVAL;
        }

        IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
                      (unsigned long long)trans_pcie->ict_tbl_dma);

        IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

        /* reset table and index to all 0 */
        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
        trans_pcie->ict_index = 0;

        /* add periodic RX interrupt */
        trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
        return 0;
}

/* Device is going up: inform it that it is using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 val;
        unsigned long flags;

        if (!trans_pcie->ict_tbl)
                return;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_disable_interrupts(trans);

        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

        val |= CSR_DRAM_INT_TBL_ENABLE;
        val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

        iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
        trans_pcie->use_ict = true;
        trans_pcie->ict_index = 0;
        iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
        iwl_enable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        trans_pcie->use_ict = false;
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
        u32 inta_fh;
#endif

        lockdep_assert_held(&trans_pcie->irq_lock);

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here. */
        inta_mask = iwl_read32(trans, CSR_INT_MASK);
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Discover which interrupts are active/pending */
        inta = iwl_read32(trans, CSR_INT);

        if (inta & (~inta_mask)) {
                IWL_DEBUG_ISR(trans,
                              "We got a masked interrupt (0x%08x)...Ack and ignore\n",
                              inta & (~inta_mask));
                iwl_write32(trans, CSR_INT, inta & (~inta_mask));
                inta &= inta_mask;
        }

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        if (!inta) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                /* Hardware disappeared. It might have already raised
                 * an interrupt */
                IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
                return IRQ_HANDLED;
        }

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
                IWL_DEBUG_ISR(trans,
                              "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
                              inta, inta_mask, inta_fh);
        }
#endif

        trans_pcie->inta |= inta;
        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
        if (likely(inta))
                tasklet_schedule(&trans_pcie->irq_tasklet);
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta)
                iwl_enable_interrupts(trans);
        return IRQ_HANDLED;

none:
        /* re-enable interrupts here since we don't have anything to service. */
        /* only re-enable if disabled by irq and no tasklet is scheduled. */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler the driver
 * stops using the INTA register to learn the device's interrupts, as
 * reading that register is expensive. Instead, the device writes
 * interrupts into the ICT DRAM table and increments an index, then fires
 * the interrupt to the driver. The driver ORs all ICT table entries from
 * the current index up to the first entry with a 0 value; the result is
 * the interrupt mask to service. The driver then sets the entries back
 * to 0 and updates the index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie;
        u32 inta, inta_mask;
        u32 val = 0;
        u32 read;
        unsigned long flags;

        if (!trans)
                return IRQ_NONE;

        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* dram interrupt table not set yet,
         * use legacy interrupt.
         */
        if (unlikely(!trans_pcie->use_ict)) {
                irqreturn_t ret = iwl_pcie_isr(irq, data);
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return ret;
        }

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here.
         */
        inta_mask = iwl_read32(trans, CSR_INT_MASK);
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
        trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
        if (!read) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        /*
         * Collect all entries up to the first 0, starting from ict_index;
         * note we already read at ict_index.
         */
        do {
                val |= read;
                IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
                              trans_pcie->ict_index, read);
                trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
                trans_pcie->ict_index =
                        iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

                read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
                trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
                                           read);
        } while (read);

        /* We should not get this value, just ignore it. */
        if (val == 0xffffffff)
                val = 0;

        /*
         * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
         * (bit 15 before shifting it to 31) to clear when using interrupt
         * coalescing. Fortunately, bits 18 and 19 stay set when this happens
         * so we use them to decide on the real state of the Rx bit.
         * In other words, bit 15 is set if bit 18 or bit 19 is set.
         */
        if (val & 0xC0000)
                val |= 0x8000;

        inta = (0xff & val) | ((0xff00 & val) << 16);
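        /* The ICT entry packs the 32 CSR_INT bits into 16: bits 0-7 stay in
         * place and bits 8-15 come from CSR_INT bits 24-31, so the unpacking
         * above maps e.g. val 0x8000 back to bit 31 (the FH Rx interrupt). */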
        IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
                      inta, inta_mask, val);

        inta &= trans_pcie->inta_mask;
        trans_pcie->inta |= inta;

        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
        if (likely(inta))
                tasklet_schedule(&trans_pcie->irq_tasklet);
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta) {
                /* Re-enable interrupts here: they were disabled by this
                 * handler and no tasklet was scheduled; had one been
                 * scheduled, the tasklet would re-enable them instead.
                 */
                iwl_enable_interrupts(trans);
        }

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return IRQ_HANDLED;

none:
        /* re-enable interrupts here since we don't have anything to service.
         * only re-enable if disabled by irq.
         */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return IRQ_NONE;
}