/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}
static void ioat1_cleanup_tasklet(unsigned long data);
/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       void (*timer_fn)(unsigned long),
		       void (*tasklet)(unsigned long),
		       unsigned long ioat)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = timer_fn;
	chan->timer.data = ioat;
	tasklet_init(&chan->cleanup_task, tasklet, ioat);
	tasklet_disable(&chan->cleanup_task);
}
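/*
 * Note that the cleanup tasklet starts out disabled; it is only enabled
 * from ->device_alloc_chan_resources() once the channel's descriptor list
 * and completion writeback area actually exist.
 */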
static void ioat1_timer_event(unsigned long data);
/**
 * ioat1_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_timer_event,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
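/*
 * Submission works by appending to the live hardware chain: the physical
 * address of the new descriptor chain is written into the 'next' field of
 * the current chain tail, the software list is spliced onto used_desc, and
 * the APPEND command is deferred until enough work has accumulated (see
 * ioat_pending_level above).
 */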
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
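/*
 * The completion writeback area allocated above is a per-channel u64 that
 * the hardware updates with the address of the last completed descriptor
 * plus status bits; cleanup reads it (see ioat_get_current_completion())
 * instead of polling CHANSTS over MMIO.
 */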
/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}
/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	return new;
}
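/*
 * GFP_ATOMIC is used above because callers hold ioat->desc_lock (a
 * BH-disabling spinlock), so sleeping allocations are not allowed here.
 */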
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			/* chain the descriptors */
			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
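/*
 * Illustrative sketch (not part of this driver): a dmaengine client using
 * this prep routine would typically do something like
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *
 * where dst and src are DMA addresses the client has already mapped.
 */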
static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
		       int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}

void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
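/*
 * Returning false means the hardware has not advanced past the completion
 * address software last saw; returning true re-arms the completion watchdog
 * and clears IOAT_COMPLETION_ACK so the timer does not escalate to a reset
 * while progress is still being made.
 */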
static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}
/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
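/*
 * The channel timer thus serves two roles: after a reset it restarts the
 * hardware at the last known descriptor, and while work is outstanding it
 * acts as a completion watchdog that escalates from a manual completion
 * update, to an acknowledged stall, to a full channel reset.
 */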
static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);
	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}
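/*
 * The "null" descriptor copies nothing; it only gives the channel a valid
 * chain head to start from, so real descriptors can later be appended
 * behind it without ever handing the hardware an empty chain.
 */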
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return -EFAULT;
}
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}
int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}
int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}
void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}