/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"
int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}
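
/* ioat2_issue_pending - dmaengine ->issue_pending hook
 *
 * Takes ring_lock and flushes any descriptors logged by
 * ioat2_update_pending() out to the hardware.
 */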
void ioat2_issue_pending(struct dma_chan *chan)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

	spin_lock_bh(&ioat->ring_lock);
	if (ioat->pending == 1)
		__ioat2_issue_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}
/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * set pending to '1' unless pending is already set to '2', pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset.  If we are already above the ioat_pending_level threshold then
 * just issue pending.
 *
 * called with ring_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (unlikely(ioat->pending == 2))
		return;
	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
	else
		ioat->pending = 1;
}
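
/* __ioat2_start_null_desc - kick the channel with a NULL descriptor
 *
 * Consumes one ring slot for a no-op descriptor, points the channel's
 * chain address at it, and issues it.  Called with ring_lock held.
 */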
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	u16 idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}
static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}
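
/* __cleanup - reclaim descriptors up to the last completed address
 *
 * Walks the ring from tail toward head, unmapping buffers and running
 * client callbacks until the descriptor whose physical address matches
 * phys_complete has been processed.  Called with ring_lock held.
 */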
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}
		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}
/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}
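
/* ioat2_cleanup_tasklet - bottom half for descriptor cleanup
 *
 * Runs the cleanup pass and then restores the IOAT_CHANCTRL_RUN bits in
 * the channel control register.
 */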
void ioat2_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
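
/* __ioat2_restart_chan - reprogram and restart a quiesced channel
 *
 * Rewinds ->issued to ->tail so unacknowledged descriptors get reissued,
 * restarts the completion timer, and either points the chain address at
 * the tail descriptor or emits a null descriptor if nothing is pending.
 * Called with ring_lock held.
 */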
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}
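
/* ioat2_quiesce - suspend the channel and wait for it to go inactive
 *
 * Polls CHANSTS until the channel leaves the active/idle states or until
 * tmo jiffies have elapsed.  Returns 0 on success or -ETIMEDOUT.
 */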
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}

		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}
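
/* ioat2_reset_sync - request a channel reset and wait for it to complete
 *
 * Returns 0 once the reset is no longer pending, or -ETIMEDOUT if it is
 * still pending after tmo jiffies.
 */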
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		udelay(25);
	}

	return err;
}
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}
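
/* ioat2_timer_event - completion watchdog and ring-shrink timer
 *
 * While a completion is pending: check for a halted channel, advance the
 * cleanup state when possible, and escalate to a channel restart once a
 * timeout has already been acknowledged without progress.  Otherwise the
 * ring is idle, so step the allocation order back toward the default.
 */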
void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
				__func__, chanerr);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat2_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order-1);
		spin_unlock_bh(&ioat->ring_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
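
/* ioat2_reset_hw - quiesce the channel, clear any logged errors, and reset */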
static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}
/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  device->timer_fn,
				  device->cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);

	return cookie;
}
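
/* ioat2_alloc_ring_ent - allocate one software ring entry
 *
 * The hardware descriptor comes from the device's pci_pool so it is
 * DMA-able; the tracking structure comes from the ioat2_cache slab.
 */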
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_alloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}
	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}
static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}
/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	int order;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->pending = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return 1 << ioat->alloc_order;
}
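
/* reshape_ring - grow or shrink the descriptor ring to 1 << order entries
 *
 * Called with ring_lock held.  Returns false (leaving the ring untouched)
 * if the request exceeds the maximum order, if there is no free
 * descriptor, or if the active set would not fit in the smaller ring.
 */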
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u16 curr_size = ioat2_ring_mask(ioat) + 1;
	const u16 active = ioat2_ring_active(ioat);
	const u16 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}
/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
		if (reshape_ring(ioat, ioat->alloc_order + 1) &&
		    ioat2_ring_space(ioat) > num_descs)
			break;

		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* progress reclaim in the allocation failure case we
		 * may be called under bh_disabled so we need to trigger
		 * the timer event directly
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (jiffies > chan->timer.expires &&
		    timer_pending(&chan->timer)) {
			struct ioatdma_device *device = chan->device;

			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
			spin_unlock_bh(&chan->cleanup_lock);
			device->timer_fn((unsigned long) ioat);
		} else
			spin_unlock_bh(&chan->cleanup_lock);
		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0;  /* with ioat->ring_lock held */
}
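
/* ioat2_dma_prep_memcpy_lock - prep a memcpy, splitting the transfer
 * across descriptors when len exceeds the channel's transfer capability
 * (1 << xfercap_log).  Returns with ring_lock held on success; the lock
 * is released by ioat2_tx_submit_unlock().
 */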
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}
/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	device->cleanup_tasklet((unsigned long) ioat);
	device->reset_hw(chan);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->dmacount = 0;
}
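
/* ioat2_is_complete - poll transaction status, kicking the cleanup path
 * if the cookie has not completed yet
 */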
enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioatdma_device *device = ioat->base.device;

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	device->cleanup_tasklet((unsigned long) ioat);

	return ioat_is_complete(c, cookie, done, used);
}
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};
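
/* ioat2_dma_probe - wire up the ioat2-specific operations and register
 * the dma_device with the dmaengine core
 */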
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_tasklet = ioat2_cleanup_tasklet;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}