ioat: implement a private tx_list
drivers/dma/ioat/dma.c
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

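/*
 * Note: ioat1_tx_submit() below only rings the doorbell (CHANCMD_APPEND)
 * once ioat->pending crosses this high-water mark; shorter bursts are
 * flushed when the client calls device_issue_pending().
 */
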
/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat1_cleanup_tasklet(unsigned long data);

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       void (*timer_fn)(unsigned long),
		       void (*tasklet)(unsigned long),
		       unsigned long ioat)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = timer_fn;
	chan->timer.data = ioat;
	tasklet_init(&chan->cleanup_task, tasklet, ioat);
	tasklet_disable(&chan->cleanup_task);
}

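/*
 * The cleanup tasklet is registered above but left disabled; it is only
 * enabled once ioat1_dma_alloc_chan_resources() has allocated the
 * descriptor pool and completion writeback area it operates on.
 */
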
static void ioat1_timer_event(unsigned long data);

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_timer_event,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                  descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

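/*
 * Note on the private tx_list: ioat1_dma_prep_memcpy() builds a chain of
 * hardware descriptors on desc->tx_list (one per xfercap-sized chunk).
 * tx_submit() above links the chain's first descriptor after the current
 * hardware tail and then splices the whole list onto ioat->used_desc, so
 * the channel-wide list never sees a half-constructed operation.
 */
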
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

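/*
 * Descriptor lifecycle from here on: ioat->free_desc feeds prep_memcpy(),
 * submitted chains live on ioat->used_desc, and the cleanup path moves
 * acked descriptors back to free_desc once the hardware reports them done
 * via the completion writeback area programmed above.
 */
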
/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

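/*
 * Illustrative dmaengine client flow (a sketch, not part of this driver;
 * chan/dst/src/len are assumed to come from the client):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						  DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);               (ioat1_tx_submit above)
 *	chan->device->device_issue_pending(chan); (flush a short burst)
 *
 * A copy longer than ioat->xfercap is transparently split across several
 * hardware descriptors; only the last carries the cookie and callback.
 */
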
static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
		       int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}

void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

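/*
 * The CHANSTS writeback read above encodes both the channel status bits
 * and the physical address of the last completed hardware descriptor;
 * __cleanup() below walks used_desc up to that address rather than
 * polling per-descriptor status.
 */
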
static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended, relying on the terminal timeout to force cleanup and catch
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

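/*
 * The timer above escalates in steps: first try a normal cleanup pass;
 * if no progress was made but the completion was already acknowledged
 * once (IOAT_COMPLETION_ACK), assume the channel is wedged and reset it;
 * otherwise record the ACK and re-arm for one more COMPLETION_TIMEOUT.
 */
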
static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}

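/*
 * The null descriptor written above gives the channel a valid chain head
 * to start from and leaves one descriptor resident on used_desc, which
 * __cleanup() deliberately preserves so that tx_submit() always has a
 * tail to append to.
 */
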
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

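/*
 * ioat_dma_setup_interrupts() below falls back in order: per-channel
 * MSI-X, then single-vector MSI-X, then MSI, then legacy INTx; whichever
 * mode sticks determines how completions are demultiplexed to the
 * cleanup tasklets.
 */
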
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

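/*
 * Probe order above matters: ioat_probe() allocates pools, sets up
 * interrupts and runs the self test before ioat_register() exposes the
 * channels through dma_async_device_register(); DCA setup is optional
 * and attempted last.
 */
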
void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}