/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");
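
/*
 * Ring sizing policy: each channel gets a power-of-2 descriptor ring of
 * 2^ioat_ring_alloc_order entries (256 with the default order of 8).
 * reshape_ring() below grows the ring on demand, up to
 * 2^ioat_ring_max_alloc_order entries, and shrinks an idle ring back
 * toward the default -- a summary of the policy implemented in this
 * file, not a hardware constraint.
 */
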
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *chan)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

	spin_lock_bh(&ioat->ring_lock);
	if (ioat->pending == 1)
		__ioat2_issue_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Set pending to '1' unless pending is already set to '2'; pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset.  If we are already above the ioat_pending_level threshold then
 * just issue pending.
 *
 * Called with ring_lock held.
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (unlikely(ioat->pending == 2))
		return;
	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
	else
		ioat->pending = 1;
}

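/*
 * A NULL descriptor moves no data but still generates a completion
 * write.  __ioat2_start_null_desc() uses one to (re)prime the engine:
 * the chain address register is pointed at the descriptor and a single
 * harmless operation is issued so the channel starts fetching from the
 * ring -- a reading of the code below; the non-zero buffer size is
 * required because the hardware errors on zero-length descriptors.
 */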
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	int idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

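/*
 * __cleanup() walks forward from ->tail, unmapping buffers and firing
 * callbacks for each completed descriptor, until it passes the
 * descriptor whose physical address matches the completion writeback
 * (phys_complete); ->tail then advances past everything reclaimed.
 * The BUG_ON below asserts that the written-back address always
 * belongs to a descriptor in the active span.
 */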
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

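/*
 * Note on locking: both locks in ioat2_cleanup() are taken with
 * trylock.  Cleanup runs from tasklet context and is best effort; if
 * either lock is contended the pass is simply skipped, on the
 * assumption that whoever holds the lock is making equivalent forward
 * progress and that a later interrupt or timer tick will retry -- an
 * inference from the surrounding code, not a documented guarantee.
 */
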
/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat2+ channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

void ioat2_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

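/*
 * Recovery sequence: suspend the channel, spin until it actually
 * quiesces, reclaim whatever work did complete, then re-program the
 * chain address at ->tail and re-issue the remainder (or kick a NULL
 * descriptor if nothing is pending) -- a summary of
 * __ioat2_restart_chan() above and the function below.
 */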
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		status = ioat_chansts(chan);
		cpu_relax();
	}

	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

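/*
 * Timer event: while a completion is pending this acts as a watchdog --
 * check for a halted channel (BUG on programming errors), reclaim any
 * progress, and restart the channel if a previously acknowledged
 * completion has still made none.  When the channel is idle it instead
 * steps an oversized ring back down toward the default size (a summary
 * of the handler below).
 */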
void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
				__func__, chanerr);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat2_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order - 1);
		spin_unlock_bh(&ioat->ring_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  device->timer_fn,
				  device->cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
	}
	dma->chancnt = i;
	return i;
}

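/*
 * Locking convention: the prep routines (via ioat2_alloc_and_lock())
 * return to the caller with ring_lock still held, and it is this
 * tx_submit callback that finally drops it.  Holding the lock across
 * prep and submit is what keeps cookie assignment in ring order (see
 * the "we leave the channel locked" note in
 * ioat2_dma_prep_memcpy_lock() below).
 */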
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);

	return cookie;
}

static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_alloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}
	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

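/*
 * The software ring is an array of ioat_ring_ent pointers; the
 * hardware ring is formed by chaining each descriptor's 'next' field
 * to the physical address of its successor, with the last entry
 * pointing back at the first.  E.g. order == 2 yields 4 descriptors
 * hw-linked 0 -> 1 -> 2 -> 3 -> 0 (illustrative numbers only).
 */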
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

/**
 * ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u32 chanerr;
	int order;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->pending = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return 1 << ioat->alloc_order;
}

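/*
 * reshape_ring() is called from ioat2_timer_event() to shrink an idle
 * ring and from ioat2_alloc_and_lock() to grow one under load.  The
 * (ioat->tail + i) & (size - 1) remapping below keeps the live span
 * contiguous modulo the new size: e.g. growing from 4 to 8 entries
 * with tail == 2, old slots 2,3,0,1 land in new slots 2,3,4,5
 * (illustrative numbers only).
 */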
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u16 curr_size = ioat2_ring_mask(ioat) + 1;
	const u16 active = ioat2_ring_active(ioat);
	const u16 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
		if (reshape_ring(ioat, ioat->alloc_order + 1) &&
		    ioat2_ring_space(ioat) > num_descs)
			break;

		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* progress reclaim in the allocation failure case; we
		 * may be called under bh_disabled so we need to trigger
		 * the timer event directly
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (jiffies > chan->timer.expires &&
		    timer_pending(&chan->timer)) {
			struct ioatdma_device *device = chan->device;

			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
			spin_unlock_bh(&chan->cleanup_lock);
			device->timer_fn((unsigned long) ioat);
		} else
			spin_unlock_bh(&chan->cleanup_lock);
		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0; /* with ioat->ring_lock held */
}

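/*
 * Copies larger than the per-descriptor transfer cap are fanned out
 * across chained descriptors of at most 2^xfercap_log bytes each:
 * e.g. with xfercap_log == 20 (1MB) a 3MB request consumes three
 * descriptors, and only the final one carries the caller's flags and
 * requests a completion write.  The 1MB figure is illustrative; the
 * real cap is read from IOAT_XFERCAP_OFFSET at enumeration time.
 */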
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	device->cleanup_tasklet((unsigned long) ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->dmacount = 0;
}

enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioatdma_device *device = ioat->base.device;

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	device->cleanup_tasklet((unsigned long) ioat);

	return ioat_is_complete(c, cookie, done, used);
}

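/* the "& ~1" below folds the 1 that "1 << 0" would report after
 * ioat2_free_chan_resources() has reset alloc_order to 0 back down to
 * 0 -- an inference from this file, not a documented sysfs contract.
 */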
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->cleanup_tasklet = ioat2_cleanup_tasklet;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}