/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support routines for v3+ hardware
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
				       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
					0, 1, 2, 3, 4, 5, 6 };

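/*
 * Illustrative reading of the tables above (added note): xor_idx_to_desc
 * and pq_idx_to_desc are bitmaps with one bit per source index.  0xe0
 * places xor sources 0-4 in the base descriptor and 5-7 in the extended
 * descriptor: source 3 tests bit 3 (clear), so it is stored at base
 * field[xor_idx_to_field[3]] == field[6], while source 5 tests bit 5
 * (set) and lands at extended field[xor_idx_to_field[5]] == field[0].
 * The pq16 tables instead index three descriptors (base plus the two
 * 64-byte halves of a sed block) directly by value.
 */
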
static void ioat3_eh(struct ioat2_dma_chan *ioat);

static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	return raw->field[pq16_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

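/*
 * Added note: callers may pass a NULL extended descriptor in descs[1]
 * "to save a branch".  That is safe because the bitmaps only select
 * descs[1] for idx >= 5 (xor) or idx >= 3 (pq), and in the single
 * descriptor case the source count guarantees idx never gets that high.
 */
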
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev);
}

static bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}

static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
			 dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
	struct ioat_pq16a_descriptor *pq16 =
		(struct ioat_pq16a_descriptor *)desc[1];
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	raw->field[pq16_idx_to_field[idx]] = addr + offset;

	if (idx < 8)
		pq->coef[idx] = coef;
	else
		pq16->coef[idx - 8] = coef;
}

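/*
 * Added note: galois-field coefficients for the first eight sources live
 * in the base pq descriptor; coefficients for sources 9-16 spill into
 * the pq16a view of the extended block, hence the idx - 8 rebase.
 */
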
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;
	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

	sed = kmem_cache_alloc(device->sed_pool, flags);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
				 flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(device->sed_pool, sed);
		return NULL;
	}

	return sed;
}

static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(device->sed_pool, sed);
}

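/*
 * Added note: a "sed" (super extended descriptor) pairs a host-side
 * tracking struct from sed_pool with a DMA-coherent hardware block from
 * one of the per-size sed_hw_pool dma_pools.  GFP_ATOMIC is used here
 * because, as far as the call sites below show, allocation happens in
 * the descriptor prep path with the ring lock held.
 */
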
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
			ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	case IOAT_OP_PQ_16S:
	case IOAT_OP_PQ_VAL_16S: {
		struct ioat_pq_descriptor *pq = desc->pq;
		int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[4];
		int i;

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *)pq;
			descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
			descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq16_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
	}
}

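/*
 * Added note: the prep routines record per-chunk addresses as
 * base + offset and hang the unmap parameters off the *last* descriptor
 * in the chain, so offset == desc->len - desc->hw->size recovers the
 * original mapping base from the addresses stored in that descriptor.
 */
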
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
				   u64 *phys_complete)
{
	*phys_complete = ioat3_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void
desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if hardware has written back error status */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

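/*
 * Added note: on devices advertising IOAT_CAP_DWBES (descriptor write
 * back error status), pq validate results are reported in the dwbes
 * fields of the completed descriptor itself, letting __cleanup() harvest
 * P/Q mismatches without a channel halt and error interrupt.
 */
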
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat->tail, i;
	u16 active;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's a new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		dump_desc_dbg(ioat, desc);

		/* set err stat if we are using dwbes */
		if (device->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			ioat3_dma_unmap(ioat, desc, idx + i);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat3_free_sed(device, desc->sed);
			desc->sed = NULL;
		}
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}

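/*
 * Added note: the INTRDELAY write above implements simple interrupt
 * coalescing at 5us per still-pending descriptor; for example, 16
 * descriptors left active program an 80us delay, capped by
 * IOAT_INTRDELAY_MASK.
 */
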
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	spin_lock_bh(&chan->cleanup_lock);

	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	if (is_ioat_halted(*chan->completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
			ioat3_eh(ioat);
		}
	}

	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat3_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = to_pdev(chan);
	struct ioat_dma_descriptor *hw;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat2_get_ring_ent(ioat, ioat->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		BUG();
	}

	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	/* mark faulting descriptor as complete */
	*chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat->prep_lock);
	ioat3_restart_channel(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat3_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat3_cleanup(ioat);

	return dma_cookie_status(c, cookie, txstate);
}

static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

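/*
 * Added note: src_data replicates the fill byte across all eight byte
 * lanes of a u64 (0x0101010101010101ULL * byte), and long fills are
 * chunked at the channel transfer cap.  For example, assuming
 * xfercap_log == 20 (1MB), a 2.5MB fill consumes three descriptors, and
 * only the last one carries the interrupt/fence/completion-write flags.
 */
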
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; xor_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}

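/*
 * Added note: for xor validate, src[0] is handed to the hardware as the
 * "dest" operand, which a validate operation reads and compares against
 * the xor of the remaining src_cnt - 1 sources rather than writes (see
 * the "dest is a source in xor validate operations" handling in
 * ioat3_dma_unmap()).
 */
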
static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
			       struct ioat_ring_ent *desc)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_raw_descriptor *descs[] = { (void *)pq,
						(void *)pq,
						(void *)pq };
	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	if (desc->sed) {
		descs[1] = (void *)desc->sed->hw;
		descs[2] = (void *)desc->sed->hw + 64;
	}

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) pq->next,
		desc->txd.flags, pq->size, pq->ctl,
		pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++) {
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq16_get_src(descs, i),
			pq->coef[i]);
	}
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;
	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case)
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; pq_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		/* we turn on descriptor write back error status */
		if (device->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	if (!cb32) {
		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		pq->ctl_f.compl_write = 1;
		compl_desc = desc;
	} else {
		/* completion descriptor carries interrupt bit */
		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
		hw = compl_desc->hw;
		hw->ctl = 0;
		hw->ctl_f.null = 1;
		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		hw->ctl_f.compl_write = 1;
		hw->size = NULL_DESC_BUFFER_SIZE;
		dump_desc_dbg(ioat, compl_desc);
	}

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

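/*
 * Added note: cb32 captures the pre-v3.3 ordering quirk described above,
 * where raid-engine completion writes can pass legacy-engine ones; those
 * devices get a trailing null legacy descriptor to signal completion,
 * while v3.3+ parts set int_en/compl_write on the pq descriptor itself.
 */
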
static struct dma_async_tx_descriptor *
__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
		       const dma_addr_t *dst, const dma_addr_t *src,
		       unsigned int src_cnt, const unsigned char *scf,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	u32 offset = 0;
	u8 op;
	int i, s, idx, num_descs;

	/* this function is only called with 9-16 sources */
	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

	dev_dbg(to_dev(chan), "%s\n", __func__);

	num_descs = ioat2_xferlen_to_descs(ioat, len);

	/*
	 * 16 source pq is only available on cb3.3 and has no completion
	 * write hw bug.
	 */
	if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;

	i = 0;

	do {
		struct ioat_raw_descriptor *descs[4];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		descs[0] = (struct ioat_raw_descriptor *) pq;

		desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
		if (!desc->sed) {
			dev_err(to_dev(chan),
				"%s: no free sed entries\n", __func__);
			return NULL;
		}

		pq->sed_addr = desc->sed->dma;
		desc->sed->parent = desc;

		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
		descs[2] = (void *)descs[1] + 64;

		for (s = 0; s < src_cnt; s++)
			pq16_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq16_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq16_set_src(descs, dst[0], offset, 0, s++);
			pq16_set_src(descs, dst[1], offset, 1, s++);
			pq16_set_src(descs, dst[1], offset, 0, s++);
		}

		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
		/* we turn on descriptor write back error status */
		if (device->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while (++i < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* with cb3.3 we should be able to do completion w/o a null desc */
	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	pq->ctl_f.compl_write = 1;

	dump_pq16_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

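/*
 * Added note on sed pool selection: (src_cnt - 2) >> 3 sizes the
 * extended block by source count, e.g. 9 total sources give pool
 * index 0 while 10 through 16 sources give pool index 1.
 */
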
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if (dmaf_p_disabled_continue(flags))
		return src_cnt + 1;
	else if (dmaf_continue(flags))
		return src_cnt + 3;
	else
		return src_cnt;
}

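/*
 * Added note: src_cnt_flags() yields the effective hardware source
 * count, folding in the implied continuation sources (one extra for a
 * q-only continue, three for a p+q continue); callers compare it
 * against 8 to pick the 8-source or 16-source pq path.
 */
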
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
					       2, single_source_coef, len,
					       flags) :
			__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					     single_source_coef, len, flags);

	} else {
		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
					       scf, len, flags) :
			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
					     scf, len, flags);
	}
}

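/*
 * Added note: the raid6 recovery path can request a q-only multiply of
 * a single source, but the engine needs two sources, so the one source
 * is listed twice with coefficients { scf[0], 0 }; the zero-coefficient
 * term multiplies to zero and does not perturb q.
 */
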
struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				     flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				     flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
				       scf, len, flags) :
		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
				     scf, len, flags);
}

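/*
 * Added note: these wrappers synthesize wide xor on top of the pq
 * engine: q is disabled and all coefficients are zeroed, so the p
 * result, which is the plain xor of the sources, is the only output.
 * This allows xor operations beyond the 8 sources the native xor
 * descriptor format supports.
 */
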
static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_check_space_lock(ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, ioat->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

4bf27b8b | 1408 | static void ioat3_dma_test_callback(void *dma_async_param) |
9de6fc71 DW |
1409 | { |
1410 | struct completion *cmp = dma_async_param; | |
1411 | ||
1412 | complete(cmp); | |
1413 | } | |
1414 | ||
1415 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ | |
4bf27b8b | 1416 | static int ioat_xor_val_self_test(struct ioatdma_device *device) |
9de6fc71 DW |
1417 | { |
1418 | int i, src_idx; | |
1419 | struct page *dest; | |
1420 | struct page *xor_srcs[IOAT_NUM_SRC_TEST]; | |
1421 | struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; | |
1422 | dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; | |
1423 | dma_addr_t dma_addr, dest_dma; | |
1424 | struct dma_async_tx_descriptor *tx; | |
1425 | struct dma_chan *dma_chan; | |
1426 | dma_cookie_t cookie; | |
1427 | u8 cmp_byte = 0; | |
1428 | u32 cmp_word; | |
1429 | u32 xor_val_result; | |
1430 | int err = 0; | |
1431 | struct completion cmp; | |
1432 | unsigned long tmo; | |
1433 | struct device *dev = &device->pdev->dev; | |
1434 | struct dma_device *dma = &device->common; | |
7369f56e | 1435 | u8 op = 0; |
9de6fc71 DW |
1436 | |
1437 | dev_dbg(dev, "%s\n", __func__); | |
1438 | ||
1439 | if (!dma_has_cap(DMA_XOR, dma->cap_mask)) | |
1440 | return 0; | |
1441 | ||
1442 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { | |
1443 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | |
1444 | if (!xor_srcs[src_idx]) { | |
1445 | while (src_idx--) | |
1446 | __free_page(xor_srcs[src_idx]); | |
1447 | return -ENOMEM; | |
1448 | } | |
1449 | } | |
1450 | ||
1451 | dest = alloc_page(GFP_KERNEL); | |
1452 | if (!dest) { | |
1453 | while (src_idx--) | |
1454 | __free_page(xor_srcs[src_idx]); | |
1455 | return -ENOMEM; | |
1456 | } | |
1457 | ||
1458 | /* Fill in src buffers */ | |
1459 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { | |
1460 | u8 *ptr = page_address(xor_srcs[src_idx]); | |
1461 | for (i = 0; i < PAGE_SIZE; i++) | |
1462 | ptr[i] = (1 << src_idx); | |
1463 | } | |
1464 | ||
1465 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) | |
1466 | cmp_byte ^= (u8) (1 << src_idx); | |
1467 | ||
1468 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | | |
1469 | (cmp_byte << 8) | cmp_byte; | |
1470 | ||
1471 | memset(page_address(dest), 0, PAGE_SIZE); | |
1472 | ||
1473 | dma_chan = container_of(dma->channels.next, struct dma_chan, | |
1474 | device_node); | |
1475 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | |
1476 | err = -ENODEV; | |
1477 | goto out; | |
1478 | } | |
1479 | ||
1480 | /* test xor */ | |
7369f56e BZ |
1481 | op = IOAT_OP_XOR; |
1482 | ||
9de6fc71 DW |
1483 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); |
1484 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | |
1485 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, | |
1486 | DMA_TO_DEVICE); | |
1487 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | |
1488 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | |
7369f56e BZ |
1489 | DMA_PREP_INTERRUPT | |
1490 | DMA_COMPL_SKIP_SRC_UNMAP | | |
1491 | DMA_COMPL_SKIP_DEST_UNMAP); | |
9de6fc71 DW |
1492 | |
1493 | if (!tx) { | |
1494 | dev_err(dev, "Self-test xor prep failed\n"); | |
1495 | err = -ENODEV; | |
7369f56e | 1496 | goto dma_unmap; |
9de6fc71 DW |
1497 | } |
1498 | ||
1499 | async_tx_ack(tx); | |
1500 | init_completion(&cmp); | |
1501 | tx->callback = ioat3_dma_test_callback; | |
1502 | tx->callback_param = &cmp; | |
1503 | cookie = tx->tx_submit(tx); | |
1504 | if (cookie < 0) { | |
1505 | dev_err(dev, "Self-test xor setup failed\n"); | |
1506 | err = -ENODEV; | |
7369f56e | 1507 | goto dma_unmap; |
9de6fc71 DW |
1508 | } |
1509 | dma->device_issue_pending(dma_chan); | |
1510 | ||
1511 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | |
1512 | ||
07934481 | 1513 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
9de6fc71 DW |
1514 | dev_err(dev, "Self-test xor timed out\n"); |
1515 | err = -ENODEV; | |
7369f56e | 1516 | goto dma_unmap; |
9de6fc71 DW |
1517 | } |
1518 | ||
7369f56e BZ |
1519 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); |
1520 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | |
1521 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | |
1522 | ||
9de6fc71 DW |
1523 | /* dest is already unmapped above; CPU reads via page_address() are coherent */ | |
1524 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | |
1525 | u32 *ptr = page_address(dest); | |
1526 | if (ptr[i] != cmp_word) { | |
1527 | dev_err(dev, "Self-test xor failed compare\n"); | |
1528 | err = -ENODEV; | |
1529 | goto free_resources; | |
1530 | } | |
1531 | } | |
9de6fc71 DW |
1533 | |
1534 | /* skip validate if the capability is not present */ | |
1535 | if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) | |
1536 | goto free_resources; | |
1537 | ||
7369f56e BZ |
1538 | op = IOAT_OP_XOR_VAL; |
1539 | ||
9de6fc71 DW |
1540 | /* validate the sources with the destination page */ | |
1541 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | |
1542 | xor_val_srcs[i] = xor_srcs[i]; | |
1543 | xor_val_srcs[i] = dest; | |
1544 | ||
1545 | xor_val_result = 1; | |
1546 | ||
1547 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | |
1548 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | |
1549 | DMA_TO_DEVICE); | |
1550 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | |
1551 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | |
7369f56e BZ |
1552 | &xor_val_result, DMA_PREP_INTERRUPT | |
1553 | DMA_COMPL_SKIP_SRC_UNMAP | | |
1554 | DMA_COMPL_SKIP_DEST_UNMAP); | |
9de6fc71 DW |
1555 | if (!tx) { |
1556 | dev_err(dev, "Self-test zero prep failed\n"); | |
1557 | err = -ENODEV; | |
7369f56e | 1558 | goto dma_unmap; |
9de6fc71 DW |
1559 | } |
1560 | ||
1561 | async_tx_ack(tx); | |
1562 | init_completion(&cmp); | |
1563 | tx->callback = ioat3_dma_test_callback; | |
1564 | tx->callback_param = &cmp; | |
1565 | cookie = tx->tx_submit(tx); | |
1566 | if (cookie < 0) { | |
1567 | dev_err(dev, "Self-test zero setup failed\n"); | |
1568 | err = -ENODEV; | |
7369f56e | 1569 | goto dma_unmap; |
9de6fc71 DW |
1570 | } |
1571 | dma->device_issue_pending(dma_chan); | |
1572 | ||
1573 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | |
1574 | ||
07934481 | 1575 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
9de6fc71 DW |
1576 | dev_err(dev, "Self-test validate timed out\n"); |
1577 | err = -ENODEV; | |
7369f56e | 1578 | goto dma_unmap; |
9de6fc71 DW |
1579 | } |
1580 | ||
7369f56e BZ |
1581 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) |
1582 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | |
1583 | ||
9de6fc71 DW |
1584 | if (xor_val_result != 0) { |
1585 | dev_err(dev, "Self-test validate failed compare\n"); | |
1586 | err = -ENODEV; | |
1587 | goto free_resources; | |
1588 | } | |
1589 | ||
1590 | /* skip memset if the capability is not present */ | |
1591 | if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask)) | |
1592 | goto free_resources; | |
1593 | ||
1594 | /* test memset */ | |
7369f56e BZ |
1595 | op = IOAT_OP_FILL; |
1596 | ||
9de6fc71 DW |
1597 | dma_addr = dma_map_page(dev, dest, 0, |
1598 | PAGE_SIZE, DMA_FROM_DEVICE); | |
1599 | tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, | |
7369f56e BZ |
1600 | DMA_PREP_INTERRUPT | |
1601 | DMA_COMPL_SKIP_SRC_UNMAP | | |
1602 | DMA_COMPL_SKIP_DEST_UNMAP); | |
9de6fc71 DW |
1603 | if (!tx) { |
1604 | dev_err(dev, "Self-test memset prep failed\n"); | |
1605 | err = -ENODEV; | |
7369f56e | 1606 | goto dma_unmap; |
9de6fc71 DW |
1607 | } |
1608 | ||
1609 | async_tx_ack(tx); | |
1610 | init_completion(&cmp); | |
1611 | tx->callback = ioat3_dma_test_callback; | |
1612 | tx->callback_param = &cmp; | |
1613 | cookie = tx->tx_submit(tx); | |
1614 | if (cookie < 0) { | |
1615 | dev_err(dev, "Self-test memset setup failed\n"); | |
1616 | err = -ENODEV; | |
7369f56e | 1617 | goto dma_unmap; |
9de6fc71 DW |
1618 | } |
1619 | dma->device_issue_pending(dma_chan); | |
1620 | ||
1621 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | |
1622 | ||
07934481 | 1623 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
9de6fc71 DW |
1624 | dev_err(dev, "Self-test memset timed out\n"); |
1625 | err = -ENODEV; | |
7369f56e | 1626 | goto dma_unmap; |
9de6fc71 DW |
1627 | } |
1628 | ||
7369f56e BZ |
1629 | dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); |
1630 | ||
9de6fc71 DW |
1631 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { |
1632 | u32 *ptr = page_address(dest); | |
1633 | if (ptr[i]) { | |
1634 | dev_err(dev, "Self-test memset failed compare\n"); | |
1635 | err = -ENODEV; | |
1636 | goto free_resources; | |
1637 | } | |
1638 | } | |
1639 | ||
1640 | /* test for non-zero parity sum */ | |
7369f56e BZ |
1641 | op = IOAT_OP_XOR_VAL; |
1642 | ||
9de6fc71 DW |
1643 | xor_val_result = 0; |
1644 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | |
1645 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | |
1646 | DMA_TO_DEVICE); | |
1647 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | |
1648 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | |
7369f56e BZ |
1649 | &xor_val_result, DMA_PREP_INTERRUPT | |
1650 | DMA_COMPL_SKIP_SRC_UNMAP | | |
1651 | DMA_COMPL_SKIP_DEST_UNMAP); | |
9de6fc71 DW |
1652 | if (!tx) { |
1653 | dev_err(dev, "Self-test 2nd zero prep failed\n"); | |
1654 | err = -ENODEV; | |
7369f56e | 1655 | goto dma_unmap; |
9de6fc71 DW |
1656 | } |
1657 | ||
1658 | async_tx_ack(tx); | |
1659 | init_completion(&cmp); | |
1660 | tx->callback = ioat3_dma_test_callback; | |
1661 | tx->callback_param = &cmp; | |
1662 | cookie = tx->tx_submit(tx); | |
1663 | if (cookie < 0) { | |
1664 | dev_err(dev, "Self-test 2nd zero setup failed\n"); | |
1665 | err = -ENODEV; | |
7369f56e | 1666 | goto dma_unmap; |
9de6fc71 DW |
1667 | } |
1668 | dma->device_issue_pending(dma_chan); | |
1669 | ||
1670 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | |
1671 | ||
07934481 | 1672 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
9de6fc71 DW |
1673 | dev_err(dev, "Self-test 2nd validate timed out\n"); |
1674 | err = -ENODEV; | |
7369f56e | 1675 | goto dma_unmap; |
9de6fc71 DW |
1676 | } |
1677 | ||
1678 | if (xor_val_result != SUM_CHECK_P_RESULT) { | |
1679 | dev_err(dev, "Self-test validate failed compare\n"); | |
1680 | err = -ENODEV; | |
7369f56e | 1681 | goto dma_unmap; |
9de6fc71 DW |
1682 | } |
1683 | ||
7369f56e BZ |
1684 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) |
1685 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | |
1686 | ||
1687 | goto free_resources; | |
1688 | dma_unmap: | |
1689 | if (op == IOAT_OP_XOR) { | |
1690 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | |
1691 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | |
1692 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | |
1693 | DMA_TO_DEVICE); | |
1694 | } else if (op == IOAT_OP_XOR_VAL) { | |
1695 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | |
1696 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | |
1697 | DMA_TO_DEVICE); | |
1698 | } else if (op == IOAT_OP_FILL) | |
1699 | dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); | |
9de6fc71 DW |
1700 | free_resources: |
1701 | dma->device_free_chan_resources(dma_chan); | |
1702 | out: | |
1703 | src_idx = IOAT_NUM_SRC_TEST; | |
1704 | while (src_idx--) | |
1705 | __free_page(xor_srcs[src_idx]); | |
1706 | __free_page(dest); | |
1707 | return err; | |
1708 | } | |
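
For reference, the expected pattern in the XOR leg of the test works out as follows: each of the IOAT_NUM_SRC_TEST = 6 source pages is filled with (1 << i), so every destination byte must be 0x3f and every u32 must be 0x3f3f3f3f. A standalone userland model (a sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

#define NUM_SRC 6	/* mirrors IOAT_NUM_SRC_TEST */

int main(void)
{
	uint8_t cmp_byte = 0;
	uint32_t cmp_word;
	int i;

	for (i = 0; i < NUM_SRC; i++)
		cmp_byte ^= (uint8_t)(1 << i);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		   (cmp_byte << 8) | cmp_byte;

	/* prints cmp_byte=0x3f cmp_word=0x3f3f3f3f */
	printf("cmp_byte=0x%02x cmp_word=0x%08x\n", cmp_byte, cmp_word);
	return 0;
}
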
1709 | ||
4bf27b8b | 1710 | static int ioat3_dma_self_test(struct ioatdma_device *device) |
9de6fc71 DW |
1711 | { |
1712 | int rc = ioat_dma_self_test(device); | |
1713 | ||
1714 | if (rc) | |
1715 | return rc; | |
1716 | ||
1717 | rc = ioat_xor_val_self_test(device); | |
1718 | if (rc) | |
1719 | return rc; | |
1720 | ||
1721 | return 0; | |
1722 | } | |
1723 | ||
8a52b9ff DJ |
1724 | static int ioat3_irq_reinit(struct ioatdma_device *device) |
1725 | { | |
1726 | int msixcnt = device->common.chancnt; | |
1727 | struct pci_dev *pdev = device->pdev; | |
1728 | int i; | |
1729 | struct msix_entry *msix; | |
1730 | struct ioat_chan_common *chan; | |
1731 | int err = 0; | |
1732 | ||
1733 | switch (device->irq_mode) { | |
1734 | case IOAT_MSIX: | |
1735 | ||
1736 | for (i = 0; i < msixcnt; i++) { | |
1737 | msix = &device->msix_entries[i]; | |
1738 | chan = ioat_chan_by_index(device, i); | |
1739 | devm_free_irq(&pdev->dev, msix->vector, chan); | |
1740 | } | |
1741 | ||
1742 | pci_disable_msix(pdev); | |
1743 | break; | |
1744 | ||
1745 | case IOAT_MSIX_SINGLE: | |
1746 | msix = &device->msix_entries[0]; | |
1747 | chan = ioat_chan_by_index(device, 0); | |
1748 | devm_free_irq(&pdev->dev, msix->vector, chan); | |
1749 | pci_disable_msix(pdev); | |
1750 | break; | |
1751 | ||
1752 | case IOAT_MSI: | |
1753 | chan = ioat_chan_by_index(device, 0); | |
1754 | devm_free_irq(&pdev->dev, pdev->irq, chan); | |
1755 | pci_disable_msi(pdev); | |
1756 | break; | |
1757 | ||
1758 | case IOAT_INTX: | |
1759 | chan = ioat_chan_by_index(device, 0); | |
1760 | devm_free_irq(&pdev->dev, pdev->irq, chan); | |
1761 | break; | |
1762 | ||
1763 | default: | |
1764 | return 0; | |
1765 | } | |
1766 | ||
1767 | device->irq_mode = IOAT_NOIRQ; | |
1768 | ||
1769 | err = ioat_dma_setup_interrupts(device); | |
1770 | ||
1771 | return err; | |
1772 | } | |
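
Each case above is the teardown recipe for one interrupt mode, but the shape is always the same: free the handler(s) registered for the current mode, disable the PCI-level mechanism, drop to IOAT_NOIRQ, and renegotiate from scratch. Roughly (a sketch; free_mode_irqs() is a hypothetical stand-in for the per-mode cases):

static int irq_reinit_sketch(struct ioatdma_device *device)
{
	/* stands in for the devm_free_irq()/pci_disable_msi{,x}()
	 * cases of the switch above */
	free_mode_irqs(device, device->irq_mode);

	device->irq_mode = IOAT_NOIRQ;
	return ioat_dma_setup_interrupts(device);
}
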
1773 | ||
a6d52d70 DW |
1774 | static int ioat3_reset_hw(struct ioat_chan_common *chan) |
1775 | { | |
1776 | /* throw away whatever the channel was doing and get it | |
1777 | * initialized, with ioat3-specific workarounds | |
1778 | */ | |
1779 | struct ioatdma_device *device = chan->device; | |
1780 | struct pci_dev *pdev = device->pdev; | |
1781 | u32 chanerr; | |
1782 | u16 dev_id; | |
1783 | int err; | |
1784 | ||
1785 | ioat2_quiesce(chan, msecs_to_jiffies(100)); | |
1786 | ||
1787 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | |
1788 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | |
1789 | ||
6ead7e48 DJ |
1790 | if (device->version < IOAT_VER_3_3) { |
1791 | /* clear any pending errors */ | |
1792 | err = pci_read_config_dword(pdev, | |
1793 | IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | |
1794 | if (err) { | |
1795 | dev_err(&pdev->dev, | |
1796 | "channel error register unreachable\n"); | |
1797 | return err; | |
1798 | } | |
1799 | pci_write_config_dword(pdev, | |
1800 | IOAT_PCI_CHANERR_INT_OFFSET, chanerr); | |
a6d52d70 | 1801 | |
6ead7e48 DJ |
1802 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit |
1803 | * (workaround for spurious config parity error after restart) | |
1804 | */ | |
1805 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | |
1806 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { | |
1807 | pci_write_config_dword(pdev, | |
1808 | IOAT_PCI_DMAUNCERRSTS_OFFSET, | |
1809 | 0x10); | |
1810 | } | |
1811 | } | |
a6d52d70 | 1812 | |
8a52b9ff DJ |
1813 | err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); |
1814 | if (err) { | |
1815 | dev_err(&pdev->dev, "Failed to reset!\n"); | |
1816 | return err; | |
570727b5 DJ |
1817 | } |
1818 | ||
8a52b9ff DJ |
1819 | if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) |
1820 | err = ioat3_irq_reinit(device); | |
570727b5 | 1821 | |
8a52b9ff | 1822 | return err; |
570727b5 DJ |
1823 | } |
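
The ordering in ioat3_reset_hw() matters; as a checklist:

/*
 * 1. ioat2_quiesce()     - stop whatever the channel is doing
 * 2. clear IOAT_CHANERR  - the mmio error latch (read, write back)
 * 3. pre-3.3 hardware only: clear the config-space CHANERR_INT latch
 *    and, on TBG0, the spurious config parity-error status
 * 4. ioat2_reset_sync()  - the actual hardware reset
 * 5. ioat3_irq_reinit()  - BWD parts need interrupts re-registered
 *    after a reset
 */
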
1824 | ||
75c6f0ab DJ |
1825 | static void ioat3_intr_quirk(struct ioatdma_device *device) |
1826 | { | |
1827 | struct dma_device *dma; | |
1828 | struct dma_chan *c; | |
1829 | struct ioat_chan_common *chan; | |
1830 | u32 errmask; | |
1831 | ||
1832 | dma = &device->common; | |
1833 | ||
1834 | /* | |
1835 | * if we have descriptor write-back error status, we mask the | |
1836 | * error interrupts | |
1837 | */ | |
1838 | if (device->cap & IOAT_CAP_DWBES) { | |
1839 | list_for_each_entry(c, &dma->channels, device_node) { | |
1840 | chan = to_chan_common(c); | |
1841 | errmask = readl(chan->reg_base + | |
1842 | IOAT_CHANERR_MASK_OFFSET); | |
1843 | errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | | |
1844 | IOAT_CHANERR_XOR_Q_ERR; | |
1845 | writel(errmask, chan->reg_base + | |
1846 | IOAT_CHANERR_MASK_OFFSET); | |
1847 | } | |
1848 | } | |
1849 | } | |
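
DWBES is "descriptor write-back error status": hardware that advertises IOAT_CAP_DWBES reports XOR P/CRC and Q errors in the descriptor completion write-back itself, so the quirk above masks the corresponding, now redundant, error interrupts on every channel with a plain read-modify-write of the IOAT_CHANERR_MASK_OFFSET register.
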
1850 | ||
4bf27b8b | 1851 | int ioat3_dma_probe(struct ioatdma_device *device, int dca) |
bf40a686 DW |
1852 | { |
1853 | struct pci_dev *pdev = device->pdev; | |
228c4f5c | 1854 | int dca_en = system_has_dca_enabled(pdev); |
bf40a686 DW |
1855 | struct dma_device *dma; |
1856 | struct dma_chan *c; | |
1857 | struct ioat_chan_common *chan; | |
e3232714 | 1858 | bool is_raid_device = false; |
bf40a686 | 1859 | int err; |
bf40a686 DW |
1860 | |
1861 | device->enumerate_channels = ioat2_enumerate_channels; | |
a6d52d70 | 1862 | device->reset_hw = ioat3_reset_hw; |
9de6fc71 | 1863 | device->self_test = ioat3_dma_self_test; |
75c6f0ab | 1864 | device->intr_quirk = ioat3_intr_quirk; |
bf40a686 DW |
1865 | dma = &device->common; |
1866 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | |
1867 | dma->device_issue_pending = ioat2_issue_pending; | |
1868 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | |
1869 | dma->device_free_chan_resources = ioat2_free_chan_resources; | |
58c8649e | 1870 | |
570727b5 | 1871 | if (is_xeon_cb32(pdev)) |
f26df1a1 DJ |
1872 | dma->copy_align = 6; |
1873 | ||
58c8649e DW |
1874 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); |
1875 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; | |
1876 | ||
75c6f0ab | 1877 | device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); |
228c4f5c | 1878 | |
d302398d | 1879 | if (is_bwd_noraid(pdev)) |
75c6f0ab | 1880 | device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); |
d302398d | 1881 | |
228c4f5c | 1882 | /* dca is incompatible with raid operations */ |
75c6f0ab DJ |
1883 | if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) |
1884 | device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); | |
228c4f5c | 1885 | |
75c6f0ab | 1886 | if (device->cap & IOAT_CAP_XOR) { |
e3232714 | 1887 | is_raid_device = true; |
b094ad3b | 1888 | dma->max_xor = 8; |
2adfc550 | 1889 | dma->xor_align = 6; |
b094ad3b DW |
1890 | |
1891 | dma_cap_set(DMA_XOR, dma->cap_mask); | |
1892 | dma->device_prep_dma_xor = ioat3_prep_xor; | |
1893 | ||
1894 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | |
1895 | dma->device_prep_dma_xor_val = ioat3_prep_xor_val; | |
1896 | } | |
eceec44e | 1897 | |
75c6f0ab | 1898 | if (device->cap & IOAT_CAP_PQ) { |
e3232714 | 1899 | is_raid_device = true; |
7727eaa4 | 1900 | |
75c6f0ab DJ |
1901 | dma->device_prep_dma_pq = ioat3_prep_pq; |
1902 | dma->device_prep_dma_pq_val = ioat3_prep_pq_val; | |
1903 | dma_cap_set(DMA_PQ, dma->cap_mask); | |
1904 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); | |
1905 | ||
1906 | if (device->cap & IOAT_CAP_RAID16SS) { | |
7727eaa4 | 1907 | dma_set_maxpq(dma, 16, 0); |
eceec44e | 1908 | dma->pq_align = 0; |
7727eaa4 DJ |
1909 | } else { |
1910 | dma_set_maxpq(dma, 8, 0); | |
1911 | if (is_xeon_cb32(pdev)) | |
1912 | dma->pq_align = 6; | |
1913 | else | |
1914 | dma->pq_align = 0; | |
1915 | } | |
d69d235b | 1916 | |
75c6f0ab DJ |
1917 | if (!(device->cap & IOAT_CAP_XOR)) { |
1918 | dma->device_prep_dma_xor = ioat3_prep_pqxor; | |
1919 | dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; | |
1920 | dma_cap_set(DMA_XOR, dma->cap_mask); | |
1921 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | |
ae786624 | 1922 | |
75c6f0ab | 1923 | if (device->cap & IOAT_CAP_RAID16SS) { |
7727eaa4 | 1924 | dma->max_xor = 16; |
eceec44e | 1925 | dma->xor_align = 0; |
7727eaa4 DJ |
1926 | } else { |
1927 | dma->max_xor = 8; | |
1928 | if (is_xeon_cb32(pdev)) | |
1929 | dma->xor_align = 6; | |
1930 | else | |
1931 | dma->xor_align = 0; | |
1932 | } | |
ae786624 | 1933 | } |
d69d235b | 1934 | } |
eceec44e | 1935 | |
75c6f0ab | 1936 | if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) { |
e3232714 DW |
1937 | dma_cap_set(DMA_MEMSET, dma->cap_mask); |
1938 | dma->device_prep_dma_memset = ioat3_prep_memset_lock; | |
1939 | } | |
1940 | ||
1941 | ||
9a37f644 DJ |
1942 | dma->device_tx_status = ioat3_tx_status; |
1943 | device->cleanup_fn = ioat3_cleanup_event; | |
1944 | device->timer_fn = ioat3_timer_event; | |
bf40a686 | 1945 | |
3f09ede4 DJ |
1946 | if (is_xeon_cb32(pdev)) { |
1947 | dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); | |
1948 | dma->device_prep_dma_xor_val = NULL; | |
7b3cc2b1 | 1949 | |
3f09ede4 DJ |
1950 | dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); |
1951 | dma->device_prep_dma_pq_val = NULL; | |
1952 | } | |
7b3cc2b1 | 1953 | |
7727eaa4 | 1954 | /* starting with CB3.3, super extended descriptors are supported */ |
75c6f0ab | 1955 | if (device->cap & IOAT_CAP_RAID16SS) { |
7727eaa4 DJ |
1956 | char pool_name[14]; |
1957 | int i; | |
1958 | ||
1959 | /* allocate sw descriptor pool for SED */ | |
1960 | device->sed_pool = kmem_cache_create("ioat_sed", | |
1961 | sizeof(struct ioat_sed_ent), 0, 0, NULL); | |
1962 | if (!device->sed_pool) | |
1963 | return -ENOMEM; | |
1964 | ||
1965 | for (i = 0; i < MAX_SED_POOLS; i++) { | |
1966 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); | |
1967 | ||
1968 | /* allocate SED DMA pool */ | |
1969 | device->sed_hw_pool[i] = dma_pool_create(pool_name, | |
1970 | &pdev->dev, | |
1971 | SED_SIZE * (i + 1), 64, 0); | |
1972 | if (!device->sed_hw_pool[i]) | |
1973 | goto sed_pool_cleanup; | |
1974 | ||
1975 | } | |
1976 | } | |
1977 | ||
bf40a686 DW |
1978 | err = ioat_probe(device); |
1979 | if (err) | |
1980 | return err; | |
1981 | ioat_set_tcp_copy_break(262144); | |
1982 | ||
1983 | list_for_each_entry(c, &dma->channels, device_node) { | |
1984 | chan = to_chan_common(c); | |
1985 | writel(IOAT_DMA_DCA_ANY_CPU, | |
1986 | chan->reg_base + IOAT_DCACTRL_OFFSET); | |
1987 | } | |
1988 | ||
1989 | err = ioat_register(device); | |
1990 | if (err) | |
1991 | return err; | |
5669e31c DW |
1992 | |
1993 | ioat_kobject_add(device, &ioat2_ktype); | |
1994 | ||
bf40a686 DW |
1995 | if (dca) |
1996 | device->dca = ioat3_dca_init(pdev, device->reg_base); | |
1997 | ||
1998 | return 0; | |
7727eaa4 DJ |
1999 | |
2000 | sed_pool_cleanup: | |
2001 | if (device->sed_pool) { | |
2002 | int i; | |
2003 | kmem_cache_destroy(device->sed_pool); | |
2004 | ||
2005 | for (i = 0; i < MAX_SED_POOLS; i++) | |
2006 | if (device->sed_hw_pool[i]) | |
2007 | dma_pool_destroy(device->sed_hw_pool[i]); | |
2008 | } | |
2009 | ||
2010 | return -ENOMEM; | |
2011 | } | |
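
The sed_hw_pool array set up above is a simple size-class allocator: pool i hands out hardware descriptor extensions of (i + 1) * SED_SIZE bytes at 64-byte alignment. A hypothetical selection helper makes the rule explicit (a sketch; not part of the driver):

/* pool i serves extensions of up to (i + 1) * SED_SIZE bytes */
static int sed_pool_index(size_t ext_bytes)
{
	return DIV_ROUND_UP(ext_bytes, SED_SIZE) - 1;
}
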
2012 | ||
2013 | void ioat3_dma_remove(struct ioatdma_device *device) | |
2014 | { | |
2015 | if (device->sed_pool) { | |
2016 | int i; | |
2017 | kmem_cache_destroy(device->sed_pool); | |
2018 | ||
2019 | for (i = 0; i < MAX_SED_POOLS; i++) | |
2020 | if (device->sed_hw_pool[i]) | |
2021 | dma_pool_destroy(device->sed_hw_pool[i]); | |
2022 | } | |
bf40a686 | 2023 | } |
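
ioat3_dma_remove() repeats the sed_pool_cleanup unwind from ioat3_dma_probe(); a shared helper would keep the two paths from drifting apart (a sketch):

static void ioat3_sed_pools_destroy(struct ioatdma_device *device)
{
	int i;

	if (!device->sed_pool)
		return;

	kmem_cache_destroy(device->sed_pool);
	device->sed_pool = NULL;

	for (i = 0; i < MAX_SED_POOLS; i++)
		if (device->sed_hw_pool[i])
			dma_pool_destroy(device->sed_hw_pool[i]);
}
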