/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Support routines for v3+ hardware
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
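/*
 * Example of the encoding above: the hardware stores the source count with
 * an implied bias, so a 5-source XOR is programmed as src_cnt_to_hw(5) == 3
 * and a 12-source PQ operation as src16_cnt_to_hw(12) == 3; the *_to_sw()
 * macros undo the bias when a descriptor is read back during cleanup.
 */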
/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
				       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
					0, 1, 2, 3, 4, 5, 6 };
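/*
 * How the lookup works: bit 'idx' of xor_idx_to_desc (0xe0) or pq_idx_to_desc
 * (0xf8) selects the base descriptor (bit clear) or the extended descriptor
 * (bit set), and the *_idx_to_field table gives the raw field slot inside
 * that descriptor.  For example, xor source 6: (0xe0 >> 6) & 1 == 1 and
 * xor_idx_to_field[6] == 1, so it lands in field 1 of the extended
 * descriptor.  The 16-source pq variant can additionally route a source to a
 * third, super extended descriptor via pq16_idx_to_desc[].
 */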
static void ioat3_eh(struct ioat2_dma_chan *ioat);
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}
static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}
static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	return raw->field[pq16_idx_to_field[idx]];
}
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}
static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}
static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}
static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}
static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev);
}
static bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}
static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
			dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
	struct ioat_pq16a_descriptor *pq16 =
		(struct ioat_pq16a_descriptor *)desc[1];
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	raw->field[pq16_idx_to_field[idx]] = addr + offset;

	if (idx < 8)
		pq->coef[idx] = coef;
	else
		pq16->coef[idx - 8] = coef;
}
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;
	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

	sed = kmem_cache_alloc(device->sed_pool, flags);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
				 flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(device->sed_pool, sed);
		return NULL;
	}

	return sed;
}
static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(device->sed_pool, sed);
}
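/*
 * The sed ("super extended descriptor") bookkeeping above pairs a slab object
 * with a block from one of the per-size DMA pools: hw_pool is the index of
 * the dma_pool the hardware block was taken from (the pools are created in
 * ioat3_dma_probe() with block sizes of SED_SIZE * (i + 1)), so the same
 * index must be used when the block is returned in ioat3_free_sed().
 */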
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
			ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	case IOAT_OP_PQ_16S:
	case IOAT_OP_PQ_VAL_16S: {
		struct ioat_pq_descriptor *pq = desc->pq;
		int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[4];
		int i;

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *)pq;
			descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
			descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq16_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
		break;
	}
}
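/*
 * Note on the unmap arithmetic above: only the last descriptor of a
 * multi-segment transaction carries desc->len (the total length), so
 * offset = len - desc->hw->size rewinds the per-segment addresses stored in
 * that final descriptor back to the start of the mapped region before they
 * are handed to ioat_unmap().
 */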
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}
static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}
static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
				   u64 *phys_complete)
{
	*phys_complete = ioat3_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
static void
desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat->tail, i;
	u16 active;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		dump_desc_dbg(ioat, desc);

		/* set err stat if we are using dwbes */
		if (device->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			ioat3_dma_unmap(ioat, desc, idx + i);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat3_free_sed(device, desc->sed);
			desc->sed = NULL;
		}
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
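/*
 * __cleanup() walks the ring from the software tail toward the hardware
 * head, completing and unmapping each descriptor until it reaches the one
 * whose physical address the hardware last wrote to the completion area; the
 * trailing writew() then scales the interrupt coalescing delay to 5us per
 * descriptor still pending, capped at IOAT_INTRDELAY_MASK.
 */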
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	spin_lock_bh(&chan->cleanup_lock);

	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	if (is_ioat_halted(*chan->completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
			ioat3_eh(ioat);
		}
	}

	spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat3_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}
static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = to_pdev(chan);
	struct ioat_dma_descriptor *hw;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat2_get_ring_ent(ioat, ioat->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		BUG();
	}

	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	/* mark faulting descriptor as complete */
	*chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat->prep_lock);
	ioat3_restart_channel(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}
static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}
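/*
 * check_active() is the idle-timer half of the ring sizing policy: the ring
 * is grown on demand in the prep paths (via ioat2_check_space_lock()), and
 * it is stepped back down one alloc_order at a time here, only after a full
 * IDLE_TIMEOUT with no activity and only until it returns to the default
 * order reported by ioat_get_alloc_order().
 */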
static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat3_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat3_cleanup(ioat);

	return dma_cookie_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
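/*
 * The fill pattern above is the requested byte replicated across a 64-bit
 * word, e.g. value 0xab becomes 0x0101010101010101ULL * 0xab ==
 * 0xabababababababab, which the hardware then streams over the destination
 * in xfercap-sized chunks.
 */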
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
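/*
 * Two points worth calling out in __ioat3_prep_xor_lock(): with more than 5
 * sources every transfer segment consumes a base + extended descriptor pair
 * (with_ext doubles num_descs and the loop advances by 1 + with_ext), and
 * the operation is always followed by a null legacy descriptor so that its
 * completion write cannot overtake completion writes still in flight from
 * the raid engine.
 */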
static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}
struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}
static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}
static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
			       struct ioat_ring_ent *desc)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_raw_descriptor *descs[] = { (void *)pq,
						(void *)pq,
						(void *)pq };
	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	if (desc->sed) {
		descs[1] = (void *)desc->sed->hw;
		descs[2] = (void *)desc->sed->hw + 64;
	}

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) pq->next,
		desc->txd.flags, pq->size, pq->ctl,
		pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++) {
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq16_get_src(descs, i),
			pq->coef[i]);
	}
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;
	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case.
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		/* we turn on descriptor write back error status */
		if (device->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	if (!cb32) {
		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		pq->ctl_f.compl_write = 1;
		compl_desc = desc;
	} else {
		/* completion descriptor carries interrupt bit */
		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
		hw = compl_desc->hw;
		hw->ctl = 0;
		hw->ctl_f.null = 1;
		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		hw->ctl_f.compl_write = 1;
		hw->size = NULL_DESC_BUFFER_SIZE;
		dump_desc_dbg(ioat, compl_desc);
	}

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
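/*
 * The continuation handling above mirrors the dma_maxpq() rules: when the
 * caller sets a continue flag, the previous P and/or Q results are fed back
 * in as extra implied sources (dst[0] with coefficient 0 and dst[1] with
 * coefficients 1 and 0), which is why the descriptor's source count is
 * programmed from the incremented 's' rather than from the caller's src_cnt.
 */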
static struct dma_async_tx_descriptor *
__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
		       const dma_addr_t *dst, const dma_addr_t *src,
		       unsigned int src_cnt, const unsigned char *scf,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	u32 offset = 0;
	u8 op;
	int i, s, idx, num_descs;

	/* this function is only called with 9-16 sources */
	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

	dev_dbg(to_dev(chan), "%s\n", __func__);

	num_descs = ioat2_xferlen_to_descs(ioat, len);

	/*
	 * 16 source pq is only available on cb3.3 and has no completion
	 * write hw bug workaround
	 */
	if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;

	i = 0;

	do {
		struct ioat_raw_descriptor *descs[4];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		descs[0] = (struct ioat_raw_descriptor *) pq;

		desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
		if (!desc->sed) {
			dev_err(to_dev(chan),
				"%s: no free sed entries\n", __func__);
			return NULL;
		}

		pq->sed_addr = desc->sed->dma;
		desc->sed->parent = desc;

		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
		descs[2] = (void *)descs[1] + 64;

		for (s = 0; s < src_cnt; s++)
			pq16_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq16_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq16_set_src(descs, dst[0], offset, 0, s++);
			pq16_set_src(descs, dst[1], offset, 1, s++);
			pq16_set_src(descs, dst[1], offset, 0, s++);
		}

		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
		/* we turn on descriptor write back error status */
		if (device->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while (++i < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* with cb3.3 we should be able to do completion w/o a null desc */
	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	pq->ctl_f.compl_write = 1;

	dump_pq16_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
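/*
 * Each 16-source pq descriptor needs a super extended descriptor for the
 * additional source addresses; (src_cnt - 2) >> 3 selects the hardware SED
 * pool, so a 9-source operation draws from pool 0 and a 10..16-source
 * operation from pool 1.
 */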
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if (dmaf_p_disabled_continue(flags))
		return src_cnt + 1;
	else if (dmaf_continue(flags))
		return src_cnt + 3;
	else
		return src_cnt;
}
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
					       2, single_source_coef, len,
					       flags) :
			__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					     single_source_coef, len, flags);

	} else {
		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
					       scf, len, flags) :
			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
					     scf, len, flags);
	}
}
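/*
 * The single-source multiply case above exists because the engine insists on
 * at least two sources: the lone source is simply presented twice, with the
 * second copy given a coefficient of 0 so it contributes nothing to the Q
 * result.
 */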
struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				     flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				     flags);
}
struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
				       scf, len, flags) :
		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
				     scf, len, flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_check_space_lock(ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, ioat->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
static void ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}
	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT |
				      DMA_COMPL_SKIP_SRC_UNMAP |
				      DMA_COMPL_SKIP_DEST_UNMAP);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT |
					  DMA_COMPL_SKIP_SRC_UNMAP |
					  DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}
	/* skip memset if the capability is not present */
	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
		goto free_resources;

	/* test memset */
	op = IOAT_OP_FILL;

	dma_addr = dma_map_page(dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
					 DMA_PREP_INTERRUPT |
					 DMA_COMPL_SKIP_SRC_UNMAP |
					 DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test memset prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test memset setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test memset timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_err(dev, "Self-test memset failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT |
					  DMA_COMPL_SKIP_SRC_UNMAP |
					  DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_FILL)
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}
static int ioat3_irq_reinit(struct ioatdma_device *device)
{
	int msixcnt = device->common.chancnt;
	struct pci_dev *pdev = device->pdev;
	int i;
	struct msix_entry *msix;
	struct ioat_chan_common *chan;
	int err = 0;

	switch (device->irq_mode) {
	case IOAT_MSIX:
		for (i = 0; i < msixcnt; i++) {
			msix = &device->msix_entries[i];
			chan = ioat_chan_by_index(device, i);
			devm_free_irq(&pdev->dev, msix->vector, chan);
		}

		pci_disable_msix(pdev);
		break;

	case IOAT_MSIX_SINGLE:
		msix = &device->msix_entries[0];
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, msix->vector, chan);
		pci_disable_msix(pdev);
		break;

	case IOAT_MSI:
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, pdev->irq, chan);
		pci_disable_msi(pdev);
		break;

	case IOAT_INTX:
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, pdev->irq, chan);
		break;

	default:
		return 0;
	}

	device->irq_mode = IOAT_NOIRQ;

	err = ioat_dma_setup_interrupts(device);

	return err;
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	if (device->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
	if (err) {
		dev_err(&pdev->dev, "Failed to reset!\n");
		return err;
	}

	if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
		err = ioat3_irq_reinit(device);

	return err;
}
static void ioat3_intr_quirk(struct ioatdma_device *device)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	u32 errmask;

	dma = &device->common;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (device->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			chan = to_chan_common(c);
			errmask = readl(chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}
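/*
 * With descriptor write back error status (IOAT_CAP_DWBES) available, P/CRC
 * and Q validation failures are reported per descriptor and picked up in
 * desc_get_errstat(), so the quirk above masks those two CHANERR interrupt
 * sources on every channel instead of taking a channel error for each
 * validation miss.
 */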
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	bool is_raid_device = false;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat3_reset_hw;
	device->self_test = ioat3_dma_self_test;
	device->intr_quirk = ioat3_intr_quirk;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;

	if (is_xeon_cb32(pdev))
		dma->copy_align = 6;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_bwd_noraid(pdev))
		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (device->cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;
		dma->xor_align = 6;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}

	if (device->cap & IOAT_CAP_PQ) {
		is_raid_device = true;

		dma->device_prep_dma_pq = ioat3_prep_pq;
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (device->cap & IOAT_CAP_RAID16SS) {
			dma_set_maxpq(dma, 16, 0);
			dma->pq_align = 0;
		} else {
			dma_set_maxpq(dma, 8, 0);
			if (is_xeon_cb32(pdev))
				dma->pq_align = 6;
			else
				dma->pq_align = 0;
		}

		if (!(device->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat3_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (device->cap & IOAT_CAP_RAID16SS) {
				dma->max_xor = 16;
				dma->xor_align = 0;
			} else {
				dma->max_xor = 8;
				if (is_xeon_cb32(pdev))
					dma->xor_align = 6;
				else
					dma->xor_align = 0;
			}
		}
	}

	if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
		dma_cap_set(DMA_MEMSET, dma->cap_mask);
		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
	}

	dma->device_tx_status = ioat3_tx_status;
	device->cleanup_fn = ioat3_cleanup_event;
	device->timer_fn = ioat3_timer_event;

	if (is_xeon_cb32(pdev)) {
		dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = NULL;

		dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = NULL;
	}

	/* starting with CB3.3 super extended descriptors are supported */
	if (device->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		/* allocate sw descriptor pool for SED */
		device->sed_pool = kmem_cache_create("ioat_sed",
				sizeof(struct ioat_sed_ent), 0, 0, NULL);
		if (!device->sed_pool)
			return -ENOMEM;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			device->sed_hw_pool[i] = dma_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!device->sed_hw_pool[i])
				goto sed_pool_cleanup;

		}
	}

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return 0;

sed_pool_cleanup:
	if (device->sed_pool) {
		int i;

		kmem_cache_destroy(device->sed_pool);

		for (i = 0; i < MAX_SED_POOLS; i++)
			if (device->sed_hw_pool[i])
				dma_pool_destroy(device->sed_hw_pool[i]);
	}

	return -ENOMEM;
}
void ioat3_dma_remove(struct ioatdma_device *device)
{
	if (device->sed_pool) {
		int i;

		kmem_cache_destroy(device->sed_pool);

		for (i = 0; i < MAX_SED_POOLS; i++)
			if (device->sed_hw_pool[i])
				dma_pool_destroy(device->sed_hw_pool[i]);
	}
}