--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ ... @@ ioat1_tx_submit
dump_desc_dbg(ioat, chain_tail);
dump_desc_dbg(ioat, first);
- ioat->pending += desc->tx_cnt;
+ ioat->pending += desc->hw->tx_cnt;
if (ioat->pending >= ioat_pending_level)
__ioat1_dma_memcpy_issue_pending(ioat);
spin_unlock_bh(&ioat->desc_lock);
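[Note: the submit path above now reads tx_cnt out of the hardware descriptor rather than the software wrapper. That only works because the IOAT hardware descriptor contains words the engine never interprets, so the driver can stash per-transaction data there. A minimal sketch of the idea follows; the layout and field names (user1, user2, rsv1, rsv2) are illustrative assumptions, not copied from the driver's hw.h, which also defines a ctl_f bitfield union that is omitted here.]

#include <stdint.h>

/* Illustrative layout only: the point is that the trailing words of
 * the 64-byte descriptor are software-defined, so tx_cnt can live in
 * the hardware descriptor instead of in struct ioat_desc_sw.
 */
struct ioat_dma_descriptor {
	uint32_t size;       /* bytes to transfer */
	uint32_t ctl;        /* control bits (int_en, compl_write, ...) */
	uint64_t src_addr;
	uint64_t dst_addr;
	uint64_t next;       /* physical address of the next descriptor */
	uint64_t rsv1;
	uint64_t rsv2;
	union {
		uint64_t user1;  /* hardware ignores this word... */
		uint64_t tx_cnt; /* ...so the driver keeps its count here */
	};
	uint64_t user2;
};

[Kept this way, the count travels with the descriptor for free, and the software wrapper (struct ioat_desc_sw, below) shrinks by one field.]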
@@ ... @@ ioat1_dma_prep_memcpy
spin_unlock_bh(&ioat->desc_lock);
desc->txd.flags = flags;
- desc->tx_cnt = tx_cnt;
desc->len = total_len;
list_splice(&chain, &desc->txd.tx_list);
hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
hw->ctl_f.compl_write = 1;
+ hw->tx_cnt = tx_cnt;
dump_desc_dbg(ioat, desc);
return &desc->txd;
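[For reference, tx_cnt is the number of hardware descriptors the prep routine chained together for one memcpy, one per chunk of at most xfercap bytes. A standalone sketch of that accounting follows; ioat_desc_count is a hypothetical helper written for illustration, not a function in the driver.]

#include <stddef.h>

/* Hypothetical helper mirroring the prep loop's accounting: a copy of
 * `len` bytes is split into chunks of at most `xfercap` bytes, and one
 * hardware descriptor is consumed per chunk.  The resulting count is
 * what ends up in hw->tx_cnt above and, at submit time, in
 * ioat->pending.
 */
static int ioat_desc_count(size_t len, size_t xfercap)
{
	int tx_cnt = 0;

	do {
		size_t copy = len < xfercap ? len : xfercap;

		tx_cnt++;    /* one descriptor per chunk */
		len -= copy;
	} while (len);

	return tx_cnt;       /* >= 1: even a zero-length op takes a descriptor */
}

[For example, a 1 MB copy against a 4 KB transfer cap would chain 256 descriptors, which is why submit batches notifications through ioat->pending and ioat_pending_level above.]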
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ ... @@ struct ioat_desc_sw
* @hw: hardware DMA descriptor
* @node: this descriptor will either be on the free list,
* or attached to a transaction list (async_tx.tx_list)
- * @tx_cnt: number of descriptors required to complete the transaction
+ * @len: total transaction length
* @txd: the generic software descriptor for all engines
* @id: identifier for debug
*/
struct ioat_desc_sw {
struct ioat_dma_descriptor *hw;
struct list_head node;
- int tx_cnt;
size_t len;
struct dma_async_tx_descriptor txd;
#ifdef DEBUG