There are holes in the sdma build support routines that do
not clean up partially built sdma descriptors after mapping or
allocation failures.
This patch corrects these issues.
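
For background, the fix applies the kernel's standard goto-unwind idiom:
every failure path in a build routine jumps to a single label that releases
whatever was partially constructed, so a failing routine never hands back a
half-built tx. A minimal sketch of that idiom under hypothetical names
(tx_state, tx_build() and tx_clean() are illustrations, not the hfi1 API):

	#include <linux/slab.h>
	#include <linux/types.h>

	/* Hypothetical state; stands in for the driver's tx structure. */
	struct tx_state {
		void *coalesce_buf;
		void *descp;
	};

	static void tx_clean(struct tx_state *tx)
	{
		kfree(tx->coalesce_buf);	/* kfree(NULL) is a no-op */
		kfree(tx->descp);
		tx->coalesce_buf = NULL;
		tx->descp = NULL;
	}

	static int tx_build(struct tx_state *tx, size_t len)
	{
		tx->coalesce_buf = kmalloc(len + sizeof(u32), GFP_ATOMIC);
		if (!tx->coalesce_buf)
			goto enomem;

		tx->descp = kmalloc_array(16, sizeof(u64), GFP_ATOMIC);
		if (!tx->descp)
			goto enomem;		/* also frees coalesce_buf */

		return 0;
	enomem:
		tx_clean(tx);			/* single cleanup point for all failures */
		return -ENOMEM;
	}

In the patch itself the same role is played by the new enomem label, which
funnels every allocation failure through sdma_txclean().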
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
GFP_ATOMIC);
if (!tx->coalesce_buf)
- return -ENOMEM;
-
+ goto enomem;
tx->coalesce_idx = 0;
}
return 0;
}
if (unlikely(tx->num_desc == MAX_DESC))
- return -ENOMEM;
+ goto enomem;
tx->descp = kmalloc_array(
MAX_DESC,
sizeof(struct sdma_desc),
GFP_ATOMIC);
if (!tx->descp)
- return -ENOMEM;
+ goto enomem;
/* reserve last descriptor for coalescing */
tx->desc_limit = MAX_DESC - 1;
for (i = 0; i < tx->num_desc; i++)
tx->descp[i] = tx->descs[i];
return 0;
+enomem:
+ sdma_txclean(dd, tx);
+ return -ENOMEM;
}
/*
tx->tlen -= len;
/* special cases for last */
if (!tx->tlen) {
- if (tx->packet_len & (sizeof(u32) - 1))
+ if (tx->packet_len & (sizeof(u32) - 1)) {
rval = _pad_sdma_tx_descs(dd, tx);
- else
+ if (rval)
+ return rval;
+ } else {
_sdma_close_tx(dd, tx);
+ }
}
tx->num_desc++;
return rval;
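
With both hunks applied, a failing build routine always returns a fully
cleaned tx, so callers need no unwind of their own. A hedged sketch of that
contract, reusing the hypothetical tx_build() from the sketch above:

	/* Hypothetical caller -- illustrates the contract, not driver code. */
	static int send_one(struct tx_state *tx, size_t len)
	{
		int rval = tx_build(tx, len);

		if (rval)
			return rval;	/* tx_build() already ran tx_clean() */

		/* ... hand the fully built descriptor list to hardware ... */
		return 0;
	}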