src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- /*
- * Also allocate a shadow src ring in regular
- * mem to use for faster access.
- */
- src_ring->shadow_base_unaligned =
- kmalloc((nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN), GFP_KERNEL);
- if (!src_ring->shadow_base_unaligned) {
- dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- src_ring->base_addr_owner_space,
- src_ring->base_addr_ce_space);
- kfree(src_ring);
- return ERR_PTR(-ENOMEM);
- }
-
- src_ring->shadow_base = PTR_ALIGN(
- src_ring->shadow_base_unaligned,
- CE_DESC_RING_ALIGN);
-
return src_ring;
}
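For context, once the shadow copy is gone, all this allocation helper still has to set up is the DMA-coherent descriptor block and its aligned views, which the context lines above already hint at. A minimal sketch of that remaining path, assuming the usual dma_alloc_coherent()/PTR_ALIGN()/ALIGN() pattern and a dma_addr_t base_addr local declared earlier in the function; the error handling here is illustrative rather than taken from this patch:

	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	/* The CPU view and the CE (device) view are bumped to the same
	 * CE_DESC_RING_ALIGN boundary so both keep addressing the same
	 * descriptors.
	 */
	src_ring->base_addr_owner_space =
		PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
			  CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space =
		ALIGN(src_ring->base_addr_ce_space_unaligned,
		      CE_DESC_RING_ALIGN);
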
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
if (ce_state->src_ring) {
- kfree(ce_state->src_ring->shadow_base_unaligned);
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +

/* CE address space */
u32 base_addr_ce_space;
- /*
- * Start of shadow copy of descriptors, within regular memory.
- * Aligned to descriptor-size boundary.
- */
- void *shadow_base_unaligned;
- struct ce_desc *shadow_base;
/* keep last */
void *per_transfer_context[0];
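With shadow_base_unaligned and shadow_base dropped, the ring bookkeeping reduces to the coherent descriptor block plus its aligned CPU and CE views. A condensed sketch of struct ath10k_ce_ring as it looks after this change, keeping only the fields that appear in the hunks above (the owner-space unaligned pointer and the u32 type of the unaligned CE address are assumptions based on the naming pattern; all other members are omitted):

struct ath10k_ce_ring {
	unsigned int nentries;

	/* CPU (owner space) address of the descriptors */
	void *base_addr_owner_space_unaligned;
	void *base_addr_owner_space;

	/* CE address space */
	u32 base_addr_ce_space_unaligned;
	u32 base_addr_ce_space;

	/* keep last */
	void *per_transfer_context[0];
};
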
struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
- struct ce_desc *ce_desc;
struct sk_buff *skb;
int i;

if (!pci_pipe->buf_sz)
return;
- ce_desc = ce_ring->shadow_base;
- if (WARN_ON(!ce_desc))
- return;
-
for (i = 0; i < ce_ring->nentries; i++) {
skb = ce_ring->per_transfer_context[i];
if (!skb)