vring->swhead = 0;
vring->swtail = 0;
- vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+ vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
if (!vring->ctx) {
vring->va = NULL;
return -ENOMEM;
}
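/* A sketch of the assumed per-descriptor context (defined in the driver
 * header, wil6210.h): ctx->skb holds the buffer to release, and
 * ctx->mapped_as_page records whether the DMA mapping was created from a
 * page fragment and so must be undone with dma_unmap_page():
 *
 *	struct wil_ctx {
 *		struct sk_buff *skb;
 *		u8 mapped_as_page:1;
 *	};
 *
 * kcalloc() zeroes the array and, unlike the open-coded
 * kzalloc(n * sz), checks the n * sz multiplication for overflow.
 */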
while (!wil_vring_is_empty(vring)) {
dma_addr_t pa;
- struct sk_buff *skb;
u16 dmalen;
+ struct wil_ctx *ctx;
if (tx) {
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx;
+ ctx = &vring->ctx[vring->swtail];
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- skb = vring->ctx[vring->swtail];
- if (skb) {
- dma_unmap_single(dev, pa, dmalen,
- DMA_TO_DEVICE);
- dev_kfree_skb_any(skb);
- vring->ctx[vring->swtail] = NULL;
- } else {
+ if (vring->ctx[vring->swtail].mapped_as_page) {
dma_unmap_page(dev, pa, dmalen,
DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(dev, pa, dmalen,
+ DMA_TO_DEVICE);
}
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
vring->swtail = wil_vring_next_tail(vring);
} else { /* rx */
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d =
&vring->va[vring->swhead].rx;
+ ctx = &vring->ctx[vring->swhead];
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- skb = vring->ctx[vring->swhead];
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
- kfree_skb(skb);
+ kfree_skb(ctx->skb);
wil_vring_advance_head(vring, 1);
}
}
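/* The free path now keys the unmap primitive off ctx->mapped_as_page
 * instead of inferring "page fragment" from a NULL skb pointer: page
 * mappings must be undone with dma_unmap_page(), single-buffer mappings
 * with dma_unmap_single(), and any skb held in the ctx slot is freed
 * separately from the unmap decision.
 */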
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
d->dma.length = cpu_to_le16(sz);
*_d = *d;
- vring->ctx[i] = skb;
+ vring->ctx[i].skb = skb;
return 0;
}
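/* Rx buffers are whole skbs mapped with dma_map_single(), so only
 * ctx->skb needs to be set here; mapped_as_page stays 0 from the
 * kcalloc() in wil_vring_alloc().
 */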
return NULL;
}
- skb = vring->ctx[vring->swhead];
+ skb = vring->ctx[vring->swhead].skb;
d = wil_skb_rxdesc(skb);
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
- vring->ctx[vring->swhead] = NULL;
+ vring->ctx[vring->swhead].skb = NULL;
wil_vring_advance_head(vring, 1);
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
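/* Clearing ctx->skb transfers ownership of the reaped skb to the caller,
 * so a later wil_vring_free() will not free it a second time.
 */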
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
wil_tx_desc_map(d, pa, len, vring_index);
- vring->ctx[i] = NULL;
+ vring->ctx[i].mapped_as_page = 1;
*_d = *d;
}
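/* Fragment descriptors carry page mappings (presumably created with
 * skb_frag_dma_map()), so mark them mapped_as_page; the skb pointer
 * itself is recorded only on the last descriptor, below.
 */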
/* for the last seg only */
/* hold reference to skb
* to prevent skb release before accounting
* in case of immediate "tx done"
*/
- vring->ctx[i] = skb_get(skb);
+ vring->ctx[i].skb = skb_get(skb);
return 0;
dma_error:
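/* The unwind walks back over every descriptor mapped so far, picks the
 * matching unmap primitive from ctx->mapped_as_page, drops the skb
 * reference if one was taken, and zeroes the ctx slot so it is clean
 * for reuse.
 */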
/* Note: increment @f to operate with positive index */
for (f++; f > 0; f--) {
u16 dmalen;
+ struct wil_ctx *ctx;
i = (swhead + f) % vring->size;
+ ctx = &vring->ctx[i];
_d = &(vring->va[i].tx);
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- if (vring->ctx[i])
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
- else
+ if (ctx->mapped_as_page)
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+
+ memset(ctx, 0, sizeof(*ctx));
}
return -EINVAL;
volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx;
struct vring_tx_desc dd, *d = &dd;
dma_addr_t pa;
- struct sk_buff *skb;
u16 dmalen;
+ struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+ struct sk_buff *skb = ctx->skb;
*d = *_d;
(const void *)d, sizeof(*d), false);
pa = wil_desc_addr(&d->dma.addr);
- skb = vring->ctx[vring->swtail];
+ if (ctx->mapped_as_page)
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
if (skb) {
if (d->dma.error == 0) {
ndev->stats.tx_packets++;
ndev->stats.tx_errors++;
}
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
- vring->ctx[vring->swtail] = NULL;
- } else {
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
}
+ memset(ctx, 0, sizeof(*ctx));
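/* Zeroing the ctx slot marks it free for the next wil_tx_vring() pass;
 * a stale skb or mapped_as_page here would cause a double free or a
 * mismatched unmap when the ring wraps.
 */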
/*
* There is no need to touch HW descriptor:
* - status bit TX_DMA_STATUS_DU is set by design,