#include <linux/irq.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include "nic_reg.h"
static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx)
{
struct xdp_buff xdp;
+ struct page *page;
u32 action;
u16 len;
u64 dma_addr, cpu_addr;
switch (action) {
case XDP_PASS:
case XDP_TX:
- case XDP_ABORTED:
- case XDP_DROP:
/* Pass these packets on to the network stack */
return false;
default:
bpf_warn_invalid_xdp_action(action);
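+ /* fall through */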
+ case XDP_ABORTED:
+ trace_xdp_exception(nic->netdev, prog, action);
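+ /* fall through */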
+ case XDP_DROP:
+ page = virt_to_page(xdp.data);
+ /* Check if it's a recycled page; if not,
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ put_page(page);
+ return true;
}
return false;
}
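
Not part of the patch: a minimal XDP program sketch that exercises the three
verdict paths handled above. The filter logic and program name are
illustrative assumptions; only the XDP_* return codes are fixed by the UAPI.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int drop_non_ipv4(struct xdp_md *ctx)
{
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        struct ethhdr *eth = data;

        if ((void *)(eth + 1) > data_end)
                return XDP_ABORTED;     /* lands in trace_xdp_exception() */
        if (eth->h_proto != bpf_htons(ETH_P_IP))
                return XDP_DROP;        /* buffer is freed or recycled, no SKB */
        return XDP_PASS;                /* handed on to the network stack */
}

char _license[] SEC("license") = "GPL";
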
if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx))
return;
- skb = nicvf_get_rcv_skb(snic, cqe_rx);
+ skb = nicvf_get_rcv_skb(snic, cqe_rx, nic->xdp_prog ? true : false);
if (!skb) {
netdev_dbg(nic->netdev, "Packet not received\n");
return;
/* Save the page in page cache */
pgcache->page = page;
+ pgcache->dma_addr = 0;
rbdr->pgalloc++;
}
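
For reference, the page-cache entry this hunk manipulates; a sketch abridged
from nicvf_queues.h (the exact field set in the tree may differ):

struct pgcache {
        struct page     *page;
        u64             dma_addr;       /* 0 => no cached DMA mapping yet */
};
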
/* Check if request can be accommodated in previously allocated page.
* But in XDP mode only one buffer per page is permitted.
*/
- if (!nic->pnicvf->xdp_prog && nic->rb_page &&
+ if (!rbdr->is_xdp && nic->rb_page &&
((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
nic->rb_pageref++;
goto ret;
if (pgcache)
nic->rb_page = pgcache->page;
ret:
- /* HW will ensure data coherency, CPU sync not required */
- *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
- nic->rb_page_offset, buf_len,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
- if (!nic->rb_page_offset)
- __free_pages(nic->rb_page, 0);
- nic->rb_page = NULL;
- return -ENOMEM;
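+ /* Reuse the existing DMA mapping if the page was recycled */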
+ if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
+ *rbuf = pgcache->dma_addr;
+ } else {
+ /* HW will ensure data coherency, CPU sync not required */
+ *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+ nic->rb_page_offset, buf_len,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+ if (!nic->rb_page_offset)
+ __free_pages(nic->rb_page, 0);
+ nic->rb_page = NULL;
+ return -ENOMEM;
+ }
+ if (pgcache)
+ pgcache->dma_addr = *rbuf;
+ nic->rb_page_offset += buf_len;
}
- nic->rb_page_offset += buf_len;
return 0;
}
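
Not part of the patch: DMA_ATTR_SKIP_CPU_SYNC is safe above only because, as
the in-code comment notes, the hardware keeps these buffers coherent. On a
platform without that guarantee the driver would have to sync explicitly
before the CPU reads packet data; a hypothetical helper to make the
attribute's contract concrete:

/* Hypothetical, not in the driver: pair a DMA_ATTR_SKIP_CPU_SYNC mapping
 * with an explicit sync before CPU access on non-coherent hardware.
 */
static void nicvf_cpu_sync_rbuf(struct nicvf *nic, dma_addr_t dma_addr,
                                size_t len)
{
        dma_sync_single_for_cpu(&nic->pdev->dev, dma_addr, len,
                                DMA_FROM_DEVICE);
}
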
* On embedded platforms, i.e. 81xx/83xx, available memory itself
* is low and the minimum ring size of RBDR is 8K, which takes away
* lots of memory.
+ *
+ * But for XDP it has to be a single buffer per page.
*/
- rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
+ if (!nic->pnicvf->xdp_prog) {
+ rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
+ rbdr->is_xdp = false;
+ } else {
+ rbdr->pgcnt = ring_len;
+ rbdr->is_xdp = true;
+ }
rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
rbdr->pgcnt, GFP_KERNEL);
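
A worked sizing example (assumed values: 4 KB pages, 2 KB receive fragments,
an 8K-entry ring) to make the two branches above concrete:

/* non-XDP: pgcnt = 8192 / (4096 / 2048) = 4096 pages (2 buffers per page)
 * XDP:     pgcnt = 8192 pages             (1 buffer per page)
 *
 * Both values are already powers of two, so roundup_pow_of_two() leaves
 * them unchanged.
 */
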
#endif
}
+static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
+ u64 buf_addr, bool xdp)
+{
+ struct page *page = NULL;
+ int len = RCV_FRAG_LEN;
+
+ if (xdp) {
+ page = virt_to_page(phys_to_virt(buf_addr));
+ /* Check if it's a recycled page; if not,
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) != 1)
+ return;
+ /* Receive buffers in XDP mode are mapped from page start */
+ dma_addr &= PAGE_MASK;
+ }
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+}
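
Not part of the patch: a hypothetical helper illustrating the refcount
convention the check above relies on. The recycle cache holds one extra
reference per cached page, so page_ref_count() only drops to 1 once the
page has left the pool and its DMA mapping really can be torn down.

static struct page *nicvf_recycle_page(struct pgcache *pgcache)
{
        /* Keep an extra reference so an in-flight recycled page never
         * reaches refcount 1 and its DMA mapping is preserved.
         */
        page_ref_add(pgcache->page, 1);
        return pgcache->page;
}
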
+
/* Returns SKB for a received packet */
-struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+ struct cqe_rx_t *cqe_rx, bool xdp)
{
int frag;
int payload_len = 0;
if (!frag) {
/* First fragment */
- dma_unmap_page_attrs(&nic->pdev->dev,
- *rb_ptrs - cqe_rx->align_pad,
- RCV_FRAG_LEN, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ nicvf_unmap_rcv_buffer(nic,
+ *rb_ptrs - cqe_rx->align_pad,
+ phys_addr, xdp);
skb = nicvf_rb_ptr_to_skb(nic,
phys_addr - cqe_rx->align_pad,
payload_len);
skb_put(skb, payload_len);
} else {
/* Add fragments */
- dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
- RCV_FRAG_LEN, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
page = virt_to_page(phys_to_virt(phys_addr));
offset = phys_to_virt(phys_addr) - page_address(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,