From b9032741e4f86844d8c4a7c18001ee328dae2f7a Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 20 Jan 2017 08:25:34 -0800
Subject: [PATCH] bnx2x: avoid two atomic ops per page on x86

Commit 4cace675d687 ("bnx2x: Alloc 4k fragment for each rx ring buffer
element") added extra put_page() and get_page() calls on arches where
PAGE_SIZE=4K, like x86.

Reorder things to avoid this overhead.

Signed-off-by: Eric Dumazet
Cc: Gabriel Krisman Bertazi
Cc: Yuval Mintz
Cc: Ariel Elior
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 3e199d3e461e..c0dac0e5696d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	struct bnx2x_alloc_pool *pool = &fp->page_pool;
 	dma_addr_t mapping;
 
-	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
-
-		/* put page reference used by the memory pool, since we
-		 * won't be using this page as the mempool anymore.
-		 */
-		if (pool->page)
-			put_page(pool->page);
-
+	if (!pool->page) {
 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 		if (unlikely(!pool->page))
 			return -ENOMEM;
@@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		return -ENOMEM;
 	}
 
-	get_page(pool->page);
 	sw_buf->page = pool->page;
 	sw_buf->offset = pool->offset;
 
@@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 
 	pool->offset += SGE_PAGE_SIZE;
-
+	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
+		get_page(pool->page);
+	else
+		pool->page = NULL;
 	return 0;
 }
-- 
2.20.1
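
For context, the following is a minimal userspace sketch of the reference-counting pattern the patch arrives at. It is not driver code: the names fake_page, fake_pool, pool_alloc_sge() and the plain refcount field are hypothetical stand-ins for alloc_pages()/get_page()/put_page() and struct bnx2x_alloc_pool. On x86, where PAGE_SIZE == SGE_PAGE_SIZE, the pool page is consumed by a single allocation, so the "keep page, take extra reference" branch is never executed and the old per-page put_page()/get_page() pair disappears.

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     4096
#define SGE_PAGE_SIZE 4096

struct fake_page { int refcount; };

struct fake_pool {
	struct fake_page *page;
	unsigned int offset;
};

static struct fake_page *fake_alloc_page(void)
{
	struct fake_page *p = calloc(1, sizeof(*p));

	if (p)
		p->refcount = 1;	/* page allocation returns one reference */
	return p;
}

/* Hand one SGE-sized chunk of the pool page to a caller; 0 on success. */
static int pool_alloc_sge(struct fake_pool *pool)
{
	if (!pool->page) {
		pool->page = fake_alloc_page();
		if (!pool->page)
			return -1;
		pool->offset = 0;
	}

	/* ...the caller now owns the reference obtained above... */

	pool->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE) {
		/* Page still has room: take one extra reference (get_page())
		 * so the pool can hand out the remainder later. */
		pool->page->refcount++;
	} else {
		/* Page exhausted: the last reference travels with the caller,
		 * so the pool simply forgets the page -- no extra atomics. */
		pool->page = NULL;
	}
	return 0;
}

int main(void)
{
	struct fake_pool pool = { 0 };

	/* With 4K pages every allocation consumes the whole page, so the
	 * refcount is touched exactly once per page. */
	for (int i = 0; i < 3; i++)
		printf("alloc %d -> %d, pool page %s\n",
		       i, pool_alloc_sge(&pool),
		       pool.page ? "kept" : "released");
	return 0;
}
```

On architectures with larger pages the first branch still takes the extra reference, matching the pre-patch behaviour of sharing one page across several SGEs.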