From 6069f3fbde03211f4b839e188eba2439f8b8326a Mon Sep 17 00:00:00 2001
From: VSR Burru
Date: Wed, 22 Mar 2017 11:54:50 -0700
Subject: [PATCH] liquidio: fix tx completions in napi poll

If there are no egress packets pending, then don't look for tx
completions in napi poll. Also, fix broken tx queue wakeup logic.

Signed-off-by: VSR Burru
Signed-off-by: Felix Manlunas
Signed-off-by: Satanand Burla
Signed-off-by: Derek Chickles
Signed-off-by: David S. Miller
---
 .../net/ethernet/cavium/liquidio/lio_main.c   | 20 +++++++++++---------
 .../ethernet/cavium/liquidio/lio_vf_main.c    | 19 +++++++++++--------
 2 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 86ea86cfc133..10732e0e48cf 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -932,14 +932,13 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
 						  tx_restart, 1);
 			netif_wake_subqueue(netdev, iq->q_index);
-		} else {
-			if (!octnet_iq_is_full(oct, lio->txq)) {
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
-							  lio->txq,
-							  tx_restart, 1);
-				wake_q(netdev, lio->txq);
-			}
 		}
+	} else if (netif_queue_stopped(netdev) &&
+		   lio->linfo.link.s.link_up &&
+		   (!octnet_iq_is_full(oct, lio->txq))) {
+		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+					  lio->txq, tx_restart, 1);
+		netif_wake_queue(netdev);
 	}
 }
 
@@ -2454,8 +2453,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 	/* Flush the instruction queue */
 	iq = oct->instr_queue[iq_no];
 	if (iq) {
-		/* Process iq buffers with in the budget limits */
-		tx_done = octeon_flush_iq(oct, iq, budget);
+		if (atomic_read(&iq->instr_pending))
+			/* Process iq buffers with in the budget limits */
+			tx_done = octeon_flush_iq(oct, iq, budget);
+		else
+			tx_done = 1;
 		/* Update iq read-index rather than waiting for next interrupt.
 		 * Return back if tx_done is false.
 		 */
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 65e6f4bfa1cf..68794fa5d322 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -687,13 +687,12 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
 			netif_wake_subqueue(netdev, iq->q_index);
 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
 						  tx_restart, 1);
-		} else {
-			if (!octnet_iq_is_full(oct, lio->txq)) {
-				INCR_INSTRQUEUE_PKT_COUNT(
-					lio->oct_dev, lio->txq, tx_restart, 1);
-				wake_q(netdev, lio->txq);
-			}
 		}
+	} else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up &&
+		   (!octnet_iq_is_full(oct, lio->txq))) {
+		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+					  lio->txq, tx_restart, 1);
+		netif_wake_queue(netdev);
 	}
 }
 
@@ -1636,8 +1635,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 	/* Flush the instruction queue */
 	iq = oct->instr_queue[iq_no];
 	if (iq) {
-		/* Process iq buffers with in the budget limits */
-		tx_done = octeon_flush_iq(oct, iq, budget);
+		if (atomic_read(&iq->instr_pending))
+			/* Process iq buffers with in the budget limits */
+			tx_done = octeon_flush_iq(oct, iq, budget);
+		else
+			tx_done = 1;
+
 		/* Update iq read-index rather than waiting for next interrupt.
 		 * Return back if tx_done is false.
 		 */
-- 
2.20.1
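
For readers outside the driver tree, the shape of the napi-poll fix is
easy to model in isolation: only reap tx completions when instructions
are actually outstanding, and otherwise report tx done immediately.
Below is a minimal, self-contained user-space C sketch of that pattern;
fake_iq, fake_flush_iq and fake_poll are hypothetical stand-ins for
struct octeon_instr_queue, octeon_flush_iq() and liquidio_napi_poll(),
and none of it is driver code.

/* Sketch of the early-out pattern from the patch above: skip the
 * completion-reaping work entirely when nothing is pending.
 * All names are illustrative stand-ins, not liquidio code.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_iq {
	atomic_int instr_pending;	/* outstanding tx instructions */
};

/* Stand-in for octeon_flush_iq(): reap up to 'budget' completions and
 * return 1 if the queue is fully drained afterward, 0 otherwise.
 */
static int fake_flush_iq(struct fake_iq *iq, int budget)
{
	int reaped = 0;

	while (reaped < budget && atomic_load(&iq->instr_pending) > 0) {
		atomic_fetch_sub(&iq->instr_pending, 1);
		reaped++;
	}
	return atomic_load(&iq->instr_pending) == 0;
}

/* Stand-in for the napi poll handler: the fix is the if/else, which
 * avoids touching the queue at all when no work is outstanding.
 */
static int fake_poll(struct fake_iq *iq, int budget)
{
	int tx_done;

	if (atomic_load(&iq->instr_pending))
		tx_done = fake_flush_iq(iq, budget);
	else
		tx_done = 1;	/* nothing pending: tx is trivially done */

	return tx_done;
}

int main(void)
{
	struct fake_iq iq = { .instr_pending = 5 };
	int i;

	for (i = 0; i < 3; i++) {
		int done = fake_poll(&iq, 4);

		printf("poll %d: tx_done=%d pending=%d\n",
		       i, done, atomic_load(&iq.instr_pending));
	}
	/* poll 0 reaps 4 of 5 (tx_done=0), poll 1 drains the last one
	 * (tx_done=1), poll 2 takes the new early-out path (tx_done=1).
	 */
	return 0;
}

The unguarded atomic read is fine as a heuristic here, as it presumably
is in the driver: if a racing submitter bumps instr_pending right after
the zero check, the completion is simply picked up on the next poll
rather than lost.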