liquidio: New unload state
authorRaghu Vatsavayi <rvatsavayi@caviumnetworks.com>
Wed, 22 Jun 2016 05:53:08 +0000 (22:53 -0700)
committerDavid S. Miller <davem@davemloft.net>
Sat, 25 Jun 2016 16:08:28 +0000 (12:08 -0400)
This patch adds a new command-response state so that control packets are not
sent to the firmware during unload; only the RX control command, which stops
traffic on the chip, is still allowed through.
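
As a rough illustration of the gating this introduces, the following is a
minimal userspace sketch, not kernel code: a pthread mutex stands in for
cmd_resp_wqlock, the opcode value used for OCTNET_CMD_RX_CTL is a placeholder,
and the actual soft-command submission is elided. Once the state is set to
OCT_DRV_OFFLINE on the unload path, only the RX control command gets past the
check.

/*
 * Userspace model of the new cmd_resp_state gating.  Constants mirror the
 * ones added in octeon_device.h; OCTNET_CMD_RX_CTL's value is a placeholder.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define OCT_DRV_ONLINE     1
#define OCT_DRV_OFFLINE    2
#define OCTNET_CMD_RX_CTL  0x4   /* placeholder opcode for "rx on/off" */

struct oct_dev {
	pthread_mutex_t cmd_resp_wqlock;
	int cmd_resp_state;
};

/* Mirrors the check added to octnet_send_nic_ctrl_pkt(). */
static int send_ctrl_cmd(struct oct_dev *oct, int cmd)
{
	pthread_mutex_lock(&oct->cmd_resp_wqlock);
	if (oct->cmd_resp_state == OCT_DRV_OFFLINE &&
	    cmd != OCTNET_CMD_RX_CTL) {
		pthread_mutex_unlock(&oct->cmd_resp_wqlock);
		printf("cmd:%d not processed since driver offline\n", cmd);
		return -1;
	}
	/* ... allocate and queue the soft command to firmware here ... */
	pthread_mutex_unlock(&oct->cmd_resp_wqlock);
	return 0;
}

int main(void)
{
	struct oct_dev oct = {
		.cmd_resp_wqlock = PTHREAD_MUTEX_INITIALIZER,
		.cmd_resp_state  = OCT_DRV_ONLINE,  /* set at setup time */
	};

	send_ctrl_cmd(&oct, 0x1);                /* allowed while online */

	/* Unload path: mark the device offline under the lock. */
	pthread_mutex_lock(&oct.cmd_resp_wqlock);
	oct.cmd_resp_state = OCT_DRV_OFFLINE;
	pthread_mutex_unlock(&oct.cmd_resp_wqlock);

	send_ctrl_cmd(&oct, 0x1);                /* now rejected */
	send_ctrl_cmd(&oct, OCTNET_CMD_RX_CTL);  /* rx ctrl still goes through */
	return 0;
}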

Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Satanand Burla <satananda.burla@caviumnetworks.com>
Signed-off-by: Felix Manlunas <felix.manlunas@caviumnetworks.com>
Signed-off-by: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
Signed-off-by: Raghu Vatsavayi <rvatsavayi@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/octeon_device.h
drivers/net/ethernet/cavium/liquidio/octeon_nic.c
drivers/net/ethernet/cavium/liquidio/response_manager.c

index 5fb1b79e9ef375e3d005257eba438c0f19496cfa..4440086aaef184cb2b1ded18abd8eb275fe80c55 100644
@@ -1327,6 +1327,10 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
                return 1;
        }
 
+       spin_lock_bh(&oct->cmd_resp_wqlock);
+       oct->cmd_resp_state = OCT_DRV_OFFLINE;
+       spin_unlock_bh(&oct->cmd_resp_wqlock);
+
        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < lio->linfo.num_rxpciq; j++)
index abfc0d6d55910b7c31d99c67c57b46f58b39544b..ceb905d9ef4c86ba57aa5e4744db5ff3cc110557 100644
@@ -383,6 +383,10 @@ struct octeon_device {
 
        struct cavium_wq dma_comp_wq;
 
+       /** Lock for dma response list */
+       spinlock_t cmd_resp_wqlock;
+       u32 cmd_resp_state;
+
        struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
 
        struct cavium_wk nic_poll_work;
@@ -392,6 +396,8 @@ struct octeon_device {
        void *priv;
 };
 
+#define  OCT_DRV_ONLINE 1
+#define  OCT_DRV_OFFLINE 2
 #define  OCTEON_CN6XXX(oct)           ((oct->chip_id == OCTEON_CN66XX) || \
                                       (oct->chip_id == OCTEON_CN68XX))
 #define CHIP_FIELD(oct, TYPE, field)             \
index 7843b8a05dcfd973f6df57fa40e7c8b8a895e709..36f1970a860eacc753012ccbdb3342ee1d9fbfe9 100644
@@ -171,20 +171,36 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
        int retval;
        struct octeon_soft_command *sc = NULL;
 
+       spin_lock_bh(&oct->cmd_resp_wqlock);
+       /* Allow only rx ctrl command to stop traffic on the chip
+        * during offline operations
+        */
+       if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) &&
+           (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) {
+               spin_unlock_bh(&oct->cmd_resp_wqlock);
+               dev_err(&oct->pci_dev->dev,
+                       "%s cmd:%d not processed since driver offline\n",
+                       __func__, nctrl->ncmd.s.cmd);
+               return -1;
+       }
+
        sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
        if (!sc) {
                dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
                        __func__);
+               spin_unlock_bh(&oct->cmd_resp_wqlock);
                return -1;
        }
 
        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                octeon_free_soft_command(oct, sc);
-               dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
-                       __func__, retval);
+               dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
+                       __func__, nctrl->ncmd.s.cmd, retval);
+               spin_unlock_bh(&oct->cmd_resp_wqlock);
                return -1;
        }
 
+       spin_unlock_bh(&oct->cmd_resp_wqlock);
        return retval;
 }
index e2e9103e6ebd0e37839eb554530e67f5b388e549..c93210f99dda2929f49f913e0a85467f85c8a161 100644
@@ -54,6 +54,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
                spin_lock_init(&oct->response_list[i].lock);
                atomic_set(&oct->response_list[i].pending_req_count, 0);
        }
+       spin_lock_init(&oct->cmd_resp_wqlock);
 
        oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
        if (!oct->dma_comp_wq.wq) {
@@ -64,6 +65,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
        cwq = &oct->dma_comp_wq;
        INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
        cwq->wk.ctxptr = oct;
+       oct->cmd_resp_state = OCT_DRV_ONLINE;
        queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
 
        return ret;