return ret;
}
-static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns, *next;
unsigned i;
}
}
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
+static void nvme_scan_work(struct work_struct *work)
{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, scan_work);
struct nvme_id_ctrl *id;
unsigned nn;
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
if (nvme_identify_ctrl(ctrl, &id))
return;
if (!nvme_scan_ns_list(ctrl, nn))
goto done;
}
- __nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
+ nvme_scan_ns_sequential(ctrl, nn);
done:
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
+
+ if (ctrl->ops->post_scan)
+ ctrl->ops->post_scan(ctrl);
}
-EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
+
+void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+ /*
+ * Do not queue new scan work when a controller is reset during
+ * removal.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ schedule_work(&ctrl->scan_work);
+}
+EXPORT_SYMBOL_GPL(nvme_queue_scan);
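(Illustration, not part of the patch: with nvme_queue_scan() exported by the core, a transport driver can trigger a namespace scan from its own init/reset path once the controller is live, instead of keeping a private scan work item. The nvme_foo_* names and struct layout below are hypothetical; the PCIe driver's real call site appears further down in this patch, at the dev->online_queues > 1 check.)

/*
 * Hypothetical transport reset path, for illustration only.
 * nvme_foo_dev (assumed to embed a struct nvme_ctrl as "ctrl") and
 * nvme_foo_setup_io_queues() are assumed names, not part of the patch.
 */
static void nvme_foo_reset_work(struct work_struct *work)
{
	struct nvme_foo_dev *dev =
		container_of(work, struct nvme_foo_dev, reset_work);

	if (nvme_foo_setup_io_queues(dev))
		return;

	/* the core checks ctrl->state and schedules ctrl->scan_work */
	nvme_queue_scan(&dev->ctrl);
}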
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ flush_work(&ctrl->scan_work);
+ nvme_remove_namespaces(ctrl);
+
device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
spin_lock(&dev_list_lock);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
+ INIT_WORK(&ctrl->scan_work, nvme_scan_work);
ret = nvme_set_instance(ctrl);
if (ret)
u32 vs;
bool subsystem;
unsigned long quirks;
+ struct work_struct scan_work;
};
/*
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int (*reset_ctrl)(struct nvme_ctrl *ctrl);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
+ void (*post_scan)(struct nvme_ctrl *ctrl);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
struct msix_entry *entry;
void __iomem *bar;
struct work_struct reset_work;
- struct work_struct scan_work;
struct work_struct remove_work;
struct work_struct async_work;
struct timer_list watchdog_timer;
return 0;
}
-static void nvme_queue_scan(struct nvme_dev *dev)
-{
- /*
- * Do not queue new scan work when a controller is reset during
- * removal.
- */
- if (dev->ctrl.state == NVME_CTRL_LIVE)
- queue_work(nvme_workq, &dev->scan_work);
-}
-
static void nvme_complete_async_event(struct nvme_dev *dev,
struct nvme_completion *cqe)
{
switch (result & 0xff07) {
case NVME_AER_NOTICE_NS_CHANGED:
dev_info(dev->ctrl.device, "rescanning\n");
- nvme_queue_scan(dev);
+ nvme_queue_scan(&dev->ctrl);
default:
dev_warn(dev->ctrl.device, "async event result %08x\n", result);
}
return result;
}
-static void nvme_set_irq_hints(struct nvme_dev *dev)
+static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq;
int i;
}
}
-static void nvme_dev_scan(struct work_struct *work)
-{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
-
- if (!dev->tagset.tags)
- return;
- nvme_scan_namespaces(&dev->ctrl);
- nvme_set_irq_hints(dev);
-}
-
static void nvme_del_queue_end(struct request *req, int error)
{
struct nvme_queue *nvmeq = req->end_io_data;
}
if (dev->online_queues > 1)
- nvme_queue_scan(dev);
+ nvme_queue_scan(&dev->ctrl);
return;
out:
.reg_read64 = nvme_pci_reg_read64,
.reset_ctrl = nvme_pci_reset_ctrl,
.free_ctrl = nvme_pci_free_ctrl,
+ .post_scan = nvme_pci_post_scan,
};
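(Illustration, not part of the patch: because nvme_scan_work() above only invokes ->post_scan when it is non-NULL, the new callback is optional. A transport with no post-scan step can simply leave the member unset; the nvme_foo_* names are hypothetical.)

static const struct nvme_ctrl_ops nvme_foo_ctrl_ops = {
	.reg_read64	= nvme_foo_reg_read64,
	.reset_ctrl	= nvme_foo_reset_ctrl,
	.free_ctrl	= nvme_foo_free_ctrl,
	/* .post_scan left NULL: the core skips the callback entirely */
};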
static int nvme_dev_map(struct nvme_dev *dev)
if (result)
goto free;
- INIT_WORK(&dev->scan_work, nvme_dev_scan);
INIT_WORK(&dev->reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
INIT_WORK(&dev->async_work, nvme_async_event_work);
pci_set_drvdata(pdev, NULL);
flush_work(&dev->async_work);
- flush_work(&dev->scan_work);
- nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
nvme_dev_disable(dev, true);
flush_work(&dev->reset_work);