	return ret;
}
+/*
+ * Initialize the cached copies of the Identify data and various controller
+ * registers in our nvme_ctrl structure. This should be called as soon as
+ * the admin queue is fully up and running.
+ */
+int nvme_init_identify(struct nvme_ctrl *ctrl)
+{
+	struct nvme_id_ctrl *id;
+	u64 cap;
+	int ret, page_shift;
+
+	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
+	if (ret) {
+		dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+		return ret;
+	}
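+	/*
+	 * CAP.MPSMIN encodes the controller's minimum memory page size as
+	 * a power of two with a 4KiB base, i.e. 2^(12 + MPSMIN) bytes.
+	 */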
+	page_shift = NVME_CAP_MPSMIN(cap) + 12;
+
+	ret = nvme_identify_ctrl(ctrl, &id);
+	if (ret) {
+		dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+		return -EIO;
+	}
+
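+	/* Cache the fields we care about so the Identify buffer can be freed. */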
+	ctrl->oncs = le16_to_cpup(&id->oncs);
+	ctrl->abort_limit = id->acl + 1;
+	ctrl->vwc = id->vwc;
+	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
+	memcpy(ctrl->model, id->mn, sizeof(id->mn));
+	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
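+	/*
+	 * MDTS gives the maximum transfer size as a power of two in units
+	 * of the minimum page size; zero means no limit.  Convert it to
+	 * 512-byte sectors (hence the -9) for the block layer.
+	 */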
+	if (id->mdts)
+		ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
+	else
+		ctrl->max_hw_sectors = UINT_MAX;
+
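+	/*
+	 * For controllers with the stripe size quirk, vendor-specific byte
+	 * 3 holds the log2 of the stripe size in minimum-page-size units.
+	 * Cap max_hw_sectors at one stripe; the per-namespace chunk_sectors
+	 * setting keeps I/O aligned to the stripe boundaries.
+	 */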
+	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
+		unsigned int max_hw_sectors;
+
+		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
+		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
+		if (ctrl->max_hw_sectors) {
+			ctrl->max_hw_sectors = min(max_hw_sectors,
+						   ctrl->max_hw_sectors);
+		} else {
+			ctrl->max_hw_sectors = max_hw_sectors;
+		}
+	}
+
+	kfree(id);
+	return 0;
+}
+
static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
	u32 ctrl_config;
	u32 page_size;
+	u32 max_hw_sectors;
+	u32 stripe_size;
	u16 oncs;
	u16 abort_limit;
	u8 event_limit;
struct nvme_ctrl_ops {
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
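+	/* 64-bit register read, used by nvme_init_identify() to read CAP */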
+	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
};
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_identify(struct nvme_ctrl *ctrl);
void nvme_put_ns(struct nvme_ns *ns);
struct request *nvme_alloc_request(struct request_queue *q,
	struct work_struct probe_work;
	struct work_struct scan_work;
	bool subsystem;
-	u32 max_hw_sectors;
-	u32 stripe_size;
	void __iomem *cmb;
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	list_add_tail(&ns->list, &dev->namespaces);
	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-	if (dev->max_hw_sectors) {
-		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+	if (dev->ctrl.max_hw_sectors) {
+		blk_queue_max_hw_sectors(ns->queue, dev->ctrl.max_hw_sectors);
		blk_queue_max_segments(ns->queue,
-			(dev->max_hw_sectors / (dev->ctrl.page_size >> 9)) + 1);
+			(dev->ctrl.max_hw_sectors / (dev->ctrl.page_size >> 9)) + 1);
	}
-	if (dev->stripe_size)
-		blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
+	if (dev->ctrl.stripe_size)
+		blk_queue_chunk_sectors(ns->queue, dev->ctrl.stripe_size >> 9);
	if (dev->ctrl.vwc & NVME_CTRL_VWC_PRESENT)
		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
	blk_queue_virt_boundary(ns->queue, dev->ctrl.page_size - 1);
static int nvme_dev_add(struct nvme_dev *dev)
{
	int res;
-	struct nvme_id_ctrl *ctrl;
-	int shift = NVME_CAP_MPSMIN(lo_hi_readq(dev->bar + NVME_REG_CAP)) + 12;
-
-	res = nvme_identify_ctrl(&dev->ctrl, &ctrl);
-	if (res) {
-		dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
-		return -EIO;
-	}
-
-	dev->ctrl.oncs = le16_to_cpup(&ctrl->oncs);
-	dev->ctrl.abort_limit = ctrl->acl + 1;
-	dev->ctrl.vwc = ctrl->vwc;
-	memcpy(dev->ctrl.serial, ctrl->sn, sizeof(ctrl->sn));
-	memcpy(dev->ctrl.model, ctrl->mn, sizeof(ctrl->mn));
-	memcpy(dev->ctrl.firmware_rev, ctrl->fr, sizeof(ctrl->fr));
-	if (ctrl->mdts)
-		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
-	else
-		dev->max_hw_sectors = UINT_MAX;
-
-	if ((dev->ctrl.quirks & NVME_QUIRK_STRIPE_SIZE) && ctrl->vs[3]) {
-		unsigned int max_hw_sectors;
-		dev->stripe_size = 1 << (ctrl->vs[3] + shift);
-		max_hw_sectors = dev->stripe_size >> (shift - 9);
-		if (dev->max_hw_sectors) {
-			dev->max_hw_sectors = min(max_hw_sectors,
-						  dev->max_hw_sectors);
-		} else
-			dev->max_hw_sectors = max_hw_sectors;
-	}
-	kfree(ctrl);
+	res = nvme_init_identify(&dev->ctrl);
+	if (res)
+		return res;
	if (!dev->tagset.tags) {
		dev->tagset.ops = &nvme_mq_ops;
	return 0;
}
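+/*
+ * 64-bit MMIO read of a controller register; the core uses this through
+ * ctrl->ops to read CAP in nvme_init_identify().
+ */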
+static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+	*val = readq(to_nvme_dev(ctrl)->bar + off);
+	return 0;
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.reg_read32 = nvme_pci_reg_read32,
	.reg_write32 = nvme_pci_reg_write32,
+	.reg_read64 = nvme_pci_reg_read64,
	.free_ctrl = nvme_pci_free_ctrl,
};