req->errors = 0;
if (cmd_rq->aborted)
- dev_warn(&nvmeq->dev->pci_dev->dev,
+ dev_warn(nvmeq->dev->dev,
"completing aborted command with status:%04x\n",
status);
if (iod->nents) {
- dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
+ dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (blk_integrity_rq(req)) {
if (!rq_data_dir(req))
nvme_dif_remap(req, nvme_dif_complete);
- dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+ dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1,
rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
}
if (blk_rq_bytes(req) !=
nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
- dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg,
+ dma_unmap_sg(nvmeq->dev->dev, iod->sg,
iod->nents, dma_dir);
goto retry_cmd;
}
if (work_busy(&dev->reset_work))
goto out;
list_del_init(&dev->node);
- dev_warn(&dev->pci_dev->dev,
- "I/O %d QID %d timeout, reset controller\n",
+ dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth)
{
- struct device *dmadev = &dev->pci_dev->dev;
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
if (!nvmeq)
return NULL;
- nvmeq->cqes = dma_zalloc_coherent(dmadev, CQ_SIZE(depth),
+ nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
&nvmeq->cq_dma_addr, GFP_KERNEL);
if (!nvmeq->cqes)
goto free_nvmeq;
- nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
&nvmeq->sq_dma_addr, GFP_KERNEL);
if (!nvmeq->sq_cmds)
goto free_cqdma;
- nvmeq->q_dmadev = dmadev;
+ nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev;
snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
dev->instance, qid);
return nvmeq;
free_cqdma:
- dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
+ dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
kfree(nvmeq);
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
- dev_err(&dev->pci_dev->dev,
+ dev_err(dev->dev,
"Device not ready; aborting %s\n", enabled ?
"initialisation" : "reset");
return -ENODEV;
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
- dev_err(&dev->pci_dev->dev,
+ dev_err(dev->dev,
"Device shutdown incomplete; abort shutdown\n");
return -ENODEV;
}
dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
dev->admin_tagset.reserved_tags = 1;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
+ dev->admin_tagset.numa_node = dev_to_node(dev->dev);
dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
dev->admin_tagset.driver_data = dev;
unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
if (page_shift < dev_page_min) {
- dev_err(&dev->pci_dev->dev,
+ dev_err(dev->dev,
"Minimum device page size (%u) too large for "
"host (%u)\n", 1 << dev_page_min,
1 << page_shift);
return -ENODEV;
}
if (page_shift > dev_page_max) {
- dev_info(&dev->pci_dev->dev,
+ dev_info(dev->dev,
"Device maximum page size (%u) smaller than "
"host (%u); enabling work-around\n",
1 << dev_page_max, 1 << page_shift);
sg_mark_end(&sg[i - 1]);
iod->nents = count;
- nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+ nents = dma_map_sg(dev->dev, sg, count,
write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (!nents)
goto free_iod;
{
int i;
- dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents,
write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
for (i = 0; i < iod->nents; i++)
goto unmap;
}
if (meta_len) {
- meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+ meta = dma_alloc_coherent(dev->dev, meta_len,
&meta_dma, GFP_KERNEL);
if (!meta) {
status = -ENOMEM;
meta_len))
status = -EFAULT;
}
- dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
+ dma_free_coherent(dev->dev, meta_len, meta, meta_dma);
}
return status;
}
u16 old_ms;
unsigned short bs;
- id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
- GFP_KERNEL);
+ id = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
if (!id) {
- dev_warn(&dev->pci_dev->dev, "%s: Memory alocation failure\n",
- __func__);
+ dev_warn(dev->dev, "%s: Memory allocation failure\n", __func__);
return 0;
}
if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
- dev_warn(&dev->pci_dev->dev,
+ dev_warn(dev->dev,
"identify failed ns:%d, setting capacity to 0\n",
ns->ns_id);
memset(id, 0, sizeof(*id));
if (dev->oncs & NVME_CTRL_ONCS_DSM)
nvme_config_discard(ns);
- dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
+ dma_free_coherent(dev->dev, 4096, id, dma_addr);
return 0;
}
if (work_busy(&dev->reset_work))
continue;
list_del_init(&dev->node);
- dev_warn(&dev->pci_dev->dev,
+ dev_warn(dev->dev,
"Failed status: %x, reset controller\n",
readl(&dev->bar->csts));
dev->reset_workfn = nvme_reset_failed_dev;
{
struct nvme_ns *ns;
struct gendisk *disk;
- int node = dev_to_node(&dev->pci_dev->dev);
+ int node = dev_to_node(dev->dev);
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
if (status < 0)
return status;
if (status > 0) {
- dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
- status);
+ dev_err(dev->dev, "Could not set queue count (%d)\n", status);
return 0;
}
return min(result & 0xffff, result >> 16) + 1;
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = dev->queues[0];
- struct pci_dev *pdev = dev->pci_dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, i, vecs, nr_io_queues, size;
nr_io_queues = num_possible_cpus();
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
- struct pci_dev *pdev = dev->pci_dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int res;
unsigned nn, i;
struct nvme_id_ctrl *ctrl;
dma_addr_t dma_addr;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
- mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
+ mem = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
if (!mem)
return -ENOMEM;
res = nvme_identify(dev, 0, 1, dma_addr);
if (res) {
- dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
- dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+ dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
+ dma_free_coherent(dev->dev, 4096, mem, dma_addr);
return -EIO;
}
} else
dev->max_hw_sectors = max_hw_sectors;
}
- dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+ dma_free_coherent(dev->dev, 4096, mem, dma_addr);
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
+ dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
dev->tagset.cmd_size = nvme_cmd_size(dev);
{
u64 cap;
int bars, result = -ENOMEM;
- struct pci_dev *pdev = dev->pci_dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pci_enable_device_mem(pdev))
return result;
if (pci_request_selected_regions(pdev, bars, "nvme"))
goto disable_pci;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
goto disable;
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
static void nvme_dev_unmap(struct nvme_dev *dev)
{
- if (dev->pci_dev->msi_enabled)
- pci_disable_msi(dev->pci_dev);
- else if (dev->pci_dev->msix_enabled)
- pci_disable_msix(dev->pci_dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ else if (pdev->msix_enabled)
+ pci_disable_msix(pdev);
if (dev->bar) {
iounmap(dev->bar);
dev->bar = NULL;
- pci_release_regions(dev->pci_dev);
+ pci_release_regions(pdev);
}
- if (pci_is_enabled(dev->pci_dev))
- pci_disable_device(dev->pci_dev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
}
struct nvme_delq_ctx {
&worker, "nvme%d", dev->instance);
if (IS_ERR(kworker_task)) {
- dev_err(&dev->pci_dev->dev,
+ dev_err(dev->dev,
"Failed to create queue del task\n");
for (i = dev->queue_count - 1; i > 0; i--)
nvme_disable_queue(dev, i);
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
- struct device *dmadev = &dev->pci_dev->dev;
- dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+ dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
PAGE_SIZE, PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
/* Optimisation for I/Os between 4k and 128k */
- dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+ dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
256, 256, 0);
if (!dev->prp_small_pool) {
dma_pool_destroy(dev->prp_page_pool);
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
- pci_dev_put(dev->pci_dev);
+ put_device(dev->dev);
put_device(dev->device);
nvme_free_namespaces(dev);
nvme_release_instance(dev);
static int nvme_remove_dead_ctrl(void *arg)
{
struct nvme_dev *dev = (struct nvme_dev *)arg;
- struct pci_dev *pdev = dev->pci_dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pci_get_drvdata(pdev))
pci_stop_and_remove_bus_device_locked(pdev);
{
nvme_dev_shutdown(dev);
if (nvme_dev_resume(dev)) {
- dev_warn(&dev->pci_dev->dev, "Device failed to resume\n");
+ dev_warn(dev->dev, "Device failed to resume\n");
kref_get(&dev->kref);
if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
dev->instance))) {
- dev_err(&dev->pci_dev->dev,
+ dev_err(dev->dev,
"Failed to start controller remove task\n");
kref_put(&dev->kref, nvme_free_dev);
}
INIT_LIST_HEAD(&dev->namespaces);
dev->reset_workfn = nvme_reset_failed_dev;
INIT_WORK(&dev->reset_work, nvme_reset_workfn);
- dev->pci_dev = pci_dev_get(pdev);
+ dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
if (result)
release:
nvme_release_instance(dev);
put_pci:
- pci_dev_put(dev->pci_dev);
+ put_device(dev->dev);
free:
kfree(dev->queues);
kfree(dev->entry);
u8 cmdque = 0x01 << 1;
u8 fw_offset = sizeof(dev->firmware_rev);
- mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
out_free:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
- dma_addr);
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
out_dma:
return res;
}
int xfer_len;
__be32 tmp_id = cpu_to_be32(ns->ns_id);
- mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
inq_response[6] = 0x00; /* Rsvd */
inq_response[7] = 0x44; /* Designator Length */
- sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
+ sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
memcpy(&inq_response[12], dev->model, sizeof(dev->model));
sprintf(&inq_response[52], "%04x", tmp_id);
memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
out_free:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
- dma_addr);
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
out_dma:
return res;
}
goto out_mem;
}
- mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
out_free:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
- dma_addr);
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
out_dma:
kfree(inq_response);
out_mem:
goto out_mem;
}
- mem = dma_alloc_coherent(&dev->pci_dev->dev,
- sizeof(struct nvme_smart_log),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+ dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
mem, dma_addr);
out_dma:
kfree(log_response);
goto out_mem;
}
- mem = dma_alloc_coherent(&dev->pci_dev->dev,
- sizeof(struct nvme_smart_log),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+ dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
mem, dma_addr);
out_dma:
kfree(log_response);
else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
return SNTI_INTERNAL_ERROR;
- mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
}
out_dma:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
- dma_addr);
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
out:
return res;
}
unsigned ps_desired = 0;
/* NVMe Controller Identify */
- mem = dma_alloc_coherent(&dev->pci_dev->dev,
- sizeof(struct nvme_id_ctrl),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
if (nvme_sc)
res = nvme_sc;
out_dma:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
- dma_addr);
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
out:
return res;
}
*/
if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
- mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ mem = dma_alloc_coherent(dev->dev,
sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
(1 << (id_ns->lbaf[flbas].ds));
}
out_dma:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns),
mem, dma_addr);
}
out:
struct nvme_command c;
/* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
- mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
res = nvme_sc;
out_dma:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
- dma_addr);
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
out:
return res;
}
resp_size = READ_CAP_16_RESP_SIZE;
}
- mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
kfree(response);
out_dma:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
- dma_addr);
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
out:
return res;
}
goto out;
} else {
/* NVMe Controller Identify */
- mem = dma_alloc_coherent(&dev->pci_dev->dev,
- sizeof(struct nvme_id_ctrl),
+ mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
&dma_addr, GFP_KERNEL);
if (mem == NULL) {
res = -ENOMEM;
kfree(response);
out_dma:
- dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
- dma_addr);
+ dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
out:
return res;
}
goto out;
}
- range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+ range = dma_alloc_coherent(dev->dev, ndesc * sizeof(*range),
&dma_addr, GFP_KERNEL);
if (!range)
goto out;
nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
res = nvme_trans_status_code(hdr, nvme_sc);
- dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
- range, dma_addr);
+ dma_free_coherent(dev->dev, ndesc * sizeof(*range), range, dma_addr);
out:
kfree(plist);
return res;