struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
{
struct nvme_dev *dev = ns->dev;
+ struct nvme_id_ctrl *id_ctrl;
+ int nvme_sc, res;

if (alloc_len < 72) {
return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
}
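+ /* nvme_identify_ctrl() allocates id_ctrl on success; it is freed at the end of this function */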
+ nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ return res;
+
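+ /* Device Identification VPD page: 4-byte header followed by one 0x44-byte designator */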
memset(inq_response, 0, alloc_len);
inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
inq_response[3] = 0x48; /* Page Length */
/* Designation Descriptor start */
inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */
inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */
inq_response[6] = 0x00; /* Rsvd */
inq_response[7] = 0x44; /* Designator Length */
- sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
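+ /* take the vendor ID from Identify Controller (little-endian on the wire) instead of PCI config space */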
+ sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid));
memcpy(&inq_response[12], dev->model, sizeof(dev->model));
sprintf(&inq_response[52], "%04x", ns->ns_id); /* namespace ID as four hex digits */
memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
- return nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
+ res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
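+ /* free the Identify buffer only after the response has been copied to user space */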
+ kfree(id_ctrl);
+ return res;
}
static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,