}
static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
-			unsigned long addr, int length,
-			struct scatterlist *sg, int nents)
+			unsigned long addr, int length, struct scatterlist *sg)
{
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
-	dma_unmap_sg(&dev->pci_dev->dev, sg, nents, DMA_FROM_DEVICE);
+	dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);
	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));
}
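The rule being applied here comes from the DMA API: dma_unmap_sg() must be
passed the same entry count that was originally handed to dma_map_sg(), not
dma_map_sg()'s return value, which may be smaller when the IOMMU coalesces
adjacent entries. A minimal sketch of the correct pairing, with the helper
name and error handling assumed for illustration:

	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/scatterlist.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical helper, for illustration only. */
	static int sketch_dma_roundtrip(struct device *dev,
					struct scatterlist *sg,
					unsigned long addr, int length)
	{
		int count = DIV_ROUND_UP(offset_in_page(addr) + length,
					 PAGE_SIZE);
		int nents;

		/* dma_map_sg() may coalesce entries; its return value
		 * can be smaller than count.
		 */
		nents = dma_map_sg(dev, sg, count, DMA_FROM_DEVICE);
		if (!nents)
			return -ENOMEM;

		/* ... issue I/O against the nents coalesced entries ... */

		/* Unmap with the original count, never the coalesced nents. */
		dma_unmap_sg(dev, sg, count, DMA_FROM_DEVICE);
		return 0;
	}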
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);
-	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
+	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg);
	nvme_free_prps(dev, prps);
	return status;
}
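On the caller side the change works because count is a pure function of addr
and length: the map path derives the same value before pinning the user pages
and mapping the scatterlist, so nvme_unmap_user_pages() can recompute it
rather than having every caller carry nents around. A condensed sketch of
that map side, assumed from the surrounding driver code rather than quoted
verbatim:

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	/* ... pin count user pages and fill sg[0..count-1] with them ... */
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

Since the unmap path evaluates the same expression over the same arguments,
it unmaps and put_page()s exactly the count pages that were pinned.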
	else
		status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (cmd.data_len) {
-		nvme_unmap_user_pages(dev, 0, cmd.addr, cmd.data_len, sg,
-								nents);
+		nvme_unmap_user_pages(dev, 0, cmd.addr, cmd.data_len, sg);
		nvme_free_prps(dev, prps);
	}
	return status;