From 64dc81fca7f6d5c51e50ffa850640ad8358acd1f Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Mon, 27 Jun 2005 14:36:40 -0700
Subject: [PATCH] [PATCH] IB/mthca: Use dma_alloc_coherent instead of
 pci_alloc_consistent

Switch all allocations of coherent memory from pci_alloc_consistent()
to dma_alloc_coherent(), so that we can pass GFP_KERNEL.  This should
help when the system is low on memory.

Signed-off-by: Roland Dreier
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/infiniband/hw/mthca/mthca_cq.c | 25 +++++++++++++------------
 drivers/infiniband/hw/mthca/mthca_eq.c | 12 ++++++------
 drivers/infiniband/hw/mthca/mthca_qp.c | 19 ++++++++++---------
 3 files changed, 29 insertions(+), 27 deletions(-)

diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 3724d9db50a3..5dae5b5dc8e7 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -636,19 +636,19 @@ static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
 	int size;
 
 	if (cq->is_direct)
-		pci_free_consistent(dev->pdev,
-				    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-				    cq->queue.direct.buf,
-				    pci_unmap_addr(&cq->queue.direct,
-						   mapping));
+		dma_free_coherent(&dev->pdev->dev,
+				  (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+				  cq->queue.direct.buf,
+				  pci_unmap_addr(&cq->queue.direct,
+						 mapping));
 	else {
 		size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
 		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
 			if (cq->queue.page_list[i].buf)
-				pci_free_consistent(dev->pdev, PAGE_SIZE,
-						    cq->queue.page_list[i].buf,
-						    pci_unmap_addr(&cq->queue.page_list[i],
-								   mapping));
+				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+						  cq->queue.page_list[i].buf,
+						  pci_unmap_addr(&cq->queue.page_list[i],
+								 mapping));
 
 		kfree(cq->queue.page_list);
 	}
@@ -668,8 +668,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
 		npages = 1;
 		shift = get_order(size) + PAGE_SHIFT;
 
-		cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
-							    size, &t);
+		cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+							  size, &t, GFP_KERNEL);
 		if (!cq->queue.direct.buf)
 			return -ENOMEM;
 
@@ -707,7 +707,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
 
 		for (i = 0; i < npages; ++i) {
 			cq->queue.page_list[i].buf =
-				pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+						   &t, GFP_KERNEL);
 			if (!cq->queue.page_list[i].buf)
 				goto err_free;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 7500ebc23f36..970cba24e79f 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -501,8 +501,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	eq_context = MAILBOX_ALIGN(mailbox);
 
 	for (i = 0; i < npages; ++i) {
-		eq->page_list[i].buf = pci_alloc_consistent(dev->pdev,
-							    PAGE_SIZE, &t);
+		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
+							  PAGE_SIZE, &t, GFP_KERNEL);
 		if (!eq->page_list[i].buf)
 			goto err_out_free;
 
@@ -582,10 +582,10 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 err_out_free:
 	for (i = 0; i < npages; ++i)
 		if (eq->page_list[i].buf)
-			pci_free_consistent(dev->pdev, PAGE_SIZE,
-					    eq->page_list[i].buf,
-					    pci_unmap_addr(&eq->page_list[i],
-							   mapping));
+			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+					  eq->page_list[i].buf,
+					  pci_unmap_addr(&eq->page_list[i],
+							 mapping));
 
 	kfree(eq->page_list);
 	kfree(dma_list);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index ca73bab11a02..031f690f5455 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -934,7 +934,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 		mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
 			  size, shift);
 
-		qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
+		qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
+							  &t, GFP_KERNEL);
 		if (!qp->queue.direct.buf)
 			goto err_out;
 
@@ -973,7 +974,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
 	for (i = 0; i < npages; ++i) {
 		qp->queue.page_list[i].buf =
-			pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+			dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+					   &t, GFP_KERNEL);
 		if (!qp->queue.page_list[i].buf)
 			goto err_out_free;
 
@@ -996,16 +998,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
 err_out_free:
 	if (qp->is_direct) {
-		pci_free_consistent(dev->pdev, size,
-				    qp->queue.direct.buf,
-				    pci_unmap_addr(&qp->queue.direct, mapping));
+		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
+				  pci_unmap_addr(&qp->queue.direct, mapping));
 	} else
 		for (i = 0; i < npages; ++i) {
 			if (qp->queue.page_list[i].buf)
-				pci_free_consistent(dev->pdev, PAGE_SIZE,
-						    qp->queue.page_list[i].buf,
-						    pci_unmap_addr(&qp->queue.page_list[i],
-								   mapping));
+				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+						  qp->queue.page_list[i].buf,
+						  pci_unmap_addr(&qp->queue.page_list[i],
+								 mapping));
 		}
 
-- 
2.20.1
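
Editor's note (not part of the patch): the sketch below illustrates the general
shape of the conversion the patch applies throughout mthca.  The old
pci_alloc_consistent(pdev, size, &dma_handle) interface takes no gfp flags (in
2.6-era kernels it effectively allocated with GFP_ATOMIC), whereas
dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL) takes an explicit
gfp_t, so process-context callers can sleep and reclaim memory when free pages
are scarce.  The driver, struct, and helper names (foo_buf, foo_alloc_buf,
foo_free_buf) are made up for the example and are not part of the mthca code.

/*
 * Hypothetical helper showing the pci_alloc_consistent() ->
 * dma_alloc_coherent() conversion pattern used by the patch above.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct foo_buf {
	void       *vaddr;
	dma_addr_t  dma;
};

static int foo_alloc_buf(struct pci_dev *pdev, size_t size, struct foo_buf *buf)
{
	/*
	 * Old style (implicit gfp flags):
	 *   buf->vaddr = pci_alloc_consistent(pdev, size, &buf->dma);
	 *
	 * New style: pass the underlying struct device and an explicit
	 * GFP_KERNEL so the allocator may sleep/reclaim under memory pressure.
	 */
	buf->vaddr = dma_alloc_coherent(&pdev->dev, size, &buf->dma, GFP_KERNEL);
	if (!buf->vaddr)
		return -ENOMEM;
	return 0;
}

static void foo_free_buf(struct pci_dev *pdev, size_t size, struct foo_buf *buf)
{
	/* Matching free: dma_free_coherent() replaces pci_free_consistent(). */
	dma_free_coherent(&pdev->dev, size, buf->vaddr, buf->dma);
}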