drm/nouveau: map pages using DMA API
Author:     Alexandre Courbot <acourbot@nvidia.com>
Date:       Thu, 31 Jul 2014 09:09:42 +0000 (18:09 +0900)
Committer:  Ben Skeggs <bskeggs@redhat.com>
CommitDate: Sat, 9 Aug 2014 19:11:11 +0000 (05:11 +1000)
The DMA API is the recommended way to map pages no matter what the
underlying bus is. Use the DMA functions for page mapping and remove
currently existing wrappers.

Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/core/engine/device/base.c
drivers/gpu/drm/nouveau/core/include/core/device.h
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/nouveau_bo.c

index 466dda2f7a3a67a7621a711377abe7ebf21f426f..6c16dabe13b3729cc628fca123ef1cc27c1c256f 100644 (file)
@@ -487,31 +487,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
        }
 }
 
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page)
-{
-       dma_addr_t ret;
-
-       if (nv_device_is_pci(device)) {
-               ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
-                                  PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(device->pdev, ret))
-                       ret = 0;
-       } else {
-               ret = page_to_phys(page);
-       }
-
-       return ret;
-}
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
-{
-       if (nv_device_is_pci(device))
-               pci_unmap_page(device->pdev, addr, PAGE_SIZE,
-                              PCI_DMA_BIDIRECTIONAL);
-}
-
 int
 nv_device_get_irq(struct nouveau_device *device, bool stall)
 {
index 9ce2ee9aa32e840d0058a85e0df80113a05ede5d..03c039dcc61f04880c56622bdea272adc0f4d989 100644 (file)
@@ -174,12 +174,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
 resource_size_t
 nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
 
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page);
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
-
 int
 nv_device_get_irq(struct nouveau_device *device, bool stall);
 
index 1fc55c1e91a13b00869c9aa8a46ad2bd0f253e43..7d88e17fa9271d030894f519bc9ea4773929468e 100644 (file)
@@ -250,7 +250,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
        priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (priv->r100c08_page) {
-               priv->r100c08 = nv_device_map_page(device, priv->r100c08_page);
-               if (!priv->r100c08)
+               priv->r100c08 = dma_map_page(nv_device_base(device),
+                                            priv->r100c08_page, 0, PAGE_SIZE,
+                                            DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(nv_device_base(device), priv->r100c08)) {
+                       priv->r100c08 = 0;
                        nv_warn(priv, "failed 0x100c08 page map\n");
+               }
        } else {
@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object)
        struct nv50_fb_priv *priv = (void *)object;
 
        if (priv->r100c08_page) {
-               nv_device_unmap_page(device, priv->r100c08);
+               dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
+                              DMA_BIDIRECTIONAL);
                __free_page(priv->r100c08_page);
        }
 
index 0670ae33ee450d7428580c280f34051b893675e1..9f5f3ac8d4c6e9dfb350289e72c92a3a3fd3bc79 100644 (file)
@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object)
        struct nvc0_fb_priv *priv = (void *)object;
 
        if (priv->r100c10_page) {
-               nv_device_unmap_page(device, priv->r100c10);
+               dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+                              DMA_BIDIRECTIONAL);
                __free_page(priv->r100c10_page);
        }
 
@@ -93,7 +94,9 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
        priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (priv->r100c10_page) {
-               priv->r100c10 = nv_device_map_page(device, priv->r100c10_page);
-               if (!priv->r100c10)
+               priv->r100c10 = dma_map_page(nv_device_base(device),
+                                            priv->r100c10_page, 0, PAGE_SIZE,
+                                            DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(nv_device_base(device), priv->r100c10))
                        return -EFAULT;
        }
index d349078123bd826f7c7e50b780cf4a0bd641a35b..a405b75a90d9c9cc5a4a485d999603a870b38c8f 100644 (file)
@@ -1340,6 +1340,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
        struct nouveau_drm *drm;
        struct nouveau_device *device;
        struct drm_device *dev;
+       struct device *pdev;
        unsigned i;
        int r;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1358,6 +1359,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
        drm = nouveau_bdev(ttm->bdev);
        device = nv_device(drm->device);
        dev = drm->dev;
+       pdev = nv_device_base(device);
 
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
@@ -1377,17 +1379,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
        }
 
        for (i = 0; i < ttm->num_pages; i++) {
-               ttm_dma->dma_address[i] = nv_device_map_page(device,
-                                                            ttm->pages[i]);
-               if (!ttm_dma->dma_address[i]) {
+               dma_addr_t addr;
+
+               addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+                                   DMA_BIDIRECTIONAL);
+
+               if (dma_mapping_error(pdev, addr)) {
-                       while (--i) {
+                       while (i--) {
-                               nv_device_unmap_page(device,
-                                                    ttm_dma->dma_address[i]);
+                               dma_unmap_page(pdev, ttm_dma->dma_address[i],
+                                              PAGE_SIZE, DMA_BIDIRECTIONAL);
                                ttm_dma->dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }
+
+               ttm_dma->dma_address[i] = addr;
        }
        return 0;
 }
@@ -1399,6 +1406,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
        struct nouveau_drm *drm;
        struct nouveau_device *device;
        struct drm_device *dev;
+       struct device *pdev;
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
@@ -1408,6 +1416,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
        drm = nouveau_bdev(ttm->bdev);
        device = nv_device(drm->device);
        dev = drm->dev;
+       pdev = nv_device_base(device);
 
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
@@ -1425,7 +1434,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
        for (i = 0; i < ttm->num_pages; i++) {
                if (ttm_dma->dma_address[i]) {
-                       nv_device_unmap_page(device, ttm_dma->dma_address[i]);
+                       dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+                                      DMA_BIDIRECTIONAL);
                }
        }