ttm: Include the 'struct dev' when using the DMA API.
author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
           Tue, 22 Feb 2011 18:24:32 +0000 (13:24 -0500)
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
           Tue, 22 Feb 2011 18:26:23 +0000 (13:26 -0500)
This makes the accounting done by 'debug_dma_dump_mappings()'
under CONFIG_DMA_API_DEBUG=y be attributed to the correct device
instead of to 'fallback'.

No functional change - just cosmetic.
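
As an illustrative sketch (not from the patch itself): under dma-debug,
the 'struct device' passed to the DMA API decides which entry a mapping
is accounted under, so the NULL device used until now filed every TTM
allocation under 'fallback':

	/* Hypothetical snippet; 'bdev' is the ttm_bo_device whose new
	 * 'dev' member the drivers below now fill in. */
	addr = dma_alloc_coherent(NULL, PAGE_SIZE, &bus_addr,
				  GFP_KERNEL);	/* shows up as 'fallback' */
	addr = dma_alloc_coherent(bdev->dev, PAGE_SIZE, &bus_addr,
				  GFP_KERNEL);	/* shows up under bdev->dev */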

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_page_alloc.h

diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a163c7c612e78eb6b6718620ee75773cd49a26f7..931b22142ed28ff17e75cc2b6da7110665a5440d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -559,6 +559,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
        if (ret)
                return ret;
 
+       dev_priv->ttm.bdev.dev = dev->dev;
        ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
                                 dev_priv->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index ca045058e4985365e54fe84f2bc79762acc6f9df..cfe223f223949b63608d28df79119a86fae4172e 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -513,6 +513,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
        if (r) {
                return r;
        }
+       rdev->mman.bdev.dev = rdev->dev;
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
                               rdev->mman.bo_global_ref.ref.object,
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 737a2a2e46a58ca7ee66cbf67c0afd0583275ff6..35849dbf3ab5f69a976f6ed44bd7b417be1307b1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -664,7 +664,7 @@ out:
  */
 int ttm_get_pages(struct list_head *pages, int flags,
                  enum ttm_caching_state cstate, unsigned count,
-                 dma_addr_t *dma_address)
+                 dma_addr_t *dma_address, struct device *dev)
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
@@ -685,7 +685,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
                for (r = 0; r < count; ++r) {
                        if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
                                void *addr;
-                               addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+                               addr = dma_alloc_coherent(dev, PAGE_SIZE,
                                                          &dma_address[r],
                                                          gfp_flags);
                                if (addr == NULL)
@@ -730,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
                        printk(KERN_ERR TTM_PFX
                               "Failed to allocate extra pages "
                               "for large request.");
-                       ttm_put_pages(pages, 0, flags, cstate, NULL);
+                       ttm_put_pages(pages, 0, flags, cstate, NULL, NULL);
                        return r;
                }
        }
@@ -741,7 +741,8 @@ int ttm_get_pages(struct list_head *pages, int flags,
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-                  enum ttm_caching_state cstate, dma_addr_t *dma_address)
+                  enum ttm_caching_state cstate, dma_addr_t *dma_address,
+                  struct device *dev)
 {
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
@@ -757,7 +758,7 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
                                void *addr = page_address(p);
                                WARN_ON(!addr || !dma_address[r]);
                                if (addr)
-                                       dma_free_coherent(NULL, PAGE_SIZE,
+                                       dma_free_coherent(dev, PAGE_SIZE,
                                                          addr,
                                                          dma_address[r]);
                                dma_address[r] = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 86d5b1745a45a09cc0dd977a0cf6b1b655bc99cd..0f8fc9ff0c538b927d6ffaa8a05e8d2715b92881 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -110,7 +110,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
                INIT_LIST_HEAD(&h);
 
                ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
-                                   &ttm->dma_address[index]);
+                                   &ttm->dma_address[index], ttm->be->bdev->dev);
 
                if (ret != 0)
                        return NULL;
@@ -304,7 +304,7 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
                }
        }
        ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-                     ttm->dma_address);
+                     ttm->dma_address, ttm->be->bdev->dev);
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 10ca97ee02063c6c83c56b27f2d5ac895382dad1..4a8c7893e8ff84cdc9fb018c2cc41820a8fd007c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -322,7 +322,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;
 
-
+       dev_priv->bdev.dev = dev->dev;
        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index ebcd3dd7203bf56a9f174194df6bad896bbdff38..4d97014e8c8df985db3d072ace1e6b45fc282897 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -533,6 +533,7 @@ struct ttm_bo_device {
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
+       struct device *dev;
        rwlock_t vm_lock;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        /*
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 8062890f725ef49955c231f5a09202539eebe0fc..ccb6b7a240e2a3aacfbd63c5556c5eb6cc06ed18 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
  * @cstate: ttm caching state for the page.
  * @count: number of pages to allocate.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * @dev: struct device for appropriate DMA accounting.
  */
 int ttm_get_pages(struct list_head *pages,
                  int flags,
                  enum ttm_caching_state cstate,
                  unsigned count,
-                 dma_addr_t *dma_address);
+                 dma_addr_t *dma_address,
+                 struct device *dev);
 /**
  * Put linked list of pages to pool.
  *
@@ -52,12 +54,14 @@ int ttm_get_pages(struct list_head *pages,
  * @flags: ttm flags for page allocation.
  * @cstate: ttm caching state.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * @dev: struct device for appropriate DMA accounting.
  */
 void ttm_put_pages(struct list_head *pages,
                   unsigned page_count,
                   int flags,
                   enum ttm_caching_state cstate,
-                  dma_addr_t *dma_address);
+                  dma_addr_t *dma_address,
+                  struct device *dev);
 /**
  * Initialize pool allocator.
  */