drm/ttm: isolate dma data from ttm_tt V4
author Jerome Glisse <jglisse@redhat.com>
Wed, 9 Nov 2011 22:15:26 +0000 (17:15 -0500)
committer Dave Airlie <airlied@redhat.com>
Tue, 6 Dec 2011 10:40:02 +0000 (10:40 +0000)
Move the DMA data into a superset ttm_dma_tt structure which inherits
from ttm_tt. This allows drivers that don't use the DMA functionality
to avoid wasting memory on it.

V2 Rebased on top of the no-memory-accounting changes (where/when is
   my DeLorean when I need it?)
V3 Make sure the page list is initialized empty
V4 typo/syntax fixes

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
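
In short: a driver that needs per-page DMA addresses now embeds struct
ttm_dma_tt (which carries struct ttm_tt as its first member) instead of
a bare ttm_tt, initializes it with ttm_dma_tt_init(), and hands the TTM
core the embedded base struct. A minimal sketch of the pattern, where
"my_backend" is a hypothetical driver structure, not part of this patch:

	struct my_backend {
		struct ttm_dma_tt ttm;	/* must be first, see nouveau_sgdma.c below */
		/* driver-private state ... */
	};

	static struct ttm_tt *my_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
	{
		struct my_backend *be = kzalloc(sizeof(*be), GFP_KERNEL);

		if (!be)
			return NULL;
		if (ttm_dma_tt_init(&be->ttm, bdev, size, page_flags,
				    dummy_read_page)) {
			kfree(be);	/* init failure must not leak the backend */
			return NULL;
		}
		return &be->ttm.ttm;	/* the TTM core only sees the base struct */
	}

	static void my_tt_destroy(struct ttm_tt *ttm)
	{
		struct my_backend *be = (struct my_backend *)ttm;

		ttm_dma_tt_fini(&be->ttm);	/* frees pages[] and dma_address[] */
		kfree(be);
	}
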
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_page_alloc.h

index 2dc0d8303cb7c4abdd68e00f4f2dab609efabdba..d6326af9fcc05b89987f6225fcc7f14bd9354f7b 100644
@@ -1052,6 +1052,7 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 static int
 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
+       struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct drm_nouveau_private *dev_priv;
        struct drm_device *dev;
        unsigned i;
@@ -1065,7 +1066,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
-               return ttm_dma_populate(ttm, dev->dev);
+               return ttm_dma_populate((void *)ttm, dev->dev);
        }
 #endif
 
@@ -1075,14 +1076,14 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
        }
 
        for (i = 0; i < ttm->num_pages; i++) {
-               ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+               ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
+               if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
                        while (--i) {
-                               pci_unmap_page(dev->pdev, ttm->dma_address[i],
+                               pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-                               ttm->dma_address[i] = 0;
+                               ttm_dma->dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
@@ -1094,6 +1095,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 static void
 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
+       struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct drm_nouveau_private *dev_priv;
        struct drm_device *dev;
        unsigned i;
@@ -1103,14 +1105,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
-               ttm_dma_unpopulate(ttm, dev->dev);
+               ttm_dma_unpopulate((void *)ttm, dev->dev);
                return;
        }
 #endif
 
        for (i = 0; i < ttm->num_pages; i++) {
-               if (ttm->dma_address[i]) {
-                       pci_unmap_page(dev->pdev, ttm->dma_address[i],
+               if (ttm_dma->dma_address[i]) {
+                       pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }
index ee1eb7cba798ff2373cb95f5279171d8f9121293..47f245edf538f10534824a034a36a4b3bcd85aaa 100644
@@ -8,7 +8,10 @@
 #define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
 
 struct nouveau_sgdma_be {
-       struct ttm_tt ttm;
+       /* this has to be the first field so populate/unpopulate in
+        * nouveau_bo.c work properly, otherwise they would have to be
+        * moved here
+        */
+       struct ttm_dma_tt ttm;
        struct drm_device *dev;
        u64 offset;
 };
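
(The comment above is load-bearing: nouveau_ttm_tt_populate() and
friends receive a struct ttm_tt * and downcast it with a plain pointer
cast, which is only equivalent to container_of() while the embedded
ttm sits at offset zero. Illustrative only, not part of the patch:)

	struct ttm_dma_tt *a = (struct ttm_dma_tt *)ttm;	/* needs offset 0 */
	struct ttm_dma_tt *b = container_of(ttm, struct ttm_dma_tt, ttm); /* field-order independent */
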
@@ -20,6 +23,7 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
 
        if (ttm) {
                NV_DEBUG(nvbe->dev, "\n");
+               ttm_dma_tt_fini(&nvbe->ttm);
                kfree(nvbe);
        }
 }
@@ -38,7 +42,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < ttm->num_pages; i++) {
-               dma_addr_t dma_offset = ttm->dma_address[i];
+               dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
                uint32_t offset_l = lower_32_bits(dma_offset);
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -97,7 +101,7 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-       dma_addr_t *list = ttm->dma_address;
+       dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2;
        u32 cnt = ttm->num_pages;
 
@@ -206,7 +210,7 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-       dma_addr_t *list = ttm->dma_address;
+       dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2, tmp[4];
        u32 cnt = ttm->num_pages;
        int i;
@@ -282,10 +286,11 @@ static struct ttm_backend_func nv44_sgdma_backend = {
 static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *node = mem->mm_node;
 
        /* noop: bound in move_notify() */
-       node->pages = ttm->dma_address;
+       node->pages = nvbe->ttm.dma_address;
        return 0;
 }
 
@@ -316,12 +321,13 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
                return NULL;
 
        nvbe->dev = dev;
-       nvbe->ttm.func = dev_priv->gart_info.func;
+       nvbe->ttm.ttm.func = dev_priv->gart_info.func;
 
-       if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+       if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(nvbe);
                return NULL;
        }
-       return &nvbe->ttm;
+       return &nvbe->ttm.ttm;
 }
 
 int
index f499b2c69de557550725312ffa3f931dac33f619..e111a3812434c980b9d1882426e2961a39368c36 100644
@@ -501,7 +501,7 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
  * TTM backend functions.
  */
 struct radeon_ttm_tt {
-       struct ttm_tt                   ttm;
+       struct ttm_dma_tt               ttm;
        struct radeon_device            *rdev;
        u64                             offset;
 };
@@ -509,17 +509,16 @@ struct radeon_ttm_tt {
 static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
 {
-       struct radeon_ttm_tt *gtt;
+       struct radeon_ttm_tt *gtt = (void*)ttm;
        int r;
 
-       gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
        r = radeon_gart_bind(gtt->rdev, gtt->offset,
-                            ttm->num_pages, ttm->pages, ttm->dma_address);
+                            ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
        if (r) {
                DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                          ttm->num_pages, (unsigned)gtt->offset);
@@ -530,18 +529,17 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 
 static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
 {
-       struct radeon_ttm_tt *gtt;
+       struct radeon_ttm_tt *gtt = (void *)ttm;
 
-       gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
        radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
        return 0;
 }
 
 static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
 {
-       struct radeon_ttm_tt *gtt;
+       struct radeon_ttm_tt *gtt = (void *)ttm;
 
-       gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
+       ttm_dma_tt_fini(&gtt->ttm);
        kfree(gtt);
 }
 
@@ -570,17 +568,19 @@ struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
        if (gtt == NULL) {
                return NULL;
        }
-       gtt->ttm.func = &radeon_backend_func;
+       gtt->ttm.ttm.func = &radeon_backend_func;
        gtt->rdev = rdev;
-       if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+       if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(gtt);
                return NULL;
        }
-       return &gtt->ttm;
+       return &gtt->ttm.ttm;
 }
 
 static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 {
        struct radeon_device *rdev;
+       struct radeon_ttm_tt *gtt = (void *)ttm;
        unsigned i;
        int r;
 
@@ -591,7 +591,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
-               return ttm_dma_populate(ttm, rdev->dev);
+               return ttm_dma_populate(&gtt->ttm, rdev->dev);
        }
 #endif
 
@@ -601,14 +601,14 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
        }
 
        for (i = 0; i < ttm->num_pages; i++) {
-               ttm->dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
-                                                  0, PAGE_SIZE,
-                                                  PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(rdev->pdev, ttm->dma_address[i])) {
+               gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+                                                      0, PAGE_SIZE,
+                                                      PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
                        while (--i) {
-                               pci_unmap_page(rdev->pdev, ttm->dma_address[i],
+                               pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-                               ttm->dma_address[i] = 0;
+                               gtt->ttm.dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
@@ -620,20 +620,21 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
        struct radeon_device *rdev;
+       struct radeon_ttm_tt *gtt = (void *)ttm;
        unsigned i;
 
        rdev = radeon_get_rdev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
-               ttm_dma_unpopulate(ttm, rdev->dev);
+               ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
                return;
        }
 #endif
 
        for (i = 0; i < ttm->num_pages; i++) {
-               if (ttm->dma_address[i]) {
-                       pci_unmap_page(rdev->pdev, ttm->dma_address[i],
+               if (gtt->ttm.dma_address[i]) {
+                       pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }
index 8d6267e434abc9732be90724fafb2e923797c72f..499debda791e9534f05b397e5a400ef69c97a293 100644
@@ -662,13 +662,61 @@ out:
        return count;
 }
 
+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+                         enum ttm_caching_state cstate)
+{
+       unsigned long irq_flags;
+       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+       unsigned i;
+
+       if (pool == NULL) {
+               /* No pool for this memory type so free the pages */
+               for (i = 0; i < npages; i++) {
+                       if (pages[i]) {
+                               if (page_count(pages[i]) != 1)
+                                       printk(KERN_ERR TTM_PFX
+                                              "Erroneous page count. "
+                                              "Leaking pages.\n");
+                               __free_page(pages[i]);
+                               pages[i] = NULL;
+                       }
+               }
+               return;
+       }
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       for (i = 0; i < npages; i++) {
+               if (pages[i]) {
+                       if (page_count(pages[i]) != 1)
+                               printk(KERN_ERR TTM_PFX
+                                      "Erroneous page count. "
+                                      "Leaking pages.\n");
+                       list_add_tail(&pages[i]->lru, &pool->list);
+                       pages[i] = NULL;
+                       pool->npages++;
+               }
+       }
+       /* Check that we don't go over the pool limit */
+       npages = 0;
+       if (pool->npages > _manager->options.max_size) {
+               npages = pool->npages - _manager->options.max_size;
+               /* free at least NUM_PAGES_TO_ALLOC number of pages
+                * to reduce calls to set_memory_wb */
+               if (npages < NUM_PAGES_TO_ALLOC)
+                       npages = NUM_PAGES_TO_ALLOC;
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+       if (npages)
+               ttm_page_pool_free(pool, npages);
+}
+
 /*
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-int ttm_get_pages(struct page **pages, int flags,
-                 enum ttm_caching_state cstate, unsigned npages,
-                 dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+                        enum ttm_caching_state cstate)
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct list_head plist;
@@ -736,7 +784,7 @@ int ttm_get_pages(struct page **pages, int flags,
                        printk(KERN_ERR TTM_PFX
                               "Failed to allocate extra pages "
                               "for large request.");
-                       ttm_put_pages(pages, count, flags, cstate, NULL);
+                       ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }
@@ -744,55 +792,6 @@ int ttm_get_pages(struct page **pages, int flags,
        return 0;
 }
 
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct page **pages, unsigned npages, int flags,
-                  enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
-       unsigned long irq_flags;
-       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-       unsigned i;
-
-       if (pool == NULL) {
-               /* No pool for this memory type so free the pages */
-               for (i = 0; i < npages; i++) {
-                       if (pages[i]) {
-                               if (page_count(pages[i]) != 1)
-                                       printk(KERN_ERR TTM_PFX
-                                              "Erroneous page count. "
-                                              "Leaking pages.\n");
-                               __free_page(pages[i]);
-                               pages[i] = NULL;
-                       }
-               }
-               return;
-       }
-
-       spin_lock_irqsave(&pool->lock, irq_flags);
-       for (i = 0; i < npages; i++) {
-               if (pages[i]) {
-                       if (page_count(pages[i]) != 1)
-                               printk(KERN_ERR TTM_PFX
-                                      "Erroneous page count. "
-                                      "Leaking pages.\n");
-                       list_add_tail(&pages[i]->lru, &pool->list);
-                       pages[i] = NULL;
-                       pool->npages++;
-               }
-       }
-       /* Check that we don't go over the pool limit */
-       npages = 0;
-       if (pool->npages > _manager->options.max_size) {
-               npages = pool->npages - _manager->options.max_size;
-               /* free at least NUM_PAGES_TO_ALLOC number of pages
-                * to reduce calls to set_memory_wb */
-               if (npages < NUM_PAGES_TO_ALLOC)
-                       npages = NUM_PAGES_TO_ALLOC;
-       }
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-       if (npages)
-               ttm_page_pool_free(pool, npages);
-}
-
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                char *name)
 {
@@ -865,9 +864,9 @@ int ttm_pool_populate(struct ttm_tt *ttm)
                return 0;
 
        for (i = 0; i < ttm->num_pages; ++i) {
-               ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
-                                   ttm->caching_state, 1,
-                                   &ttm->dma_address[i]);
+               ret = ttm_get_pages(&ttm->pages[i], 1,
+                                   ttm->page_flags,
+                                   ttm->caching_state);
                if (ret != 0) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
@@ -904,8 +903,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
                                                 ttm->pages[i]);
                        ttm_put_pages(&ttm->pages[i], 1,
                                      ttm->page_flags,
-                                     ttm->caching_state,
-                                     ttm->dma_address);
+                                     ttm->caching_state);
                }
        }
        ttm->state = tt_unpopulated;
index 7a477930487799276bf68fe12fdff8191e64e52c..6678abca0d98a2e751e45d5121eb14c95189e3b1 100644
@@ -789,7 +789,7 @@ out:
 
 /*
  * @return count of pages still required to fulfill the request.
-*/
+ */
 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
                                         unsigned long *irq_flags)
 {
@@ -838,10 +838,11 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
  * allocates one page at a time.
  */
 static int ttm_dma_pool_get_pages(struct dma_pool *pool,
-                                 struct ttm_tt *ttm,
+                                 struct ttm_dma_tt *ttm_dma,
                                  unsigned index)
 {
        struct dma_page *d_page;
+       struct ttm_tt *ttm = &ttm_dma->ttm;
        unsigned long irq_flags;
        int count, r = -ENOMEM;
 
@@ -850,8 +851,8 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
        if (count) {
                d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
                ttm->pages[index] = d_page->p;
-               ttm->dma_address[index] = d_page->dma;
-               list_move_tail(&d_page->page_list, &ttm->alloc_list);
+               ttm_dma->dma_address[index] = d_page->dma;
+               list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
                r = 0;
                pool->npages_in_use += 1;
                pool->npages_free -= 1;
@@ -864,8 +865,9 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
  * On success pages list will hold count number of correctly
  * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
  */
-int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+       struct ttm_tt *ttm = &ttm_dma->ttm;
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        struct dma_pool *pool;
        enum pool_type type;
@@ -892,18 +894,18 @@ int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
                }
        }
 
-       INIT_LIST_HEAD(&ttm->alloc_list);
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
        for (i = 0; i < ttm->num_pages; ++i) {
-               ret = ttm_dma_pool_get_pages(pool, ttm, i);
+               ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
                if (ret != 0) {
-                       ttm_dma_unpopulate(ttm, dev);
+                       ttm_dma_unpopulate(ttm_dma, dev);
                        return -ENOMEM;
                }
 
                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                false, false);
                if (unlikely(ret != 0)) {
-                       ttm_dma_unpopulate(ttm, dev);
+                       ttm_dma_unpopulate(ttm_dma, dev);
                        return -ENOMEM;
                }
        }
@@ -911,7 +913,7 @@ int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
-                       ttm_dma_unpopulate(ttm, dev);
+                       ttm_dma_unpopulate(ttm_dma, dev);
                        return ret;
                }
        }
@@ -937,8 +939,9 @@ static int ttm_dma_pool_get_num_unused_pages(void)
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+       struct ttm_tt *ttm = &ttm_dma->ttm;
        struct dma_pool *pool;
        struct dma_page *d_page, *next;
        enum pool_type type;
@@ -956,7 +959,7 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
                     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
 
        /* make sure pages array match list and count number of pages */
-       list_for_each_entry(d_page, &ttm->alloc_list, page_list) {
+       list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
                ttm->pages[count] = d_page->p;
                count++;
        }
@@ -967,7 +970,7 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
                pool->nfrees += count;
        } else {
                pool->npages_free += count;
-               list_splice(&ttm->alloc_list, &pool->free_list);
+               list_splice(&ttm_dma->pages_list, &pool->free_list);
                if (pool->npages_free > _manager->options.max_size) {
                        count = pool->npages_free - _manager->options.max_size;
                }
@@ -975,7 +978,7 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
        spin_unlock_irqrestore(&pool->lock, irq_flags);
 
        if (is_cached) {
-               list_for_each_entry_safe(d_page, next, &ttm->alloc_list, page_list) {
+               list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 d_page->p);
                        ttm_dma_page_put(pool, d_page);
@@ -987,10 +990,10 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
                }
        }
 
-       INIT_LIST_HEAD(&ttm->alloc_list);
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
        for (i = 0; i < ttm->num_pages; i++) {
                ttm->pages[i] = NULL;
-               ttm->dma_address[i] = 0;
+               ttm_dma->dma_address[i] = 0;
        }
 
        /* shrink pool if necessary */
index 1625739b434b18a8e75ea949ab7f68eb0ee1dbf3..58e1fa14fe3a021239785b36d742d83877a1e6ad 100644
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-       ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
-       ttm->dma_address = drm_calloc_large(ttm->num_pages,
-                                           sizeof(*ttm->dma_address));
+       ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
 }
 
-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-       drm_free_large(ttm->pages);
-       ttm->pages = NULL;
-       drm_free_large(ttm->dma_address);
-       ttm->dma_address = NULL;
+       ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+       ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+                                           sizeof(*ttm->dma_address));
 }
 
 #ifdef CONFIG_X86
@@ -173,7 +170,6 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 
        if (likely(ttm->pages != NULL)) {
                ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-               ttm_tt_free_page_directory(ttm);
        }
 
        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
@@ -196,9 +192,8 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
 
-       INIT_LIST_HEAD(&ttm->alloc_list);
        ttm_tt_alloc_page_directory(ttm);
-       if (!ttm->pages || !ttm->dma_address) {
+       if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
                return -ENOMEM;
@@ -207,6 +202,49 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_tt_init);
 
+void ttm_tt_fini(struct ttm_tt *ttm)
+{
+       drm_free_large(ttm->pages);
+       ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
+
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+               unsigned long size, uint32_t page_flags,
+               struct page *dummy_read_page)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+
+       ttm->bdev = bdev;
+       ttm->glob = bdev->glob;
+       ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       ttm->caching_state = tt_cached;
+       ttm->page_flags = page_flags;
+       ttm->dummy_read_page = dummy_read_page;
+       ttm->state = tt_unpopulated;
+
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       ttm_dma_tt_alloc_page_directory(ttm_dma);
+       if (!ttm->pages || !ttm_dma->dma_address) {
+               ttm_tt_destroy(ttm);
+               printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+
+       drm_free_large(ttm->pages);
+       ttm->pages = NULL;
+       drm_free_large(ttm_dma->dma_address);
+       ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
+
 void ttm_tt_unbind(struct ttm_tt *ttm)
 {
        int ret;
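
Note the ownership change in ttm_tt_destroy() above: it no longer frees
the page directory, so every driver's destroy callback must now call
ttm_tt_fini() (or ttm_dma_tt_fini()) itself before freeing its backend,
as the vmwgfx hunk below does. A minimal sketch of the plain, non-DMA
variant; "my_be" is a hypothetical structure:

	struct my_be {
		struct ttm_tt ttm;
		/* driver state ... */
	};

	static void my_be_destroy(struct ttm_tt *ttm)
	{
		struct my_be *be = container_of(ttm, struct my_be, ttm);

		ttm_tt_fini(ttm);	/* frees ttm->pages */
		kfree(be);
	}
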
index 3986d7468232af20bfdfd54be0df34263b572982..1e2c0fb7f7869c7ea97a24d6d545bdff62df6537 100644
@@ -168,6 +168,7 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
+       ttm_tt_fini(ttm);
        kfree(vmw_be);
 }
 
@@ -191,6 +192,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
        if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(vmw_be);
                return NULL;
        }
 
index beef9abaaefdf77b5ade62b319994b42707e0ba0..b2a0848a4d024ad53db6d3a8f76c76aa60467467 100644
@@ -103,8 +103,6 @@ enum ttm_caching_state {
  * @swap_storage: Pointer to shmem struct file for swap storage.
  * @caching_state: The current caching state of the pages.
  * @state: The current binding state of the pages.
- * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
- * @alloc_list: used by some page allocation backend
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -127,8 +125,23 @@ struct ttm_tt {
                tt_unbound,
                tt_unpopulated,
        } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+       struct ttm_tt ttm;
        dma_addr_t *dma_address;
-       struct list_head alloc_list;
+       struct list_head pages_list;
 };
 
 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)        /* Fixed (on-card) PCI memory */
@@ -595,6 +608,19 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
 extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                        unsigned long size, uint32_t page_flags,
                        struct page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+                          unsigned long size, uint32_t page_flags,
+                          struct page *dummy_read_page);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
 /**
  * ttm_ttm_bind:
index 1e1337e81f318867882477a03cda37ae8bcbfe18..5fe27400d176cb16f14adbe6b7d7a584e167c0be 100644
 #include "ttm_bo_driver.h"
 #include "ttm_memory.h"
 
-/**
- * Get count number of pages from pool to pages list.
- *
- * @pages: head of empty linked list where pages are filled.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state for the page.
- * @count: number of pages to allocate.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- */
-int ttm_get_pages(struct page **pages,
-                 int flags,
-                 enum ttm_caching_state cstate,
-                 unsigned npages,
-                 dma_addr_t *dma_address);
-/**
- * Put linked list of pages to pool.
- *
- * @pages: list of pages to free.
- * @page_count: number of pages in the list. Zero can be passed for unknown
- * count.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- */
-void ttm_put_pages(struct page **pages,
-                  unsigned npages,
-                  int flags,
-                  enum ttm_caching_state cstate,
-                  dma_addr_t *dma_address);
 /**
  * Initialize pool allocator.
  */
@@ -107,8 +78,8 @@ void ttm_dma_page_alloc_fini(void);
  */
 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
-int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev);
-extern void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev);
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,