ttm: Utilize the DMA API for pages that have TTM_PAGE_FLAG_DMA32 set.
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Mon, 29 Nov 2010 19:03:30 +0000 (14:03 -0500)
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Thu, 27 Jan 2011 21:03:09 +0000 (16:03 -0500)
For pages that have the TTM_PAGE_FLAG_DMA32 flag set, we
use the DMA API. We save the bus address in our array, which we
use to program the GART (see "radeon/ttm/PCIe: Use dma_addr if TTM
has set it." and "nouveau/ttm/PCIe: Use dma_addr if TTM has set it.").
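
As a rough illustration (this is not the actual radeon or nouveau
code; the helper name and the 32-bit entry format below are invented
for this sketch), a driver consuming the saved bus addresses to
program its GART might look like:

	/* Sketch: write one GART entry per page using the bus address
	 * that ttm_get_pages() obtained from dma_alloc_coherent().  No
	 * extra map call is needed at bind time, and under Xen the
	 * address is the real bus address, not a SWIOTLB bounce page. */
	static void example_gart_bind(u32 *gart_table, unsigned start,
				      unsigned count,
				      dma_addr_t *dma_address)
	{
		unsigned i;

		for (i = 0; i < count; i++)
			gart_table[start + i] =
				lower_32_bits(dma_address[i]);
	}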

The reason for using the DMA API is that under Xen we would
end up programming the GART with the bounce-buffer (SWIOTLB)
DMA address instead of the physical DMA address of the TTM page.
This is because alloc_page with GFP_DMA32 does not allocate
pages under the 4GB mark when running under the Xen hypervisor.

On bare metal this means we make the DMA API call earlier, at
allocation time, instead of when we program the GART.
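
In rough terms (a sketch only, not lifted from any driver; pdev
stands in for the device's struct pci_dev):

	/* Before: allocate the page now, map it only when the GART
	 * is programmed. */
	struct page *p = alloc_page(GFP_DMA32);
	dma_addr_t bus = pci_map_page(pdev, p, 0, PAGE_SIZE,
				      PCI_DMA_BIDIRECTIONAL);

	/* After: allocate and map in one step inside ttm_get_pages();
	 * the bus address comes back immediately. */
	dma_addr_t bus2;
	void *cpu = dma_alloc_coherent(NULL, PAGE_SIZE, &bus2,
				       GFP_DMA32);
	struct page *p2 = virt_to_page(cpu);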

For details please refer to:
https://lkml.org/lkml/2011/1/7/251

[v2: Fixed indentation, revised the description, added Reviewed-by]
Reviewed-by: Thomas Hellstrom <thomas@shipmail.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Ian Campbell <ian.campbell@citrix.com>
drivers/gpu/drm/ttm/ttm_page_alloc.c

index 9d9d92945f8c5e515adc87dc40e9f4b1f97427bc..737a2a2e46a58ca7ee66cbf67c0afd0583275ff6 100644
@@ -683,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags,
                        gfp_flags |= GFP_HIGHUSER;
 
                for (r = 0; r < count; ++r) {
-                       p = alloc_page(gfp_flags);
+                       if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+                               void *addr;
+                               addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+                                                         &dma_address[r],
+                                                         gfp_flags);
+                               if (addr == NULL)
+                                       return -ENOMEM;
+                               p = virt_to_page(addr);
+                       } else
+                               p = alloc_page(gfp_flags);
                        if (!p) {
 
                                printk(KERN_ERR TTM_PFX
                                       "Unable to allocate page.");
                                return -ENOMEM;
                        }
-
                        list_add(&p->lru, pages);
                }
                return 0;
@@ -738,12 +746,24 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p, *tmp;
+       unsigned r;
 
        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
 
+               r = page_count-1;
                list_for_each_entry_safe(p, tmp, pages, lru) {
-                       __free_page(p);
+                       if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+                               void *addr = page_address(p);
+                               WARN_ON(!addr || !dma_address[r]);
+                               if (addr)
+                                       dma_free_coherent(NULL, PAGE_SIZE,
+                                                         addr,
+                                                         dma_address[r]);
+                               dma_address[r] = 0;
+                       } else
+                               __free_page(p);
+                       r--;
                }
                /* Make the pages list empty */
                INIT_LIST_HEAD(pages);