From: Akinobu Mita
Date: Wed, 4 Jun 2014 23:06:51 +0000 (-0700)
Subject: intel-iommu: integrate DMA CMA
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=367464362591d89b371e2a690638e9bc899d8ebb;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

intel-iommu: integrate DMA CMA

This adds support for the DMA Contiguous Memory Allocator (DMA CMA) to
intel-iommu, which enables dma_alloc_coherent() to allocate big
contiguous memory.

It is achieved in the same way as nommu_dma_ops currently does it:
memory is first requested from dma_alloc_from_contiguous(), and
alloc_pages() is used as a fallback.

Signed-off-by: Akinobu Mita
Cc: Marek Szyprowski
Cc: Konrad Rzeszutek Wilk
Cc: David Woodhouse
Cc: Don Dutile
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc: Andi Kleen
Cc: Yinghai Lu
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f256ffc02e29..6bb32773c3ac 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -39,6 +39,7 @@
 #include <linux/dmi.h>
 #include <linux/pci-ats.h>
 #include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -3193,7 +3194,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
 	int order;
 
 	size = PAGE_ALIGN(size);
@@ -3208,17 +3209,31 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 			flags |= GFP_DMA32;
 	}
 
-	vaddr = (void *)__get_free_pages(flags, order);
-	if (!vaddr)
+	if (flags & __GFP_WAIT) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
 		return NULL;
-	memset(vaddr, 0, size);
+	memset(page_address(page), 0, size);
 
-	*dma_handle = __intel_map_single(dev, virt_to_bus(vaddr), size,
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
 					 DMA_BIDIRECTIONAL,
 					 dev->coherent_dma_mask);
 	if (*dma_handle)
-		return vaddr;
-	free_pages((unsigned long)vaddr, order);
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
+
 	return NULL;
 }
 
@@ -3226,12 +3241,14 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 			   dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
+	struct page *page = virt_to_page(vaddr);
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
 	intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
-	free_pages((unsigned long)vaddr, order);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
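
To make the new allocation policy easier to follow, here is a condensed,
annotated sketch of it. This is illustrative only, not the literal driver
code: the direct_mapped parameter stands in for intel-iommu's internal
iommu_no_mapping() check, and the mapping step is omitted.

	#include <linux/dma-contiguous.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static struct page *cma_first_alloc(struct device *dev, size_t size,
					    gfp_t flags, bool direct_mapped)
	{
		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
		int order = get_order(size);
		struct page *page = NULL;

		/* CMA allocation may sleep; only try it when the caller
		 * allows blocking. */
		if (flags & __GFP_WAIT) {
			page = dma_alloc_from_contiguous(dev, count, order);
			/*
			 * With no IOMMU mapping the device sees physical
			 * addresses directly, so CMA pages must fall within
			 * the device's coherent DMA mask.
			 */
			if (page && direct_mapped &&
			    page_to_phys(page) + size > dev->coherent_dma_mask) {
				dma_release_from_contiguous(dev, page, count);
				page = NULL;
			}
		}

		/* Fall back to the normal page allocator. */
		if (!page)
			page = alloc_pages(flags, order);

		return page;
	}

The free side mirrors this: dma_release_from_contiguous() returns false
when the pages were not allocated from the device's CMA area, so both
intel_alloc_coherent()'s error path and intel_free_coherent() can try it
unconditionally and fall back to __free_pages().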
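
The user-visible effect is that large coherent allocations, previously
limited by the buddy allocator's MAX_ORDER cap (4 MiB on x86), can now be
satisfied from a CMA region. A minimal, hypothetical consumer might look
like the following; the example device callbacks, buffer size, and cma=
boot setting are assumptions for illustration:

	#include <linux/dma-mapping.h>

	static void *example_buf;
	static dma_addr_t example_handle;

	static int example_probe(struct device *dev)
	{
		/* 16 MiB exceeds the buddy allocator's MAX_ORDER limit on
		 * x86, but can be satisfied from a large enough CMA area
		 * (e.g. booted with cma=64M). GFP_KERNEL allows blocking,
		 * so the CMA path above is eligible. */
		example_buf = dma_alloc_coherent(dev, 16 << 20,
						 &example_handle, GFP_KERNEL);
		if (!example_buf)
			return -ENOMEM;
		return 0;
	}

	static void example_remove(struct device *dev)
	{
		dma_free_coherent(dev, 16 << 20, example_buf, example_handle);
	}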