arch/tile: add option to skip DMA sync as a part of map and unmap
author: Alexander Duyck <alexander.h.duyck@intel.com>
Wed, 14 Dec 2016 23:05:18 +0000 (15:05 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 00:04:08 +0000 (16:04 -0800)
This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to
avoid invoking cache line invalidation if the driver will just handle it
via a sync_for_cpu or sync_for_device call.

Link: http://lkml.kernel.org/r/20161110113550.76501.73060.stgit@ahduyck-blue-test.jf.intel.com
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/tile/kernel/pci-dma.c

index 09bb774b39cd06e9cd4dc1e4f2e8c10425a8ceed..24e0f8c21f2f4b9734f027a06a688df76a9effe7 100644 (file)
@@ -213,10 +213,12 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
-               __dma_prep_pa_range(sg->dma_address, sg->length, direction);
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
 #endif
+               if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+                       continue;
+               __dma_prep_pa_range(sg->dma_address, sg->length, direction);
        }
 
        return nents;
@@ -232,6 +234,8 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
        BUG_ON(!valid_dma_direction(direction));
        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
+               if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+                       continue;
                __dma_complete_pa_range(sg->dma_address, sg->length,
                                        direction);
        }
@@ -245,7 +249,8 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
        BUG_ON(!valid_dma_direction(direction));
 
        BUG_ON(offset + size > PAGE_SIZE);
-       __dma_prep_page(page, offset, size, direction);
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               __dma_prep_page(page, offset, size, direction);
 
        return page_to_pa(page) + offset;
 }
@@ -256,6 +261,9 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 {
        BUG_ON(!valid_dma_direction(direction));
 
+       if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+               return;
+
        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
                            dma_address & (PAGE_SIZE - 1), size, direction);
 }