scatterlist: use sg_phys()
author     Dan Williams <dan.j.williams@intel.com>
           Mon, 17 Aug 2015 14:13:26 +0000 (08:13 -0600)
committer  Jens Axboe <axboe@fb.com>
           Mon, 17 Aug 2015 14:13:26 +0000 (08:13 -0600)
Coccinelle cleanup to replace open-coded scatterlist-to-physical-address
translations with sg_phys().  This is in preparation for introducing
scatterlists that reference __pfn_t.
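
For reference, sg_phys() is defined in include/linux/scatterlist.h (as of
this change) as:

static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}

so the first rule below is an exact identity, and the second rule recovers
the page-aligned address that page_to_phys(sg_page(sg)) produced, assuming
sg->offset is smaller than PAGE_SIZE.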

// sg_phys.cocci: convert uses of page_to_phys(sg_page(sg)) to sg_phys(sg)
// usage: make coccicheck COCCI=sg_phys.cocci MODE=patch

virtual patch

@@
struct scatterlist *sg;
@@

- page_to_phys(sg_page(sg)) + sg->offset
+ sg_phys(sg)

@@
struct scatterlist *sg;
@@

- page_to_phys(sg_page(sg))
+ sg_phys(sg) & PAGE_MASK
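
A quick standalone sanity check of the masking in the second rule (a
userspace sketch, not kernel code: PAGE_SIZE, PAGE_MASK, and the sample
values below are stand-ins for the kernel definitions, and sg->offset is
assumed to be smaller than PAGE_SIZE):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	/* page_to_phys() returns a page-aligned address */
	uint64_t page_phys = 0xabcd000;
	/* an in-page offset, standing in for sg->offset */
	uint64_t offset = 0x123;
	/* what sg_phys() computes */
	uint64_t phys = page_phys + offset;

	/* masking off the in-page bits recovers the page address */
	assert((phys & PAGE_MASK) == page_phys);
	return 0;
}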

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
arch/arm/mm/dma-mapping.c
arch/microblaze/kernel/dma.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/staging/android/ion/ion_chunk_heap.c

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1ced8a0f7a52624cae84203eb0d3417aed5171db..4efaefd61c1cf7d329a3006c3a2833ba2b19b5b5 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1520,7 +1520,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                return -ENOMEM;
 
        for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-               phys_addr_t phys = page_to_phys(sg_page(s));
+               phys_addr_t phys = sg_phys(s) & PAGE_MASK;
                unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
                if (!is_coherent &&
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index bf4dec229437a836ee1829504be7bf603ce73b37..c89da63129545a9c300e61062e9d958d4c786790 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,8 +61,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
-               __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-                                                       sg->length, direction);
+               __dma_sync(sg_phys(sg), sg->length, direction);
        }
 
        return nents;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1dec2cb2f53319df8a49abcf8e645..b261850a7694c671edb44f9d8b39923e3b08280d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2094,7 +2094,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
-                       pteval = page_to_phys(sg_page(sg)) | prot;
+                       pteval = (sg_phys(sg) & PAGE_MASK) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }
 
@@ -3620,7 +3620,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
-               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nelems;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f286090931cc874f6851eab4f279b5f9f44276d1..049df495c2747cfc196eb5c953efcac3358808cb 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1408,7 +1408,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
        for_each_sg(sg, s, nents, i) {
-               phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+               phys_addr_t phys = sg_phys(s);
 
                /*
                 * We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 54746157d799a6e0f000f76cfc419948c240d28d..f7b6ef991cd0f1dce58238ddc36123a15a7805e4 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
        sg = table->sgl;
        for (i -= 1; i >= 0; i--) {
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+               gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
                              sg->length);
                sg = sg_next(sg);
        }
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
                                                        DMA_BIDIRECTIONAL);
 
        for_each_sg(table->sgl, sg, table->nents, i) {
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+               gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
                              sg->length);
        }
        chunk_heap->allocated -= allocated_size;