kmemcheck: add hooks for page- and sg-dma-mappings
Author: Vegard Nossum <vegard.nossum@gmail.com>
Sat, 21 Feb 2009 12:52:37 +0000 (13:52 +0100)
Committer: Vegard Nossum <vegard.nossum@gmail.com>
Mon, 15 Jun 2009 10:40:13 +0000 (12:40 +0200)
This is needed for page allocator support to prevent false positives
when accessing pages which are dma-mapped.

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
arch/x86/include/asm/dma-mapping.h

index d57d0c1857bcff13cdf7df27e4471abb561cbcd0..b93405b228b47868dba489944324a147852f90cc 100644 (file)
@@ -89,8 +89,12 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 {
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        int ents;
+       struct scatterlist *s;
+       int i;
 
        BUG_ON(!valid_dma_direction(dir));
+       for_each_sg(sg, s, nents, i)
+               kmemcheck_mark_initialized(sg_virt(s), s->length);
        ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
        debug_dma_map_sg(hwdev, sg, nents, ents, dir);
 
@@ -202,6 +206,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
        dma_addr_t addr;
 
        BUG_ON(!valid_dma_direction(dir));
+       kmemcheck_mark_initialized(page_address(page) + offset, size);
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);