/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

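/*
 * Return the bus offset for @dev: platform code may have stashed one in
 * dev->archdata.dma_data; without a device, fall back to PCI_DRAM_OFFSET.
 */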
unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET;
}

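/*
 * Allocate a buffer suitable for coherent DMA. On hardware with coherent
 * caches this is an ordinary zeroed page allocation on the device's NUMA
 * node; with CONFIG_NOT_COHERENT_CACHE the work is delegated to
 * __dma_alloc_coherent(), which returns uncached memory. Either way the
 * returned handle includes the per-device bus offset.
 */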
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_direct_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

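/*
 * Free a buffer obtained from dma_direct_alloc_coherent(), using the
 * allocator that matches the path taken at allocation time.
 */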
void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

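/*
 * On a directly mapped bus there is no IOMMU to program: each segment's
 * DMA address is simply its physical address plus the bus offset. The
 * __dma_sync_page() call pushes CPU-side data out to memory on
 * non-coherent systems and compiles away on coherent ones.
 */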
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

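/* Nothing was set up in dma_direct_map_sg(), so there is nothing to undo. */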
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

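/*
 * A DMA mask is usable if it covers every address the direct mapping can
 * produce: on 64-bit that is everything up to the end of DRAM as seen by
 * the LMB allocator; 32-bit platforms accept any mask here.
 */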
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= (lmb_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

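/*
 * Map a single page: flush it to memory if the cache is not DMA-coherent,
 * then translate its physical address into a bus address.
 */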
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

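/* As with unmap_sg, a direct mapping leaves nothing to tear down. */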
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

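/*
 * Explicit sync hooks are only needed when the cache is not DMA-coherent;
 * they flush or invalidate the affected range so that CPU and device
 * agree on the buffer contents.
 */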
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif

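/*
 * The dma_map_ops table wired up for directly mapped busses; the sync
 * callbacks are only populated when they actually have work to do.
 */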
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu	= dma_direct_sync_single_range,
	.sync_single_range_for_device	= dma_direct_sync_single_range,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

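/*
 * Illustrative sketch only (not part of this file): platform code would
 * typically attach these ops and a bus offset to a device at setup time,
 * roughly along these lines. "MY_PLATFORM_DMA_OFFSET" is a made-up name
 * standing in for whatever the platform's address map dictates.
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 *	dev->archdata.dma_data = (void *)MY_PLATFORM_DMA_OFFSET;
 */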

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

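/*
 * Initialize the dma-debug facility with a fixed pool of tracking
 * entries; dma_debug_init() is a no-op unless CONFIG_DMA_API_DEBUG
 * is enabled.
 */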
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);