/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
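
/*
 * Illustrative sketch, not part of the original file: platform code that
 * needs a non-zero offset would typically store it per device before any
 * mapping is made, e.g. using the set_dma_offset() helper from
 * <asm/dma-mapping.h>:
 *
 *	set_dma_offset(&pdev->dev, MY_PLAT_DMA_OFFSET);
 *
 * MY_PLAT_DMA_OFFSET is a hypothetical platform constant. Devices with no
 * offset set fall back to PCI_DRAM_OFFSET via get_dma_offset().
 */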

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Direct mappings hold no resources, so there is nothing to undo */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= (lmb_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/* Nothing to tear down for a direct mapping */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu = dma_direct_sync_single_range,
	.sync_single_range_for_device = dma_direct_sync_single_range,
	.sync_sg_for_cpu = dma_direct_sync_sg,
	.sync_sg_for_device = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
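
/*
 * Illustrative sketch, an assumption rather than code from this file: a
 * platform or bus driver selects these ops for a device with the
 * set_dma_ops() helper from <asm/dma-mapping.h>:
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *
 * After that, generic calls such as dma_map_single() or
 * dma_alloc_coherent() on the device resolve to the dma_direct_*
 * callbacks above.
 */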

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);