/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

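/*
 * Illustrative sketch (not part of this file): per the comment above,
 * platform setup code that needs a non-zero offset would store it in
 * archdata before the device does any DMA, e.g.
 *
 *	dev->archdata.dma_data = (void *)(unsigned long)MY_DRAM_OFFSET;
 *
 * where MY_DRAM_OFFSET is a hypothetical platform constant;
 * get_dma_offset(dev) then returns it for every mapping below.
 */
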
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

	return ret;
#endif
}

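/*
 * Usage sketch (hypothetical driver code, for illustration): with
 * dma_direct_ops installed for the device, the generic API lands in the
 * allocator above, e.g.
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *					    &bus_addr, GFP_KERNEL);
 *
 * bus_addr comes back with get_dma_offset() already applied.
 */
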
void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

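/*
 * Illustration (hypothetical caller): the loop above runs when a driver
 * maps a scatterlist, e.g.
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * and each resulting sg->dma_address is sg_phys(sg) plus the per-device
 * offset, with the cache flushed on non-coherent CPUs.
 */
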
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Nothing to do: a direct mapping holds no state to tear down. */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= (lmb_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

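/*
 * Worked example of the check above: with 4GB of RAM,
 * lmb_end_of_DRAM() - 1 == 0xffffffff, so dma_set_mask(dev,
 * DMA_BIT_MASK(32)) succeeds while a 24-bit ISA-style mask is refused.
 */
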
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/* Nothing to do: direct mappings are not tracked. */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif

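/*
 * Illustration (hypothetical caller): on non-coherent platforms the
 * generic sync helpers dispatch to the two functions above, e.g.
 *
 *	dma_sync_single_range_for_device(dev, handle, 0, len,
 *					 DMA_TO_DEVICE);
 *
 * ends up in __dma_sync() on the kernel mapping of the buffer.
 */
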
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent			= dma_direct_alloc_coherent,
	.free_coherent			= dma_direct_free_coherent,
	.map_sg				= dma_direct_map_sg,
	.unmap_sg			= dma_direct_unmap_sg,
	.dma_supported			= dma_direct_dma_supported,
	.map_page			= dma_direct_map_page,
	.unmap_page			= dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu	= dma_direct_sync_single_range,
	.sync_single_range_for_device	= dma_direct_sync_single_range,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);