/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */
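
/*
 * For orientation: drivers do not call the functions below directly; they
 * go through the generic DMA API, which dispatches into the or1k_dma_map_ops
 * table at the bottom of this file.  A minimal, illustrative sketch only
 * ("dev" is a placeholder device pointer, not anything defined here):
 *
 *      dma_addr_t handle;
 *      void *vaddr;
 *
 *      vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *      if (vaddr) {
 *              ... hand "handle" to the device, use "vaddr" from the CPU ...
 *              dma_free_coherent(dev, PAGE_SIZE, vaddr, handle);
 *      }
 */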

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/dma-attrs.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
                 unsigned long next, struct mm_walk *walk)
{
        unsigned long cl;

        pte_val(*pte) |= _PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        /* Flush page out of dcache */
        for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBFR, cl);

        return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
                   unsigned long next, struct mm_walk *walk)
{
        pte_val(*pte) &= ~_PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
               dma_addr_t *dma_handle, gfp_t gfp,
               struct dma_attrs *attrs)
{
        unsigned long va;
        void *page;
        struct mm_walk walk = {
                .pte_entry = page_set_nocache,
                .mm = &init_mm
        };

        page = alloc_pages_exact(size, gfp);
        if (!page)
                return NULL;

        /* This gives us the real physical address of the first page. */
        *dma_handle = __pa(page);

        va = (unsigned long)page;

        if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
                /*
                 * We need to iterate through the pages, clearing the dcache
                 * for them and setting the cache-inhibit bit.
                 */
                if (walk_page_range(va, va + size, &walk)) {
                        free_pages_exact(page, size);
                        return NULL;
                }
        }

        return (void *)va;
}

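/*
 * Illustrative sketch only: a caller that explicitly wants cacheable,
 * non-consistent memory would pass the attribute down through
 * dma_alloc_attrs().  "dev" and the size are placeholders, not names
 * defined in this file:
 *
 *      DEFINE_DMA_ATTRS(attrs);
 *      dma_addr_t handle;
 *      void *vaddr;
 *
 *      dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
 *      vaddr = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL, &attrs);
 *
 * Such memory must then be synced manually with the dma_sync_* helpers
 * implemented further down.
 */
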
static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
              dma_addr_t dma_handle, struct dma_attrs *attrs)
{
        unsigned long va = (unsigned long)vaddr;
        struct mm_walk walk = {
                .pte_entry = page_clear_nocache,
                .mm = &init_mm
        };

        if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
                /* walk_page_range shouldn't be able to fail here */
                WARN_ON(walk_page_range(va, va + size, &walk));
        }

        free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
              unsigned long offset, size_t size,
              enum dma_data_direction dir,
              struct dma_attrs *attrs)
{
        unsigned long cl;
        dma_addr_t addr = page_to_phys(page) + offset;

        switch (dir) {
        case DMA_TO_DEVICE:
                /* Flush the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo.dcache_block_size)
                        mtspr(SPR_DCBFR, cl);
                break;
        case DMA_FROM_DEVICE:
                /* Invalidate the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo.dcache_block_size)
                        mtspr(SPR_DCBIR, cl);
                break;
        default:
                /*
                 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
                 * flush nor invalidate the cache here as the area will need
                 * to be manually synced anyway.
                 */
                break;
        }

        return addr;
}

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        /* Nothing special to do here... */
}

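/*
 * Illustrative sketch only: the streaming path above is what backs
 * dma_map_single()/dma_unmap_single() for a driver.  "dev", "buf" and
 * "len" are placeholders, not names defined in this file:
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ... let the device read from "handle" ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * For DMA_TO_DEVICE the map call flushes the dcache so the device sees
 * the CPU's latest writes; for DMA_FROM_DEVICE it invalidates so the CPU
 * sees the device's writes afterwards.
 */
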
static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
            int nents, enum dma_data_direction dir,
            struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
                                               s->length, dir, NULL);
        }

        return nents;
}

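/*
 * Illustrative sketch only: a driver hands a scatterlist to dma_map_sg()
 * and then walks the mapped entries.  "dev", "buf" and "len" are
 * placeholders, not names defined in this file:
 *
 *      struct scatterlist sg, *s;
 *      int count, i;
 *
 *      sg_init_one(&sg, buf, len);
 *      count = dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
 *      for_each_sg(&sg, s, count, i)
 *              ... program sg_dma_address(s) / sg_dma_len(s) ...
 *      dma_unmap_sg(dev, &sg, 1, DMA_TO_DEVICE);
 */
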
static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
              int nents, enum dma_data_direction dir,
              struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
                                NULL);
        }
}

static void
or1k_sync_single_for_cpu(struct device *dev,
                         dma_addr_t dma_handle, size_t size,
                         enum dma_data_direction dir)
{
        unsigned long cl;
        dma_addr_t addr = dma_handle;

        /* Invalidate the dcache for the requested range */
        for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
                            dma_addr_t dma_handle, size_t size,
                            enum dma_data_direction dir)
{
        unsigned long cl;
        dma_addr_t addr = dma_handle;

        /* Flush the dcache for the requested range */
        for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBFR, cl);
}

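/*
 * Illustrative sketch only: when a streaming buffer is reused across
 * several transfers, ownership ping-pongs between CPU and device via the
 * sync calls backed by the two helpers above ("dev", "handle" and "len"
 * are placeholders):
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... CPU reads the data the device just wrote ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      ... device may write the buffer again ...
 */
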
struct dma_map_ops or1k_dma_map_ops = {
        .alloc = or1k_dma_alloc,
        .free = or1k_dma_free,
        .map_page = or1k_map_page,
        .unmap_page = or1k_unmap_page,
        .map_sg = or1k_map_sg,
        .unmap_sg = or1k_unmap_sg,
        .sync_single_for_cpu = or1k_sync_single_for_cpu,
        .sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);

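/*
 * Illustrative note: the generic DMA API reaches this table through the
 * architecture's get_dma_ops() hook; on OpenRISC in kernels of this era
 * (see asm/dma-mapping.h) that hook simply returns &or1k_dma_map_ops for
 * every device, roughly:
 *
 *      static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 *      {
 *              return &or1k_dma_map_ops;
 *      }
 */
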
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);