/* DMA mapping routines for the MN10300 arch
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, int flag);

extern void dma_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))

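/*
 * Example usage (an illustrative sketch, not part of the original header):
 * a driver might allocate a small coherent descriptor area like this, where
 * "ring" and RING_BYTES are hypothetical driver names:
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
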
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * 32-bit bus address to use is returned.
 *
 * Once the device is given the DMA address, the device owns this memory until
 * either dma_unmap_single() or dma_sync_single_for_cpu() is performed.
 */
static inline
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                          enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        mn10300_dcache_flush_inv();
        return virt_to_bus(ptr);
}

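/*
 * Example usage (an illustrative sketch): mapping a kernel buffer for a
 * device-bound transfer and unmapping it once the transfer has completed;
 * "buf" and "len" are hypothetical:
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... program the device with "dma" and wait for completion ...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
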
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was passed to the corresponding dma_map_single() call.  All
 * other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                      enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

/*
 * Map a set of buffers described by a scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single() interface above.
 * Here the scatter-gather list elements are each tagged with the appropriate
 * DMA address and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of DMA
 * address/length pairs than there are SG table elements (for example via
 * virtual mapping capabilities).  The routine returns the number of
 * addr/length pairs actually used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single() are the
 * same here.
 */
static inline
int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
               enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sglist[0].length == 0);

        for_each_sg(sglist, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);
        }

        mn10300_dcache_flush_inv();
        return nents;
}

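/*
 * Example usage (an illustrative sketch): mapping a scatterlist and walking
 * the resulting DMA address/length pairs; "sgl", "nents" and
 * program_device_entry() are hypothetical:
 *
 *      struct scatterlist *sg;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *      for_each_sg(sgl, sg, count, i)
 *              program_device_entry(sg_dma_address(sg), sg_dma_len(sg));
 *      ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the count returned
 * by dma_map_sg().
 */
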
/*
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
                  enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

/*
 * dma_{map,unmap}_page map a kernel page to a dma_addr_t.  Identical to
 * dma_{map,unmap}_single, but they take a struct page and an offset instead
 * of a virtual address.
 */
static inline
dma_addr_t dma_map_page(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return page_to_bus(page) + offset;
}

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
                    enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

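/*
 * Example usage (an illustrative sketch): mapping part of a page, e.g. in a
 * page-based I/O path; "page", "off" and "len" are hypothetical:
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
 *      ... start the transfer and wait for completion ...
 *      dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 */
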
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the buffer using
 * the CPU, yet do not wish to tear down the DMA mapping, you must call this
 * function before doing so.  At the next point you give the DMA address back
 * to the device, the device again owns the buffer.
 */
static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                             size_t size, enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                size_t size, enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

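/*
 * Example usage (an illustrative sketch): inspecting a long-lived streaming
 * mapping on the CPU between transfers without unmapping it; "dma" and "len"
 * are hypothetical:
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the CPU may now read the buffer ...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the device owns the buffer again ...
 */
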
static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

/*
 * Make physical memory consistent for a set of streaming mode DMA
 * translations after a transfer.
 *
 * The same as dma_sync_single_for_{cpu,device}() but for a scatter-gather
 * list, same rules and usage.
 */
static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                         int nelems, enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

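/*
 * Example usage (an illustrative sketch): the scatter-gather analogue of the
 * single-buffer sync above; "sgl" and "nents" are hypothetical:
 *
 *      dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
 *      ... the CPU may now read the buffers ...
 *      dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
 */
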
static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

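/*
 * Example usage (an illustrative sketch): although this implementation can
 * never fail, portable callers should still check every mapping; "buf" and
 * "len" are hypothetical:
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 */
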
/*
 * Return whether the given device DMA address mask can be supported properly.
 * For example, if your device can only drive the low 24 bits during bus
 * mastering, then you would pass 0x00ffffff as the mask to this function.
 */
static inline
int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
         * guarantee allocations that must be within a tighter range than
         * GFP_DMA
         */
        if (mask < 0x00ffffff)
                return 0;
        return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}

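/*
 * Example usage (an illustrative sketch): a driver for a device that can
 * only address 24 bits would set its mask at probe time:
 *
 *      if (dma_set_mask(dev, 0x00ffffff))
 *              return -EIO;
 */
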
static inline
int dma_get_cache_alignment(void)
{
        return 1 << L1_CACHE_SHIFT;
}

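/*
 * Example usage (an illustrative sketch): padding a buffer size so that a
 * streaming DMA buffer does not share cachelines with unrelated data; "len"
 * is hypothetical:
 *
 *      size_t sz = ALIGN(len, dma_get_cache_alignment());
 */
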
#define dma_is_consistent(d)    (1)

static inline
void dma_cache_sync(void *vaddr, size_t size,
                    enum dma_data_direction direction)
{
        mn10300_dcache_flush_inv();
}

#endif /* _ASM_DMA_MAPPING_H */