Blackfin: make sure mmiowb inserts a write barrier with SSYNC
arch/blackfin/include/asm/io.h
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _BFIN_IO_H
#define _BFIN_IO_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#include <linux/compiler.h>

/*
 * These are for ISA/PCI shared memory _only_ and should never be used
 * on any other type of memory, including Zorro memory. They are meant to
 * access the bus in the bus byte order, which is little-endian.
 *
 * readX/writeX() are used to access memory-mapped devices. On some
 * architectures the memory-mapped IO stuff needs to be accessed
 * differently. On the bfin architecture, we just read/write the
 * memory location directly.
 */
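/*
 * Illustrative sketch (not part of the original header): with a device
 * mapped at an __iomem address, the accessors below are used directly,
 * e.g.
 *
 *	unsigned char status = readb(regs + STATUS_REG);
 *	writeb(CMD_RESET, regs + CMD_REG);
 *
 * where regs, STATUS_REG, CMD_REG and CMD_RESET are hypothetical names.
 */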
#ifndef __ASSEMBLY__

static inline unsigned char readb(const volatile void __iomem *addr)
{
	unsigned int val;
	int tmp;

	/*
	 * Interrupts are held off around the access, and the SSYNC drains
	 * any buffered system writes before the byte load is issued.
	 */
	__asm__ __volatile__ (
		"cli %1;"
		"NOP; NOP; SSYNC;"
		"%0 = b [%2] (z);"
		"sti %1;"
		: "=d"(val), "=d"(tmp)
		: "a"(addr)
	);

	return (unsigned char) val;
}

static inline unsigned short readw(const volatile void __iomem *addr)
{
	unsigned int val;
	int tmp;

	__asm__ __volatile__ (
		"cli %1;"
		"NOP; NOP; SSYNC;"
		"%0 = w [%2] (z);"
		"sti %1;"
		: "=d"(val), "=d"(tmp)
		: "a"(addr)
	);

	return (unsigned short) val;
}

static inline unsigned int readl(const volatile void __iomem *addr)
{
	unsigned int val;
	int tmp;

	__asm__ __volatile__ (
		"cli %1;"
		"NOP; NOP; SSYNC;"
		"%0 = [%2];"
		"sti %1;"
		: "=d"(val), "=d"(tmp)
		: "a"(addr)
	);

	return val;
}

#endif /* __ASSEMBLY__ */

#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))

#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define memset_io(a, b, c) memset((void *)(a), (b), (c))
#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))

/* Convert "I/O port addresses" to actual addresses, i.e. ugly casts. */
#define __io(port) ((void *)(unsigned long)(port))

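/*
 * Illustrative sketch (not from the original header): since Blackfin has
 * no separate I/O address space, the inb()/outb() wrappers below simply
 * treat the port number as a memory address, so e.g.
 *
 *	outb(0x01, 0x20301000);
 *
 * is the same as writeb(0x01, (void __iomem *)0x20301000); the address
 * used here is a made-up placeholder.
 */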
#define inb(port) readb(__io(port))
#define inw(port) readw(__io(port))
#define inl(port) readl(__io(port))
#define outb(x, port) writeb(x, __io(port))
#define outw(x, port) writew(x, __io(port))
#define outl(x, port) writel(x, __io(port))

#define inb_p(port) inb(__io(port))
#define inw_p(port) inw(__io(port))
#define inl_p(port) inl(__io(port))
#define outb_p(x, port) outb(x, __io(port))
#define outw_p(x, port) outw(x, __io(port))
#define outl_p(x, port) outl(x, __io(port))

#define ioread8_rep(a, d, c) readsb(a, d, c)
#define ioread16_rep(a, d, c) readsw(a, d, c)
#define ioread32_rep(a, d, c) readsl(a, d, c)
#define iowrite8_rep(a, s, c) writesb(a, s, c)
#define iowrite16_rep(a, s, c) writesw(a, s, c)
#define iowrite32_rep(a, s, c) writesl(a, s, c)

#define ioread8(x) readb(x)
#define ioread16(x) readw(x)
#define ioread32(x) readl(x)
#define iowrite8(val, x) writeb(val, x)
#define iowrite16(val, x) writew(val, x)
#define iowrite32(val, x) writel(val, x)

/**
 * I/O write barrier
 *
 * Ensure ordering of I/O space writes. This will make sure that writes
 * following the barrier will arrive after all previous writes.
 */
#define mmiowb() do { SSYNC(); wmb(); } while (0)
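/*
 * Illustrative sketch (assumed driver pattern, not from this header):
 * mmiowb() is typically placed between the last MMIO write in a locked
 * section and the unlock, so the device sees this CPU's writes before
 * another CPU takes the lock and writes to the same device:
 *
 *	spin_lock(&dev_lock);
 *	writel(val, regs + CTRL_REG);
 *	mmiowb();
 *	spin_unlock(&dev_lock);
 *
 * dev_lock, regs, CTRL_REG and val are placeholder names.
 */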

#define IO_SPACE_LIMIT 0xffffffff

/* Values for nocacheflag and cmode */
#define IOMAP_NOCACHE_SER 1

#ifndef __ASSEMBLY__

extern void outsb(unsigned long port, const void *addr, unsigned long count);
extern void outsw(unsigned long port, const void *addr, unsigned long count);
extern void outsw_8(unsigned long port, const void *addr, unsigned long count);
extern void outsl(unsigned long port, const void *addr, unsigned long count);

extern void insb(unsigned long port, void *addr, unsigned long count);
extern void insw(unsigned long port, void *addr, unsigned long count);
extern void insw_8(unsigned long port, void *addr, unsigned long count);
extern void insl(unsigned long port, void *addr, unsigned long count);
extern void insl_16(unsigned long port, void *addr, unsigned long count);

extern void dma_outsb(unsigned long port, const void *addr, unsigned short count);
extern void dma_outsw(unsigned long port, const void *addr, unsigned short count);
extern void dma_outsl(unsigned long port, const void *addr, unsigned short count);

extern void dma_insb(unsigned long port, void *addr, unsigned short count);
extern void dma_insw(unsigned long port, void *addr, unsigned short count);
extern void dma_insl(unsigned long port, void *addr, unsigned short count);

static inline void readsl(const void __iomem *addr, void *buf, int len)
{
	insl((unsigned long)addr, buf, len);
}

static inline void readsw(const void __iomem *addr, void *buf, int len)
{
	insw((unsigned long)addr, buf, len);
}

static inline void readsb(const void __iomem *addr, void *buf, int len)
{
	insb((unsigned long)addr, buf, len);
}

static inline void writesl(const void __iomem *addr, const void *buf, int len)
{
	outsl((unsigned long)addr, buf, len);
}

static inline void writesw(const void __iomem *addr, const void *buf, int len)
{
	outsw((unsigned long)addr, buf, len);
}

static inline void writesb(const void __iomem *addr, const void *buf, int len)
{
	outsb((unsigned long)addr, buf, len);
}

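/*
 * Illustrative sketch (hypothetical device): the string accessors above
 * move a block of data through a single register, e.g. draining a
 * 16-bit data FIFO into a buffer:
 *
 *	unsigned short buf[64];
 *	readsw(regs + DATA_FIFO, buf, 64);
 *
 * regs and DATA_FIFO are placeholder names.
 */
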
/*
 * Map some physical address range into the kernel address space.
 * Blackfin has no MMU, so this is effectively an identity mapping.
 */
static inline void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
				int cacheflag)
{
	return (void __iomem *)physaddr;
}

/*
 * Unmap an ioremap()ed region again
 */
static inline void iounmap(void *addr)
{
}

/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it does not free pointer/page tables any more, but that
 * wasn't used anyway and might be added back later.
 */
static inline void __iounmap(void *addr, unsigned long size)
{
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
static inline void kernel_set_cachemode(void *addr, unsigned long size,
					int cmode)
{
}

static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
{
	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}

static inline void __iomem *ioremap_nocache(unsigned long physaddr,
				unsigned long size)
{
	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}

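/*
 * Illustrative sketch (assumed driver code, not part of this header):
 *
 *	void __iomem *regs = ioremap_nocache(phys_base, region_size);
 *	unsigned int id = readl(regs);
 *	iounmap(regs);
 *
 * Since there is no MMU, the returned pointer is simply phys_base cast
 * to an __iomem pointer; phys_base and region_size are placeholder names.
 */
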
extern void blkfin_inv_cache_all(void);

#endif /* __ASSEMBLY__ */

#define ioport_map(port, nr) ((void __iomem*)(port))
#define ioport_unmap(addr)

/* Pages to physical address... */
#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)

#define phys_to_virt(vaddr) ((void *) (vaddr))
#define virt_to_phys(vaddr) ((unsigned long) (vaddr))

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p) __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p) p

#endif /* __KERNEL__ */

#endif /* _BFIN_IO_H */