2 * Copyright 2004-2009 Analog Devices Inc.
4 * Licensed under the GPL-2 or later.
13 #include <linux/types.h>
15 #include <linux/compiler.h>
18 * These are for ISA/PCI shared memory _only_ and should never be used
19 * on any other type of memory, including Zorro memory. They are meant to
20 * access the bus in the bus byte order, which is little-endian.
22 * readX/writeX() are used to access memory mapped devices. On some
23 * architectures the memory mapped IO stuff needs to be accessed
24 * differently. On the bfin architecture, we just read/write the
25 * memory location directly.
/*
 * readb(): read one byte from a memory-mapped I/O address.
 * NOTE(review): this extraction is garbled -- the declarations of
 * `val' and `tmp', the asm template strings, the input operand list
 * and the function braces are missing from this view.  The "=d"
 * outputs suggest the value is loaded into a data register, with
 * `tmp' presumably used to mask interrupts around the access --
 * TODO confirm against the pristine source.  Code left byte-identical.
 */
29 static inline unsigned char readb(const volatile void __iomem
*addr
)
34 __asm__
__volatile__ (
39 : "=d"(val
), "=d"(tmp
)
43 return (unsigned char) val
;
/*
 * readw(): read one 16-bit word from a memory-mapped I/O address.
 * NOTE(review): garbled extraction -- local declarations, asm template
 * strings, the input operand list and the braces are missing from this
 * view; code left byte-identical.  Restore from the pristine source
 * before building.
 */
46 static inline unsigned short readw(const volatile void __iomem
*addr
)
51 __asm__
__volatile__ (
56 : "=d"(val
), "=d"(tmp
)
60 return (unsigned short) val
;
/*
 * readl(): read one 32-bit longword from a memory-mapped I/O address.
 * NOTE(review): garbled extraction -- local declarations, asm template
 * strings, the input operand list, the return statement and the braces
 * are all missing from this view (the function is cut off after the
 * output operands); code left byte-identical.
 */
63 static inline unsigned int readl(const volatile void __iomem
*addr
)
68 __asm__
__volatile__ (
73 : "=d"(val
), "=d"(tmp
)
80 #endif /* __ASSEMBLY__ */
/*
 * MMIO store helpers: write value `b' directly to address `addr'
 * through a volatile pointer of the matching width.  The (void) cast
 * discards the assignment result so the macros cannot be misused as
 * expressions.
 */
82 #define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
83 #define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
84 #define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
/*
 * The __raw_ accessors are identical to the ordinary ones on this
 * architecture: no extra byte swapping or barriers are applied.
 */
86 #define __raw_readb readb
87 #define __raw_readw readw
88 #define __raw_readl readl
89 #define __raw_writeb writeb
90 #define __raw_writew writew
91 #define __raw_writel writel
/*
 * I/O-space block operations are plain memset/memcpy, since I/O is
 * directly memory-mapped here.  The casts strip the __iomem address
 * space annotation.
 */
92 #define memset_io(a, b, c) memset((void *)(a), (b), (c))
93 #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
94 #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
96 /* Convert "I/O port addresses" to actual addresses. i.e. ugly casts. */
97 #define __io(port) ((void *)(unsigned long)(port))
/* Port I/O maps straight onto the MMIO accessors above. */
99 #define inb(port) readb(__io(port))
100 #define inw(port) readw(__io(port))
101 #define inl(port) readl(__io(port))
102 #define outb(x, port) writeb(x, __io(port))
103 #define outw(x, port) writew(x, __io(port))
104 #define outl(x, port) writel(x, __io(port))
/*
 * "Pausing" port I/O variants.  No extra delay is needed on this
 * architecture, so they are plain aliases for the ordinary accessors.
 *
 * Fix: inb()/outb() already run their argument through __io(), so the
 * previous definitions (e.g. inb(__io(port))) applied the conversion
 * twice.  That was only harmless because __io() is a pure cast; pass
 * the raw port number through instead.
 */
#define inb_p(port) inb(port)
#define inw_p(port) inw(port)
#define inl_p(port) inl(port)
#define outb_p(x, port) outb(x, port)
#define outw_p(x, port) outw(x, port)
#define outl_p(x, port) outl(x, port)
/* Repeating ("string") variants map onto the readsX/writesX helpers below. */
113 #define ioread8_rep(a, d, c) readsb(a, d, c)
114 #define ioread16_rep(a, d, c) readsw(a, d, c)
115 #define ioread32_rep(a, d, c) readsl(a, d, c)
116 #define iowrite8_rep(a, s, c) writesb(a, s, c)
117 #define iowrite16_rep(a, s, c) writesw(a, s, c)
118 #define iowrite32_rep(a, s, c) writesl(a, s, c)
/* Single-value ioreadN/iowriteN are plain readX/writeX. */
120 #define ioread8(x) readb(x)
121 #define ioread16(x) readw(x)
122 #define ioread32(x) readl(x)
123 #define iowrite8(val, x) writeb(val, x)
124 #define iowrite16(val, x) writew(val, x)
125 #define iowrite32(val, x) writel(val, x)
/*
 * Ensure ordering of I/O space writes. This will make sure that writes
 * following the barrier will arrive after all previous writes.
 * SSYNC() and wmb() are architecture wrappers defined elsewhere --
 * presumably the bfin system-sync instruction and a write barrier;
 * TODO confirm they are in scope wherever mmiowb() is used.
 */
133 #define mmiowb() do { SSYNC(); wmb(); } while (0)
/* The full 32-bit range is addressable as "port" space. */
135 #define IO_SPACE_LIMIT 0xffffffff
137 /* Values for nocacheflag and cmode */
138 #define IOMAP_NOCACHE_SER 1
/*
 * String ("rep") port I/O primitives, implemented out of line.
 * Each transfers `count' elements between the port and `addr'.
 */
extern void outsb(unsigned long port, const void *addr, unsigned long count);
extern void outsw(unsigned long port, const void *addr, unsigned long count);
extern void outsw_8(unsigned long port, const void *addr, unsigned long count);
extern void outsl(unsigned long port, const void *addr, unsigned long count);

extern void insb(unsigned long port, void *addr, unsigned long count);
extern void insw(unsigned long port, void *addr, unsigned long count);
extern void insw_8(unsigned long port, void *addr, unsigned long count);
extern void insl(unsigned long port, void *addr, unsigned long count);
extern void insl_16(unsigned long port, void *addr, unsigned long count);

/* DMA-driven variants; note the narrower 16-bit element count. */
extern void dma_outsb(unsigned long port, const void *addr, unsigned short count);
extern void dma_outsw(unsigned long port, const void *addr, unsigned short count);
extern void dma_outsl(unsigned long port, const void *addr, unsigned short count);

extern void dma_insb(unsigned long port, void *addr, unsigned short count);
extern void dma_insw(unsigned long port, void *addr, unsigned short count);
extern void dma_insl(unsigned long port, void *addr, unsigned short count);
161 static inline void readsl(const void __iomem
*addr
, void *buf
, int len
)
163 insl((unsigned long)addr
, buf
, len
);
166 static inline void readsw(const void __iomem
*addr
, void *buf
, int len
)
168 insw((unsigned long)addr
, buf
, len
);
171 static inline void readsb(const void __iomem
*addr
, void *buf
, int len
)
173 insb((unsigned long)addr
, buf
, len
);
176 static inline void writesl(const void __iomem
*addr
, const void *buf
, int len
)
178 outsl((unsigned long)addr
, buf
, len
);
181 static inline void writesw(const void __iomem
*addr
, const void *buf
, int len
)
183 outsw((unsigned long)addr
, buf
, len
);
186 static inline void writesb(const void __iomem
*addr
, const void *buf
, int len
)
188 outsb((unsigned long)addr
, buf
, len
);
/*
 * Map some physical address range into the kernel address space.
 * The mapping is the identity: the physical address is returned
 * unchanged and `size' is ignored.
 * NOTE(review): the third parameter declaration (presumably a
 * cache-mode flag, given the IOMAP_NOCACHE_SER argument passed by
 * the ioremap() wrappers below -- TODO confirm) and the function
 * braces are missing from this extraction; code left byte-identical.
 */
194 static inline void __iomem
*__ioremap(unsigned long physaddr
, unsigned long size
,
197 return (void __iomem
*)physaddr
;
/*
 * Unmap an ioremap()ed region.  Since __ioremap() is an identity
 * mapping here, there is presumably nothing to undo.
 * NOTE(review): the function body is missing from this extraction --
 * likely empty, TODO confirm; code left byte-identical.
 */
203 static inline void iounmap(void *addr
)
/*
 * __iounmap unmaps nearly everything, so be careful.
 * It no longer frees any current pointer/page tables, but that
 * wasn't used anyway and might be added later.
 * NOTE(review): the function body is missing from this extraction;
 * code left byte-identical.
 */
212 static inline void __iounmap(void *addr
, unsigned long size
)
/*
 * Set a new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may
 * already be in the cache.
 * NOTE(review): the trailing parameter (presumably the cache-mode
 * value -- TODO confirm) and the function body are missing from this
 * extraction; code left byte-identical.
 */
221 static inline void kernel_set_cachemode(void *addr
, unsigned long size
,
226 static inline void __iomem
*ioremap(unsigned long physaddr
, unsigned long size
)
228 return __ioremap(physaddr
, size
, IOMAP_NOCACHE_SER
);
/*
 * ioremap_nocache() is identical to ioremap() here: every mapping is
 * uncached (IOMAP_NOCACHE_SER).
 * NOTE(review): the second parameter declaration (presumably
 * `unsigned long size', since the body uses `size' and the sibling
 * ioremap() takes it -- TODO confirm) and the braces are missing from
 * this extraction; code left byte-identical.
 */
230 static inline void __iomem
*ioremap_nocache(unsigned long physaddr
,
233 return __ioremap(physaddr
, size
, IOMAP_NOCACHE_SER
);
/*
 * Invalidate the entire cache; implemented out of line.
 * NOTE(review): the "blkfin" spelling is in the original; callers
 * depend on it, so it is left unchanged.
 */
236 extern void blkfin_inv_cache_all(void);
/* Port-space "mapping" is a plain cast; unmap is a no-op. */
240 #define ioport_map(port, nr) ((void __iomem*)(port))
241 #define ioport_unmap(addr)
243 /* Pages to physical address... */
244 #define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
/* Flat address space: virtual, physical and bus addresses coincide. */
246 #define phys_to_virt(vaddr) ((void *) (vaddr))
247 #define virt_to_phys(vaddr) ((unsigned long) (vaddr))
249 #define virt_to_bus virt_to_phys
250 #define bus_to_virt phys_to_virt
/*
 * Convert a physical pointer to a virtual kernel pointer for
 * /dev/mem access.
 */
#define xlate_dev_mem_ptr(p) __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 * A no-op here.  Fix: the expansion was the bare, unparenthesized
 * argument `p'; parenthesize it so the macro expands safely inside
 * larger expressions.
 */
#define xlate_dev_kmem_ptr(p) (p)
263 #endif /* __KERNEL__ */
265 #endif /* _BFIN_IO_H */