sh: Clean up places that make 29-bit physical assumptions.
include/asm-sh/io.h
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *    read{b,w,l}/write{b,w,l} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 * These may (will) be platform-specific functions.
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 * For read{b,w,l} and write{b,w,l} there are also __raw versions, which
 * do not have a memory barrier after them.
 *
 * In addition, we have ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific
 * I/O, which are processor specific.
 */

/*
 * We follow the Alpha convention here:
 *  __inb expands to an inline function call (which calls via the mv)
 *  _inb  is a real function call (note ___raw fns are _ version of __raw)
 *  inb   by default expands to _inb, but the machine specific code may
 *        define it to __inb if it chooses.
 */
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__

/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
#define __IO_PREFIX generic
#include <asm/io_generic.h>

#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __FUNCTION__, __LINE__, (port), (u32)__builtin_return_address(0))
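
/*
 * Illustrative sketch only (not part of the original header): a stub
 * port routine for an address the board does not decode might log the
 * access via maybebadio() and return all-ones, as an unclaimed
 * PC-style bus read would.  The routine name and return value below
 * are hypothetical.
 */
#if 0
static unsigned char example_bad_inb(unsigned long port)
{
	maybebadio(port);
	return 0xff;
}
#endif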

/*
 * Since boards are able to define their own set of I/O routines through
 * their respective machine vector, we always wrap through the mv.
 *
 * Also, in the event that a board hasn't provided its own definition for
 * a given routine, it will be wrapped to generic code at run-time.
 */
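
/*
 * Illustrative sketch only (not part of the original header): a board
 * supplies its own accessors by filling in its machine vector
 * (struct sh_machine_vector, declared in <asm/machvec.h>).  The board
 * name and accessor functions below are hypothetical.
 */
#if 0
static struct sh_machine_vector mv_myboard = {
	.mv_inb		= myboard_inb,	/* hypothetical accessor */
	.mv_outb	= myboard_outb,	/* hypothetical accessor */
};
#endif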

#define __inb(p) sh_mv.mv_inb((p))
#define __inw(p) sh_mv.mv_inw((p))
#define __inl(p) sh_mv.mv_inl((p))
#define __outb(x,p) sh_mv.mv_outb((x),(p))
#define __outw(x,p) sh_mv.mv_outw((x),(p))
#define __outl(x,p) sh_mv.mv_outl((x),(p))

#define __inb_p(p) sh_mv.mv_inb_p((p))
#define __inw_p(p) sh_mv.mv_inw_p((p))
#define __inl_p(p) sh_mv.mv_inl_p((p))
#define __outb_p(x,p) sh_mv.mv_outb_p((x),(p))
#define __outw_p(x,p) sh_mv.mv_outw_p((x),(p))
#define __outl_p(x,p) sh_mv.mv_outl_p((x),(p))

#define __insb(p,b,c) sh_mv.mv_insb((p), (b), (c))
#define __insw(p,b,c) sh_mv.mv_insw((p), (b), (c))
#define __insl(p,b,c) sh_mv.mv_insl((p), (b), (c))
#define __outsb(p,b,c) sh_mv.mv_outsb((p), (b), (c))
#define __outsw(p,b,c) sh_mv.mv_outsw((p), (b), (c))
#define __outsl(p,b,c) sh_mv.mv_outsl((p), (b), (c))

#define __readb(a) sh_mv.mv_readb((a))
#define __readw(a) sh_mv.mv_readw((a))
#define __readl(a) sh_mv.mv_readl((a))
#define __writeb(v,a) sh_mv.mv_writeb((v),(a))
#define __writew(v,a) sh_mv.mv_writew((v),(a))
#define __writel(v,a) sh_mv.mv_writel((v),(a))

#define inb __inb
#define inw __inw
#define inl __inl
#define outb __outb
#define outw __outw
#define outl __outl

#define inb_p __inb_p
#define inw_p __inw_p
#define inl_p __inl_p
#define outb_p __outb_p
#define outw_p __outw_p
#define outl_p __outl_p

#define insb __insb
#define insw __insw
#define insl __insl
#define outsb __outsb
#define outsw __outsw
#define outsl __outsl

#define __raw_readb(a) __readb((void __iomem *)(a))
#define __raw_readw(a) __readw((void __iomem *)(a))
#define __raw_readl(a) __readl((void __iomem *)(a))
#define __raw_writeb(v, a) __writeb(v, (void __iomem *)(a))
#define __raw_writew(v, a) __writew(v, (void __iomem *)(a))
#define __raw_writel(v, a) __writel(v, (void __iomem *)(a))

void __raw_writesl(unsigned long addr, const void *data, int longlen);
void __raw_readsl(unsigned long addr, void *data, int longlen);

/*
 * The platform header files may define some of these macros to use
 * the inlined versions where appropriate. These macros may also be
 * redefined by userlevel programs.
 */
#ifdef __raw_readb
# define readb(a) ({ unsigned int r_ = __raw_readb(a); mb(); r_; })
#endif
#ifdef __raw_readw
# define readw(a) ({ unsigned int r_ = __raw_readw(a); mb(); r_; })
#endif
#ifdef __raw_readl
# define readl(a) ({ unsigned int r_ = __raw_readl(a); mb(); r_; })
#endif

#ifdef __raw_writeb
# define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); })
#endif
#ifdef __raw_writew
# define writew(v,a) ({ __raw_writew((v),(a)); mb(); })
#endif
#ifdef __raw_writel
# define writel(v,a) ({ __raw_writel((v),(a)); mb(); })
#endif

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)
#define writesl __raw_writesl
#define readsl  __raw_readsl

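/*
 * Illustrative sketch only: the string accessors generated above burst
 * a memory buffer to or from a single fixed MMIO address.  The FIFO
 * pointer and buffer below are hypothetical.
 */
#if 0
static void example_fifo_fill(void __iomem *fifo, const u16 *buf,
			      unsigned int words)
{
	writesw(fifo, buf, words);	/* one 16-bit store per iteration */
}
#endif
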
#define readb_relaxed(a) readb(a)
#define readw_relaxed(a) readw(a)
#define readl_relaxed(a) readl(a)

/* Simple MMIO */
#define ioread8(a) readb(a)
#define ioread16(a) readw(a)
#define ioread16be(a) be16_to_cpu(__raw_readw((a)))
#define ioread32(a) readl(a)
#define ioread32be(a) be32_to_cpu(__raw_readl((a)))

#define iowrite8(v,a) writeb((v),(a))
#define iowrite16(v,a) writew((v),(a))
#define iowrite16be(v,a) __raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a) writel((v),(a))
#define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a))

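/*
 * Illustrative sketch only: the *be variants are useful for devices
 * whose registers are big-endian regardless of CPU endianness.  The
 * register offset below is hypothetical.
 */
#if 0
static void example_set_dma_addr(void __iomem *regs, u32 busaddr)
{
	iowrite32be(busaddr, regs + 0x10);	/* hypothetical DMA address reg */
}
#endif
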
#define ioread8_rep(a,d,c) insb((a),(d),(c))
#define ioread16_rep(a,d,c) insw((a),(d),(c))
#define ioread32_rep(a,d,c) insl((a),(d),(c))

#define iowrite8_rep(a,s,c) outsb((a),(s),(c))
#define iowrite16_rep(a,s,c) outsw((a),(s),(c))
#define iowrite32_rep(a,s,c) outsl((a),(s),(c))

#define mmiowb() wmb()	/* synco on SH-4A, otherwise a nop */

#define IO_SPACE_LIMIT 0xffffffff

/*
 * This function provides a method for the generic case where a board-specific
 * ioport_map simply needs to return the port + some arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	extern unsigned long generic_io_base;

	generic_io_base = pbase;
}
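
/*
 * Illustrative sketch only: board setup code calls __set_io_port_base()
 * once with the (virtual) base of its I/O window, after which the
 * generic ioport_map() just adds the port number to it.  The base
 * address below is hypothetical.
 */
#if 0
static void example_board_setup(void)
{
	__set_io_port_base(0xb8000000);	/* hypothetical I/O window in P2 */
}
#endif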

/* We really want to try and get these to memcpy etc */
extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long);
extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
extern void memset_io(volatile void __iomem *, int, unsigned long);
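
/*
 * Illustrative sketch only: these helpers copy between ordinary memory
 * and MMIO space.  The mapping and length below are hypothetical.
 */
#if 0
static void example_read_packet(void *dst, void __iomem *window,
				unsigned long len)
{
	memcpy_fromio(dst, window, len);
}
#endif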

/* SuperH on-chip I/O functions */
static inline unsigned char ctrl_inb(unsigned long addr)
{
	return *(volatile unsigned char*)addr;
}

static inline unsigned short ctrl_inw(unsigned long addr)
{
	return *(volatile unsigned short*)addr;
}

static inline unsigned int ctrl_inl(unsigned long addr)
{
	return *(volatile unsigned long*)addr;
}

static inline unsigned long long ctrl_inq(unsigned long addr)
{
	return *(volatile unsigned long long*)addr;
}

static inline void ctrl_outb(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char*)addr = b;
}

static inline void ctrl_outw(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short*)addr = b;
}

static inline void ctrl_outl(unsigned int b, unsigned long addr)
{
	*(volatile unsigned long*)addr = b;
}

static inline void ctrl_outq(unsigned long long b, unsigned long addr)
{
	*(volatile unsigned long long*)addr = b;
}

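/*
 * Illustrative sketch only: the ctrl_*() accessors take a plain
 * unsigned long register address (typically an on-chip P4 address)
 * rather than an __iomem cookie.  The register address and bit below
 * are hypothetical.
 */
#if 0
#define EXAMPLE_TMR_TCR	0xffd80010	/* hypothetical on-chip register */

static void example_mask_timer_irq(void)
{
	ctrl_outw(ctrl_inw(EXAMPLE_TMR_TCR) & ~0x0020, EXAMPLE_TMR_TCR);
}
#endif
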
static inline void ctrl_delay(void)
{
#ifdef P2SEG
	ctrl_inw(P2SEG);
#endif
}

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

/* arch/sh/mm/ioremap_64.c */
unsigned long onchip_remap(unsigned long addr, unsigned long size,
			   const char *name);
extern void onchip_unmap(unsigned long vaddr);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address) ((unsigned long)(address))
#define phys_to_virt(address) ((void *)(address))
#else
#define virt_to_phys(address) (__pa(address))
#define phys_to_virt(address) (__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap(unsigned long offset, unsigned long size,
			unsigned long flags);
void __iounmap(void __iomem *addr);
#else
#define __ioremap(offset, size, flags)	((void __iomem *)(offset))
#define __iounmap(addr)			do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
#ifdef CONFIG_SUPERH32
	unsigned long last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}
#endif

	return __ioremap(offset, size, flags);
}

#define ioremap(offset, size)				\
	__ioremap_mode((offset), (size), 0)
#define ioremap_nocache(offset, size)			\
	__ioremap_mode((offset), (size), 0)
#define ioremap_cache(offset, size)			\
	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
#define p3_ioremap(offset, size, flags)			\
	__ioremap((offset), (size), (flags))
#define iounmap(addr)					\
	__iounmap((addr))

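/*
 * Illustrative sketch only: a driver maps its registers with
 * ioremap_nocache(), accesses them through the returned cookie, and
 * unmaps with iounmap().  On 29-bit parts an address in P1/P2 range
 * simply comes back as the matching segment address with no page-table
 * work.  The physical address and register below are hypothetical.
 */
#if 0
static int example_probe(void)
{
	void __iomem *regs = ioremap_nocache(0x1f000000, 0x100);

	if (!regs)
		return -ENOMEM;

	writel(1, regs);	/* hypothetical enable register */
	iounmap(regs);
	return 0;
}
#endif
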
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */