/* include/asm-ppc/io.h */
#ifdef __KERNEL__
#ifndef _PPC_IO_H
#define _PPC_IO_H

#include <linux/string.h>
#include <linux/types.h>

#include <asm/page.h>
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/mmu.h>

#define SIO_CONFIG_RA	0x398
#define SIO_CONFIG_RD	0x399

#define SLOW_DOWN_IO

#define PMAC_ISA_MEM_BASE	0
#define PMAC_PCI_DRAM_OFFSET	0
#define CHRP_ISA_IO_BASE	0xf8000000
#define CHRP_ISA_MEM_BASE	0xf7000000
#define CHRP_PCI_DRAM_OFFSET	0
#define PREP_ISA_IO_BASE	0x80000000
#define PREP_ISA_MEM_BASE	0xc0000000
#define PREP_PCI_DRAM_OFFSET	0x80000000

#if defined(CONFIG_4xx)
#include <asm/ibm4xx.h>
#elif defined(CONFIG_8xx)
#include <asm/mpc8xx.h>
#elif defined(CONFIG_8260)
#include <asm/mpc8260.h>
#elif defined(CONFIG_APUS) || !defined(CONFIG_PCI)
#define _IO_BASE	0
#define _ISA_MEM_BASE	0
#define PCI_DRAM_OFFSET	0
#else /* Everyone else */
#define _IO_BASE	isa_io_base
#define _ISA_MEM_BASE	isa_mem_base
#define PCI_DRAM_OFFSET	pci_dram_offset
#endif /* Platform-dependent I/O */

#define ___IO_BASE ((void __iomem *)_IO_BASE)
extern unsigned long isa_io_base;
extern unsigned long isa_mem_base;
extern unsigned long pci_dram_offset;

/*
 * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
 *
 * Read operations have additional twi & isync to make sure the read
 * is actually performed (i.e. the data has come back) before we start
 * executing any following instructions.
 */
extern inline int in_8(const volatile unsigned char __iomem *addr)
{
	int ret;

	__asm__ __volatile__(
		"sync; lbz%U1%X1 %0,%1;\n"
		"twi 0,%0,0;\n"
		"isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_8(volatile unsigned char __iomem *addr, int val)
{
	__asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}

extern inline int in_le16(const volatile unsigned short __iomem *addr)
{
	int ret;

	__asm__ __volatile__("sync; lhbrx %0,0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) :
			     "r" (addr), "m" (*addr));
	return ret;
}

extern inline int in_be16(const volatile unsigned short __iomem *addr)
{
	int ret;

	__asm__ __volatile__("sync; lhz%U1%X1 %0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_le16(volatile unsigned short __iomem *addr, int val)
{
	__asm__ __volatile__("sync; sthbrx %1,0,%2" : "=m" (*addr) :
			     "r" (val), "r" (addr));
}

extern inline void out_be16(volatile unsigned short __iomem *addr, int val)
{
	__asm__ __volatile__("sync; sth%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}

extern inline unsigned in_le32(const volatile unsigned __iomem *addr)
{
	unsigned ret;

	__asm__ __volatile__("sync; lwbrx %0,0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) :
			     "r" (addr), "m" (*addr));
	return ret;
}

extern inline unsigned in_be32(const volatile unsigned __iomem *addr)
{
	unsigned ret;

	__asm__ __volatile__("sync; lwz%U1%X1 %0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_le32(volatile unsigned __iomem *addr, int val)
{
	__asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr) :
			     "r" (val), "r" (addr));
}

extern inline void out_be32(volatile unsigned __iomem *addr, int val)
{
	__asm__ __volatile__("sync; stw%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}
#if defined (CONFIG_8260_PCI9)
#define readb(addr) in_8((volatile u8 *)(addr))
#define writeb(b,addr) out_8((volatile u8 *)(addr), (b))
#else
static inline __u8 readb(const volatile void __iomem *addr)
{
	return in_8(addr);
}
static inline void writeb(__u8 b, volatile void __iomem *addr)
{
	out_8(addr, b);
}
#endif

#if defined(CONFIG_APUS)
static inline __u16 readw(const volatile void __iomem *addr)
{
	return *(__force volatile __u16 *)(addr);
}
static inline __u32 readl(const volatile void __iomem *addr)
{
	return *(__force volatile __u32 *)(addr);
}
static inline void writew(__u16 b, volatile void __iomem *addr)
{
	*(__force volatile __u16 *)(addr) = b;
}
static inline void writel(__u32 b, volatile void __iomem *addr)
{
	*(__force volatile __u32 *)(addr) = b;
}
#elif defined (CONFIG_8260_PCI9)
/* Use macros if PCI9 workaround enabled */
#define readw(addr) in_le16((volatile u16 *)(addr))
#define readl(addr) in_le32((volatile u32 *)(addr))
#define writew(b,addr) out_le16((volatile u16 *)(addr),(b))
#define writel(b,addr) out_le32((volatile u32 *)(addr),(b))
#else
static inline __u16 readw(const volatile void __iomem *addr)
{
	return in_le16(addr);
}
static inline __u32 readl(const volatile void __iomem *addr)
{
	return in_le32(addr);
}
static inline void writew(__u16 b, volatile void __iomem *addr)
{
	out_le16(addr, b);
}
static inline void writel(__u32 b, volatile void __iomem *addr)
{
	out_le32(addr, b);
}
#endif /* CONFIG_APUS */
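
/*
 * Illustrative sketch (not part of the original header): a driver would
 * typically pair ioremap() with the readl()/writel() accessors above.
 * The physical address, mapping size and register offset used here are
 * hypothetical.
 */
#if 0
static void example_mmio_access(void)
{
	void __iomem *base = ioremap(0xf0000000, 0x1000);	/* hypothetical device window */
	u32 status;

	status = readl(base + 0x10);		/* little-endian register read */
	writel(status | 0x1, base + 0x10);	/* set a (hypothetical) enable bit */
	iounmap(base);
}
#endif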

#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)

static inline __u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(__force volatile __u8 *)(addr);
}
static inline __u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(__force volatile __u16 *)(addr);
}
static inline __u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(__force volatile __u32 *)(addr);
}
static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
{
	*(__force volatile __u8 *)(addr) = b;
}
static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
{
	*(__force volatile __u16 *)(addr) = b;
}
static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
{
	*(__force volatile __u32 *)(addr) = b;
}

#define mmiowb()

/*
 * The insw/outsw/insl/outsl macros don't do byte-swapping.
 * They are only used in practice for transferring buffers which
 * are arrays of bytes, and byte-swapping is not appropriate in
 * that case.  - paulus
 */
#define insb(port, buf, ns)	_insb((port)+___IO_BASE, (buf), (ns))
#define outsb(port, buf, ns)	_outsb((port)+___IO_BASE, (buf), (ns))
#define insw(port, buf, ns)	_insw_ns((port)+___IO_BASE, (buf), (ns))
#define outsw(port, buf, ns)	_outsw_ns((port)+___IO_BASE, (buf), (ns))
#define insl(port, buf, nl)	_insl_ns((port)+___IO_BASE, (buf), (nl))
#define outsl(port, buf, nl)	_outsl_ns((port)+___IO_BASE, (buf), (nl))
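
/*
 * Illustrative sketch (not part of the original header): insw()/outsw()
 * move data in natural byte order, so they suit byte-stream FIFOs such
 * as an IDE data port.  The port number and transfer size below are
 * hypothetical.
 */
#if 0
static void example_string_io(void)
{
	u16 sector[256];		/* one 512-byte sector */

	insw(0x1f0, sector, 256);	/* read 256 halfwords from the data port */
	outsw(0x1f0, sector, 256);	/* write them back (example only) */
}
#endif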

#define readsb(a, b, n)		_insb((a), (b), (n))
#define readsw(a, b, n)		_insw_ns((a), (b), (n))
#define readsl(a, b, n)		_insl_ns((a), (b), (n))
#define writesb(a, b, n)	_outsb((a),(b),(n))
#define writesw(a, b, n)	_outsw_ns((a),(b),(n))
#define writesl(a, b, n)	_outsl_ns((a),(b),(n))


/*
 * On powermacs and 8xx we will get a machine check exception
 * if we try to read data from a non-existent I/O port. Because
 * the machine check is an asynchronous exception, it isn't
 * well-defined which instruction SRR0 will point to when the
 * exception occurs.
 * With the sequence below (twi; isync; nop), we have found that
 * the machine check occurs on one of the three instructions on
 * all PPC implementations tested so far.  The twi and isync are
 * needed on the 601 (in fact twi; sync works too), the isync and
 * nop are needed on 604[e|r], and any of twi, sync or isync will
 * work on 603[e], 750, 74xx.
 * The twi creates an explicit data dependency on the returned
 * value which seems to be needed to make the 601 wait for the
 * load to finish.
 */

#define __do_in_asm(name, op)				\
extern __inline__ unsigned int name(unsigned int port)	\
{							\
	unsigned int x;					\
	__asm__ __volatile__(				\
		"sync\n"				\
		"0:" op " %0,0,%1\n"			\
		"1:	twi 0,%0,0\n"			\
		"2:	isync\n"			\
		"3:	nop\n"				\
		"4:\n"					\
		".section .fixup,\"ax\"\n"		\
		"5:	li %0,-1\n"			\
		"	b 4b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align 2\n"			\
		"	.long 0b,5b\n"			\
		"	.long 1b,5b\n"			\
		"	.long 2b,5b\n"			\
		"	.long 3b,5b\n"			\
		".previous"				\
		: "=&r" (x)				\
		: "r" (port + ___IO_BASE));		\
	return x;					\
}

#define __do_out_asm(name, op)				\
extern __inline__ void name(unsigned int val, unsigned int port) \
{							\
	__asm__ __volatile__(				\
		"sync\n"				\
		"0:" op " %0,0,%1\n"			\
		"1:	sync\n"				\
		"2:\n"					\
		".section __ex_table,\"a\"\n"		\
		"	.align 2\n"			\
		"	.long 0b,2b\n"			\
		"	.long 1b,2b\n"			\
		".previous"				\
		: : "r" (val), "r" (port + ___IO_BASE));	\
}

__do_out_asm(outb, "stbx")
#ifdef CONFIG_APUS
__do_in_asm(inb, "lbzx")
__do_in_asm(inw, "lhz%U1%X1")
__do_in_asm(inl, "lwz%U1%X1")
__do_out_asm(outl,"stw%U0%X0")
__do_out_asm(outw, "sth%U0%X0")
#elif defined (CONFIG_8260_PCI9)
/* inb/inw/inl cannot be defined in asm when the PCI9 workaround is used */
#define inb(port)	in_8((port)+___IO_BASE)
#define inw(port)	in_le16((port)+___IO_BASE)
#define inl(port)	in_le32((port)+___IO_BASE)
__do_out_asm(outw, "sthbrx")
__do_out_asm(outl, "stwbrx")
#else
__do_in_asm(inb, "lbzx")
__do_in_asm(inw, "lhbrx")
__do_in_asm(inl, "lwbrx")
__do_out_asm(outw, "sthbrx")
__do_out_asm(outl, "stwbrx")

#endif

#define inb_p(port)		inb((port))
#define outb_p(val, port)	outb((val), (port))
#define inw_p(port)		inw((port))
#define outw_p(val, port)	outw((val), (port))
#define inl_p(port)		inl((port))
#define outl_p(val, port)	outl((val), (port))
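
/*
 * Illustrative sketch (not part of the original header): the port
 * accessors add ___IO_BASE themselves, so callers pass plain ISA port
 * numbers.  The UART port and register offsets used here are
 * hypothetical.
 */
#if 0
static void example_port_io(void)
{
	unsigned int lsr;

	outb(0x41, 0x3f8);		/* write a byte to a (hypothetical) UART THR */
	lsr = inb(0x3f8 + 5);		/* read the line status register */
}
#endif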

extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
extern void _outsb(volatile u8 __iomem *addr, const void *buf, long count);
extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);


#define IO_SPACE_LIMIT ~0

#if defined (CONFIG_8260_PCI9)
#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
#else
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *)addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
#endif
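
/*
 * Illustrative sketch (not part of the original header): memcpy_toio()
 * and memset_io() operate on __iomem cookies, e.g. a frame buffer or
 * on-board packet memory.  The mapping, buffer size and frame length
 * below are hypothetical.
 */
#if 0
static void example_io_copy(void __iomem *dev_mem, const void *frame, int len)
{
	memset_io(dev_mem, 0, 2048);		/* clear the (hypothetical) packet buffer */
	memcpy_toio(dev_mem, frame, len);	/* copy the frame into device memory */
}
#endif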

/*
 * Map in an area of physical address space, for accessing
 * I/O devices etc.
 */
extern void __iomem *__ioremap(phys_addr_t address, unsigned long size,
			       unsigned long flags);
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
#ifdef CONFIG_44x
extern void __iomem *ioremap64(unsigned long long address, unsigned long size);
#endif
#define ioremap_nocache(addr, size)	ioremap((addr), (size))
extern void iounmap(volatile void __iomem *addr);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
			     unsigned int size, int flags);

/*
 * The PCI bus is inherently Little-Endian.  The PowerPC is being
 * run Big-Endian.  Thus all values which cross the [PCI] barrier
 * must be endian-adjusted.  Also, the local DRAM has a different
 * address from the PCI point of view, thus buffer addresses also
 * have to be modified [mapped] appropriately.
 */
extern inline unsigned long virt_to_bus(volatile void * address)
{
#ifndef CONFIG_APUS
	if (address == (void *)0)
		return 0;
	return (unsigned long)address - KERNELBASE + PCI_DRAM_OFFSET;
#else
	return iopa ((unsigned long) address);
#endif
}

extern inline void * bus_to_virt(unsigned long address)
{
#ifndef CONFIG_APUS
	if (address == 0)
		return NULL;
	return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
#else
	return (void*) mm_ptov (address);
#endif
}
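
/*
 * Illustrative sketch (not part of the original header): a bus-master
 * device is programmed with bus addresses, so a kernel-virtual buffer
 * is translated with virt_to_bus() before being handed to the device.
 * The register offset and the idea of a "buffer address register" are
 * hypothetical.
 */
#if 0
static void example_bus_address(void *rx_buffer, void __iomem *regs)
{
	unsigned long bus_addr = virt_to_bus(rx_buffer);

	writel(bus_addr, regs + 0x20);	/* tell the device where to DMA (example) */
}
#endif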

/*
 * Change virtual addresses to physical addresses and vv, for
 * addresses in the area where the kernel has the RAM mapped.
 */
extern inline unsigned long virt_to_phys(volatile void * address)
{
#ifndef CONFIG_APUS
	return (unsigned long) address - KERNELBASE;
#else
	return iopa ((unsigned long) address);
#endif
}

extern inline void * phys_to_virt(unsigned long address)
{
#ifndef CONFIG_APUS
	return (void *) (address + KERNELBASE);
#else
	return (void*) mm_ptov (address);
#endif
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)

/* Enforce in-order execution of data I/O.
 * No distinction between read/write on PPC; use eieio for all three.
 */
#define iobarrier_rw() eieio()
#define iobarrier_r()  eieio()
#define iobarrier_w()  eieio()

/*
 * Here comes the ppc implementation of the IOMAP
 * interfaces.
 */
static inline unsigned int ioread8(void __iomem *addr)
{
	return readb(addr);
}

static inline unsigned int ioread16(void __iomem *addr)
{
	return readw(addr);
}

static inline unsigned int ioread32(void __iomem *addr)
{
	return readl(addr);
}

static inline void iowrite8(u8 val, void __iomem *addr)
{
	writeb(val, addr);
}

static inline void iowrite16(u16 val, void __iomem *addr)
{
	writew(val, addr);
}

static inline void iowrite32(u32 val, void __iomem *addr)
{
	writel(val, addr);
}

static inline void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	_insb(addr, dst, count);
}

static inline void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	_insw_ns(addr, dst, count);
}

static inline void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	_insl_ns(addr, dst, count);
}

static inline void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	_outsb(addr, src, count);
}

static inline void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	_outsw_ns(addr, src, count);
}

static inline void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	_outsl_ns(addr, src, count);
}

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
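
/*
 * Illustrative sketch (not part of the original header): the ioread/iowrite
 * accessors work on cookies from pci_iomap(), whether the BAR is I/O- or
 * memory-mapped.  The BAR number and register offsets are hypothetical.
 */
#if 0
static void example_iomap(struct pci_dev *pdev)
{
	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* map BAR 0, no length limit */
	unsigned int id;

	if (!regs)
		return;
	id = ioread32(regs);			/* read a (hypothetical) ID register */
	iowrite32(id | 0x1, regs + 0x04);	/* write a (hypothetical) control register */
	pci_iounmap(pdev, regs);
}
#endif
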
#endif /* _PPC_IO_H */

#ifdef CONFIG_8260_PCI9
#include <asm/mpc8260_pci9.h>
#endif

#ifdef CONFIG_NOT_COHERENT_CACHE

#define dma_cache_inv(_start,_size) \
	invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
	clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
	flush_dcache_range(_start, (_start + _size))

#else

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

#endif

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

/* access ports */
#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) |  (_v))
#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))

#define setbits16(_addr, _v) out_be16((_addr), in_be16(_addr) |  (_v))
#define clrbits16(_addr, _v) out_be16((_addr), in_be16(_addr) & ~(_v))
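
/*
 * Illustrative sketch (not part of the original header): the helpers do a
 * read-modify-write of a big-endian register, so only the named bits
 * change.  The register pointer and bit masks are hypothetical.
 */
#if 0
static void example_setbits(u32 __iomem *ctrl_reg)
{
	setbits32(ctrl_reg, 0x80000000);	/* set a (hypothetical) enable bit */
	clrbits32(ctrl_reg, 0x00000001);	/* clear a (hypothetical) reset bit */
}
#endif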

#endif /* __KERNEL__ */