[SPARC64]: Add SG merging support back into IOMMU code.
include/asm-sparc64/io.h
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/page.h>      /* IO address mapping routines need this */
#include <asm/system.h>
#include <asm/asi.h>

/* PC crapola... */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define BIO_VMERGE_BOUNDARY	8192

static inline u8 _inb(unsigned long addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_inb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

static inline u16 _inw(unsigned long addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_inw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

static inline u32 _inl(unsigned long addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_inl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

static inline void _outb(u8 b, unsigned long addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

static inline void _outw(u16 w, unsigned long addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

static inline void _outl(u32 l, unsigned long addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

#define inb(__addr)		(_inb((unsigned long)(__addr)))
#define inw(__addr)		(_inw((unsigned long)(__addr)))
#define inl(__addr)		(_inl((unsigned long)(__addr)))
#define outb(__b, __addr)	(_outb((u8)(__b), (unsigned long)(__addr)))
#define outw(__w, __addr)	(_outw((u16)(__w), (unsigned long)(__addr)))
#define outl(__l, __addr)	(_outl((u32)(__l), (unsigned long)(__addr)))

#define inb_p(__addr)		inb(__addr)
#define outb_p(__b, __addr)	outb(__b, __addr)
#define inw_p(__addr)		inw(__addr)
#define outw_p(__w, __addr)	outw(__w, __addr)
#define inl_p(__addr)		inl(__addr)
#define outl_p(__l, __addr)	outl(__l, __addr)

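/* A minimal usage sketch (not part of the original header): on sparc64 the
 * inb()/outb() family takes the full bus address of the mapped PCI I/O space
 * rather than a small x86-style port number.  Assuming a hypothetical driver
 * that keeps the start of its device's I/O resource in io_base (e.g. from
 * pci_resource_start()), register access could look like:
 *
 *	unsigned long io_base;			// hypothetical I/O resource start
 *	u8 status = inb(io_base + 0x04);	// 8-bit status register read
 *	outw(0x0001, io_base + 0x08);		// 16-bit command register write
 */
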
extern void outsb(unsigned long, const void *, unsigned long);
extern void outsw(unsigned long, const void *, unsigned long);
extern void outsl(unsigned long, const void *, unsigned long);
extern void insb(unsigned long, void *, unsigned long);
extern void insw(unsigned long, void *, unsigned long);
extern void insl(unsigned long, void *, unsigned long);

static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
{
	insb((unsigned long __force)port, buf, count);
}

static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
{
	insw((unsigned long __force)port, buf, count);
}

static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
{
	insl((unsigned long __force)port, buf, count);
}

static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
{
	outsb((unsigned long __force)port, buf, count);
}

static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
{
	outsw((unsigned long __force)port, buf, count);
}

static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
{
	outsl((unsigned long __force)port, buf, count);
}

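/* A minimal sketch, assuming a hypothetical device that exposes a 32-bit data
 * FIFO at a single address: the _rep wrappers above repeatedly access the same
 * port, so a driver can drain such a FIFO into a buffer in one call.
 *
 *	void __iomem *fifo;			// hypothetical FIFO register
 *	u32 buf[64];
 *	ioread32_rep(fifo, buf, ARRAY_SIZE(buf));	// read 64 words from the FIFO
 */
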
/* Memory functions, same as I/O accesses on Ultra. */
static inline u8 _readb(const volatile void __iomem *addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
	return ret;
}

static inline u16 _readw(const volatile void __iomem *addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

static inline u32 _readl(const volatile void __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

static inline u64 _readq(const volatile void __iomem *addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

static inline void _writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

static inline void _writew(u16 w, volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

static inline void _writel(u32 l, volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

static inline void _writeq(u64 q, volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

#define readb(__addr)		_readb(__addr)
#define readw(__addr)		_readw(__addr)
#define readl(__addr)		_readl(__addr)
#define readq(__addr)		_readq(__addr)
#define readb_relaxed(__addr)	_readb(__addr)
#define readw_relaxed(__addr)	_readw(__addr)
#define readl_relaxed(__addr)	_readl(__addr)
#define readq_relaxed(__addr)	_readq(__addr)
#define writeb(__b, __addr)	_writeb(__b, __addr)
#define writew(__w, __addr)	_writew(__w, __addr)
#define writel(__l, __addr)	_writel(__l, __addr)
#define writeq(__q, __addr)	_writeq(__q, __addr)

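/* A minimal sketch of the MMIO accessors above, assuming a hypothetical device
 * whose registers have been mapped to regs (via ioremap() or pci_iomap() from
 * later in this file); the little-endian ASI makes these behave like the
 * readl()/writel() used by portable PCI drivers.
 *
 *	void __iomem *regs;			// hypothetical register window
 *	u32 ver = readl(regs + 0x00);		// 32-bit little-endian read
 *	writel(0x1, regs + 0x10);		// 32-bit little-endian write
 */
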
/* Now versions without byte-swapping. */
static inline u8 _raw_readb(unsigned long addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

static inline u16 _raw_readw(unsigned long addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

static inline u32 _raw_readl(unsigned long addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

static inline u64 _raw_readq(unsigned long addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

static inline void _raw_writeb(u8 b, unsigned long addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

static inline void _raw_writew(u16 w, unsigned long addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

static inline void _raw_writel(u32 l, unsigned long addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

static inline void _raw_writeq(u64 q, unsigned long addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

#define __raw_readb(__addr)		(_raw_readb((unsigned long)(__addr)))
#define __raw_readw(__addr)		(_raw_readw((unsigned long)(__addr)))
#define __raw_readl(__addr)		(_raw_readl((unsigned long)(__addr)))
#define __raw_readq(__addr)		(_raw_readq((unsigned long)(__addr)))
#define __raw_writeb(__b, __addr)	(_raw_writeb((u8)(__b), (unsigned long)(__addr)))
#define __raw_writew(__w, __addr)	(_raw_writew((u16)(__w), (unsigned long)(__addr)))
#define __raw_writel(__l, __addr)	(_raw_writel((u32)(__l), (unsigned long)(__addr)))
#define __raw_writeq(__q, __addr)	(_raw_writeq((u64)(__q), (unsigned long)(__addr)))

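/* A minimal sketch: the __raw_*() variants use the non-swapping (big-endian)
 * ASI, so they suit registers or device memory that is natively big-endian.
 * The address and contents below are purely hypothetical.
 *
 *	unsigned long sram;			// hypothetical big-endian device memory
 *	u64 word = __raw_readq(sram);		// no byte swapping applied
 *	__raw_writeq(word | 1UL, sram);
 */
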
/* Valid I/O Space regions are anywhere, because each PCI bus supported
 * can live in an arbitrary area of the physical address range.
 */
#define IO_SPACE_LIMIT 0xffffffffffffffffUL

/* Now, SBUS variants, only difference from PCI is that we do
 * not use little-endian ASIs.
 */
static inline u8 _sbus_readb(const volatile void __iomem *addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* sbus_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

static inline u16 _sbus_readw(const volatile void __iomem *addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* sbus_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

static inline u32 _sbus_readl(const volatile void __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* sbus_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

static inline u64 _sbus_readq(const volatile void __iomem *addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* sbus_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

static inline void _sbus_writeq(u64 q, volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

#define sbus_readb(__addr)		_sbus_readb(__addr)
#define sbus_readw(__addr)		_sbus_readw(__addr)
#define sbus_readl(__addr)		_sbus_readl(__addr)
#define sbus_readq(__addr)		_sbus_readq(__addr)
#define sbus_writeb(__b, __addr)	_sbus_writeb(__b, __addr)
#define sbus_writew(__w, __addr)	_sbus_writew(__w, __addr)
#define sbus_writel(__l, __addr)	_sbus_writel(__l, __addr)
#define sbus_writeq(__q, __addr)	_sbus_writeq(__q, __addr)

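/* A minimal sketch, assuming a hypothetical SBUS device mapped at regs with
 * sbus_ioremap() below: SBUS is big-endian, so these accessors deliberately
 * skip the byte swapping done by the PCI readl()/writel() family.
 *
 *	void __iomem *regs;			// hypothetical SBUS register block
 *	u32 csr = sbus_readl(regs + 0x08);	// big-endian 32-bit read
 *	sbus_writel(csr & ~0x1, regs + 0x08);
 */
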
static inline void _sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
	while (n--) {
		sbus_writeb(c, dst);
		dst++;
	}
}

#define sbus_memset_io(d,c,sz)	_sbus_memset_io(d,c,sz)

static inline void
_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
	volatile void __iomem *d = dst;

	while (n--) {
		writeb(c, d);
		d++;
	}
}

#define memset_io(d,c,sz)	_memset_io(d,c,sz)

static inline void
_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
{
	char *d = dst;

	while (n--) {
		char tmp = readb(src);
		*d++ = tmp;
		src++;
	}
}

#define memcpy_fromio(d,s,sz)	_memcpy_fromio(d,s,sz)

static inline void
_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
{
	const char *s = src;
	volatile void __iomem *d = dst;

	while (n--) {
		char tmp = *s++;
		writeb(tmp, d);
		d++;
	}
}

#define memcpy_toio(d,s,sz)	_memcpy_toio(d,s,sz)

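/* A minimal sketch of the block helpers above, assuming a hypothetical
 * shared-memory window win and a driver-private staging buffer:
 *
 *	void __iomem *win;			// hypothetical device memory window
 *	u8 shadow[256];
 *	memset_io(win, 0, 256);			// clear device memory byte by byte
 *	memcpy_toio(win, shadow, sizeof(shadow));
 *	memcpy_fromio(shadow, win, sizeof(shadow));
 */
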
#define mmiowb()

#ifdef __KERNEL__

/* On sparc64 we have the whole physical IO address space accessible
 * using physically addressed loads and stores, so this does nothing.
 */
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	return (void __iomem *)offset;
}

#define ioremap_nocache(X,Y)	ioremap((X),(Y))

static inline void iounmap(volatile void __iomem *addr)
{
}

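/* A minimal sketch: because ioremap() is an identity transform here, a
 * hypothetical driver can hand a physical bus address straight to it and then
 * use the normal MMIO accessors on the returned cookie.
 *
 *	unsigned long phys;			// hypothetical register physical address
 *	void __iomem *regs = ioremap(phys, 0x1000);
 *	u32 v = readl(regs);
 *	iounmap(regs);
 */
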
#define ioread8(X)		readb(X)
#define ioread16(X)		readw(X)
#define ioread32(X)		readl(X)
#define iowrite8(val,X)		writeb(val,X)
#define iowrite16(val,X)	writew(val,X)
#define iowrite32(val,X)	writel(val,X)

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

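/* A minimal pci_iomap()-based sketch (device and BAR number are hypothetical),
 * showing how the ioreadN()/iowriteN() cookie API composes with the mapping
 * helpers declared above:
 *
 *	struct pci_dev *pdev;			// hypothetical PCI device
 *	void __iomem *bar = pci_iomap(pdev, 0, 0);	// map all of BAR 0
 *	if (bar) {
 *		u32 id = ioread32(bar + 0x00);
 *		iowrite32(0x1, bar + 0x04);
 *		pci_iounmap(pdev, bar);
 *	}
 */
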
/* Similarly for SBUS. */
#define sbus_ioremap(__res, __offset, __size, __name) \
({	unsigned long __ret; \
	__ret  = (__res)->start + (((__res)->flags & 0x1ffUL) << 32UL); \
	__ret += (unsigned long) (__offset); \
	if (! request_region((__ret), (__size), (__name))) \
		__ret = 0UL; \
	(void __iomem *) __ret; \
})

#define sbus_iounmap(__addr, __size)	\
	release_region((unsigned long)(__addr), (__size))

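/* A minimal sketch of the SBUS mapping helpers above, assuming a hypothetical
 * struct resource res describing an SBUS device's register space:
 *
 *	struct resource *res;			// hypothetical SBUS resource
 *	void __iomem *regs = sbus_ioremap(res, 0, 0x100, "mydev-regs");
 *	if (regs) {
 *		u32 v = sbus_readl(regs);
 *		sbus_iounmap(regs, 0x100);
 *	}
 */
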
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif

#endif /* !(__SPARC64_IO_H) */