#ifndef _ASM_IA64_IO_H
#define _ASM_IA64_IO_H

/*
 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that makes gcc able to optimize it as
 * well as possible and (b) avoid writing the same thing over and over
 * again with slight variations and possibly making a mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)

/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces so we can't place any a priori limit
 * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS	4
#define MAX_IO_SPACES		(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS		24
#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))

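/*
 * For example, with IO_SPACE_BITS == 24, a port cookie of 0x1003004
 * decomposes as:
 *
 *	IO_SPACE_NR(0x1003004)   == 1		(I/O space number)
 *	IO_SPACE_PORT(0x1003004) == 0x3004	(port within that space)
 *	IO_SPACE_BASE(1)         == 0x1000000
 */
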
#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
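
/*
 * For example, port 0x1f0 in a sparse space encodes to
 * ((0x1f0 >> 2) << 12) | (0x1f0 & 0xfff) == 0x7c1f0: each naturally
 * aligned group of four ports lands in its own 4KB page of MMIO space.
 */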

struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;

# ifdef __KERNEL__

/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK		(PIO_OFFSET - 1)
#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
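
/*
 * For example, with MAX_IO_SPACES_BITS == 4 and IO_SPACE_BITS == 24,
 * PIO_OFFSET is 0x10000000, so port 0x3004 in I/O space 1 becomes the
 * PIO cookie PIO_OFFSET | IO_SPACE_BASE(1) | 0x3004 == 0x11003004,
 * matching the 0x000000001SPPPPPP layout above.
 */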

#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm-generic/iomap.h>

/*
 * Change virtual addresses to physical addresses and vice versa.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
	return (unsigned long) address - PAGE_OFFSET;
}

static inline void*
phys_to_virt (unsigned long address)
{
	return (void *) (address + PAGE_OFFSET);
}
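
/*
 * These are simple offset conversions within the kernel's identity-mapped
 * cached region, so round-tripping is lossless, e.g. (sketch):
 *
 *	void *v = phys_to_virt(paddr);
 *	BUG_ON(virt_to_phys(v) != paddr);
 */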

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */

/*
 * The following macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys

# endif /* __KERNEL__ */

/*
 * Memory fence w/accept.  This should never be used in code that is
 * not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()

/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes.  This will make sure that writes
 * following the barrier will arrive after all previous writes.  For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 *
 * See Documentation/DocBook/deviceiobook.tmpl for more information.
 */
static inline void ___ia64_mmiowb(void)
{
	ia64_mfa();
}

static inline void*
__ia64_mk_io_addr (unsigned long port)
{
	struct io_space *space;
	unsigned long offset;

	space = &io_space[IO_SPACE_NR(port)];
	port = IO_SPACE_PORT(port);
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;

	return (void *) (space->mmio_base | offset);
}
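
/*
 * Continuing the earlier example: for port == 0x1003004 this selects
 * io_space[1] with a space-local port of 0x3004, returning
 * io_space[1].mmio_base | 0x3004 (or mmio_base | 0xc01004 if that
 * space uses the sparse encoding).
 */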

#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
#define __ia64_writeb	___ia64_writeb
#define __ia64_writew	___ia64_writew
#define __ia64_writel	___ia64_writel
#define __ia64_writeq	___ia64_writeq
#define __ia64_mmiowb	___ia64_mmiowb

/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncacheable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */

static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		*dp++ = platform_inw(port);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		*dp++ = platform_inl(port);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(*sp++, port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(*sp++, port);
}
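
/*
 * Usage sketch (hypothetical buffer): read 256 16-bit words from the
 * legacy IDE data port at 0x1f0 into memory:
 *
 *	u16 buf[256];
 *	insw(0x1f0, buf, 256);
 */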

/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb

#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()
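
/*
 * Typical mmiowb() use (sketch, hypothetical device): when MMIO writes from
 * different CPUs are serialized by a lock, issuing mmiowb() before unlocking
 * ensures this CPU's writes reach the device before the next lock holder's:
 *
 *	spin_lock(&dev_lock);
 *	writel(val, dev_regs + CTRL_OFF);
 *	mmiowb();
 *	spin_unlock(&dev_lock);
 */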

/*
 * The addresses passed to these functions are already ioremap()ed.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
 * a good idea).  Writes are ok though for all existing ia64 platforms (and
 * hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *) addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *) addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}

static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}

#define __readb		platform_readb
#define __readw		platform_readw
#define __readl		platform_readl
#define __readq		platform_readq
#define __readb_relaxed	platform_readb_relaxed
#define __readw_relaxed	platform_readw_relaxed
#define __readl_relaxed	platform_readl_relaxed
#define __readq_relaxed	platform_readq_relaxed

#define readb(a)	__readb((a))
#define readw(a)	__readw((a))
#define readl(a)	__readl((a))
#define readq(a)	__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb	readb
#define __raw_readw	readw
#define __raw_readl	readl
#define __raw_readq	readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq

#ifndef inb_p
# define inb_p		inb
#endif
#ifndef inw_p
# define inw_p		inw
#endif
#ifndef inl_p
# define inl_p		inl
#endif

#ifndef outb_p
# define outb_p		outb
#endif
#ifndef outw_p
# define outw_p		outw
#endif
#ifndef outl_p
# define outl_p		outl
#endif

/*
 * An "address" in IO memory space is not clearly either an integer or a pointer.  We will
 * accept both, thus the casts.
 *
 * On IA-64, we access the physical I/O memory space through the uncached kernel region.
 */
static inline void __iomem *
ioremap (unsigned long offset, unsigned long size)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
}

static inline void
iounmap (volatile void __iomem *addr)
{
}
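
/*
 * Usage sketch (hypothetical register block): map a physical MMIO range and
 * access it through readl()/writel() cookies:
 *
 *	void __iomem *regs = ioremap(0xfed00000, 0x1000);
 *	u32 status = readl(regs + 0x10);
 *	iounmap(regs);
 *
 * Since ioremap() here just ORs in __IA64_UNCACHED_OFFSET, iounmap() has
 * nothing to tear down and is a no-op.
 */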

#define ioremap_nocache(o,s)	ioremap(o,s)

# ifdef __KERNEL__

/*
 * String versions of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
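
/*
 * E.g. (sketch, hypothetical regs/SRAM_OFF): snapshot a 64-byte device
 * SRAM window into a local buffer:
 *
 *	char buf[64];
 *	memcpy_fromio(buf, regs + SRAM_OFF, sizeof(buf));
 */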

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

# endif /* __KERNEL__ */

/*
 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
 * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
 * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
 * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
 * over BIO-level virtual merging.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#if 1
#define BIO_VMERGE_BOUNDARY	0
#else
/*
 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
 * replaced by dma_merge_mask() or something of that sort.  Note: the only way
 * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
 * expanded into:
 *
 *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
 *
 * which is precisely what we want.
 */
#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
#endif

#endif /* _ASM_IA64_IO_H */