#ifndef _ASM_IA64_IO_H
#define _ASM_IA64_IO_H

/*
 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl).  You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that makes gcc able to optimize it as
 * well as possible and (b) avoid writing the same thing over and over
 * again with slight variations and possibly making a mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)

/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces so we can't place any a priori limit
 * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS	8
#define MAX_IO_SPACES		(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS		24
#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))
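
/*
 * Worked example (illustrative values): port number 0x20000ff decomposes
 * into I/O space 2 (0x20000ff >> 24) and space-local port 0xff
 * (0x20000ff & 0x00ffffff).
 */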

#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
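
/*
 * Sparse encoding places each naturally aligned group of four ports on
 * its own 4KB page, keeping the low 12 port bits as the page offset.
 * Example (illustrative): p = 0x1f7 encodes to
 * ((0x1f7 >> 2) << 12) | (0x1f7 & 0xfff) = 0x7d000 | 0x1f7 = 0x7d1f7.
 */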

struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;

# ifdef __KERNEL__

/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK		(PIO_OFFSET - 1)
#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
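
/*
 * Example cookie values (illustrative): PIO_OFFSET is 1UL << 32, so
 * legacy port 0x3f8 in I/O space 0 corresponds to the PIO cookie
 * 0x00000001000003f8, while ioremap() always returns region-6 MMIO
 * cookies of the form 0xCxxxxxxxxxxxxxxx.
 */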

#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm-generic/iomap.h>

/*
 * Change virtual addresses to physical addresses and vice versa.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
	return (unsigned long) address - PAGE_OFFSET;
}

static inline void*
phys_to_virt (unsigned long address)
{
	return (void *) (address + PAGE_OFFSET);
}
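
/*
 * These are simple offsets into the kernel's identity mapping: physical
 * address 0x4000, for example, corresponds to the virtual address
 * PAGE_OFFSET + 0x4000.  They are only valid for identity-mapped
 * memory, not for vmalloc()ed or ioremap()ed addresses.
 */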

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);

/*
 * The following macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys

# endif /* __KERNEL__ */

/*
 * Memory fence with acceptance ("mf.a").  This should never be used in
 * code that is not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()

/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes.  This will make sure that writes
 * following the barrier will arrive after all previous writes.  For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 *
 * See Documentation/DocBook/deviceiobook.tmpl for more information.
 */
static inline void ___ia64_mmiowb(void)
{
	ia64_mfa();
}

static inline void*
__ia64_mk_io_addr (unsigned long port)
{
	struct io_space *space;
	unsigned long offset;

	space = &io_space[IO_SPACE_NR(port)];
	port = IO_SPACE_PORT(port);
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;

	return (void *) (space->mmio_base | offset);
}
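
/*
 * Worked example (illustrative): port 0x20000ff selects io_space[2] with
 * space-local port 0xff.  A dense space yields mmio_base | 0xff; a sparse
 * space yields mmio_base | 0x3f0ff, since ((0xff >> 2) << 12) | 0xff =
 * 0x3f000 | 0xff.
 */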

#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
/* writes are not machine vectors; map the defaults to the plain inlines below */
#define __ia64_writeb	__writeb
#define __ia64_writew	__writew
#define __ia64_writel	__writel
#define __ia64_writeq	__writeq
#define __ia64_mmiowb	___ia64_mmiowb

/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncacheable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */

static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		*dp++ = platform_inw(port);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		*dp++ = platform_inl(port);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(*sp++, port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(*sp++, port);
}

/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb

#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()
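
/*
 * Typical driver usage of the port accessors (illustrative sketch;
 * DEV_DATA_PORT is a hypothetical device port number):
 *
 *	status = inb(0x3f8 + 5);	read a legacy UART status port
 *	outb(0xab, DEV_DATA_PORT);	write one byte
 *	insw(DEV_DATA_PORT, buf, 256);	read 256 16-bit words into buf
 *	mmiowb();			order MMIO writes, e.g. before
 *					releasing a spinlock
 */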

/*
 * The addresses passed to these functions have already been ioremap()ed.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
 * a good idea).  Writes are ok though for all existing ia64 platforms (and
 * hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *) addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *) addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}

static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}

#define __readb			platform_readb
#define __readw			platform_readw
#define __readl			platform_readl
#define __readq			platform_readq
#define __readb_relaxed		platform_readb_relaxed
#define __readw_relaxed		platform_readw_relaxed
#define __readl_relaxed		platform_readl_relaxed
#define __readq_relaxed		platform_readq_relaxed

#define readb(a)		__readb((a))
#define readw(a)		__readw((a))
#define readl(a)		__readl((a))
#define readq(a)		__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb		readb
#define __raw_readw		readw
#define __raw_readl		readl
#define __raw_readq		readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq

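/*
 * ia64 needs no extra delay for "pausing" port I/O (see the SLOW_DOWN_IO
 * definitions at the top of this file), so the _p variants simply alias
 * the plain accessors unless a platform header has already defined them.
 */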
#ifndef inb_p
# define inb_p		inb
#endif
#ifndef inw_p
# define inw_p		inw
#endif
#ifndef inl_p
# define inl_p		inl
#endif

#ifndef outb_p
# define outb_p		outb
#endif
#ifndef outw_p
# define outw_p		outw
#endif
#ifndef outl_p
# define outl_p		outl
#endif

# ifdef __KERNEL__

extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);

static inline void
iounmap (volatile void __iomem *addr)
{
}

/* Use normal IO mappings for DMI */
#define dmi_ioremap ioremap
#define dmi_iounmap(x,l) iounmap(x)
#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
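
/*
 * Example use (illustrative; "regs" is an ioremap()ed cookie and
 * FIFO_OFF a hypothetical register offset):
 *
 *	memcpy_fromio(buf, regs + FIFO_OFF, 64);
 *	memset_io(regs, 0, 128);
 */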

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

# endif /* __KERNEL__ */

/*
 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
 * BIO-level virtual merging can give a performance boost of up to 4% (not verified for
 * ia64).  On the other hand, we know that I/O MMU bypassing gives ~8% performance
 * improvement on SPECweb-like workloads on zx1-based machines.  Thus, for now we favor
 * I/O MMU bypassing over BIO-level virtual merging.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#if 1
#define BIO_VMERGE_BOUNDARY	0
#else
/*
 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  It should be
 * replaced by dma_merge_mask() or something of that sort.  Note: the only way
 * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
 * expanded into:
 *
 *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
 *
 * which is precisely what we want.
 */
#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
#endif

#endif /* _ASM_IA64_IO_H */