/* NOTE(review): this file was recovered from a git-blame table view
   (commit 1da177e4, Linus Torvalds import); annotation columns removed. */
1 | #ifndef __ALPHA_IO_H |
2 | #define __ALPHA_IO_H | |
3 | ||
4 | #ifdef __KERNEL__ | |
5 | ||
1da177e4 LT |
6 | #include <linux/kernel.h> |
7 | #include <asm/compiler.h> | |
8 | #include <asm/system.h> | |
9 | #include <asm/pgtable.h> | |
10 | #include <asm/machvec.h> | |
11 | #include <asm/hwrpb.h> | |
12 | ||
13 | /* The generic header contains only prototypes. Including it ensures that | |
14 | the implementation we have here matches that interface. */ | |
15 | #include <asm-generic/iomap.h> | |
16 | ||
/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset.
 * With the 48-bit KSEG the window sits at 0xffff8...; otherwise the
 * classic 43-bit KSEG base is used.
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR	0xffff800000000000UL
#else
#define IDENT_ADDR	0xfffffc0000000000UL
#endif
29 | ||
30 | /* | |
31 | * We try to avoid hae updates (thus the cache), but when we | |
32 | * do need to update the hae, we need to do it atomically, so | |
33 | * that any interrupts wouldn't get confused with the hae | |
34 | * register not being up-to-date with respect to the hardware | |
35 | * value. | |
36 | */ | |
37 | static inline void __set_hae(unsigned long new_hae) | |
38 | { | |
39 | unsigned long flags; | |
40 | local_irq_save(flags); | |
41 | ||
42 | alpha_mv.hae_cache = new_hae; | |
43 | *alpha_mv.hae_register = new_hae; | |
44 | mb(); | |
45 | /* Re-read to make sure it was written. */ | |
46 | new_hae = *alpha_mv.hae_register; | |
47 | ||
48 | local_irq_restore(flags); | |
49 | } | |
50 | ||
51 | static inline void set_hae(unsigned long new_hae) | |
52 | { | |
53 | if (new_hae != alpha_mv.hae_cache) | |
54 | __set_hae(new_hae); | |
55 | } | |
56 | ||
57 | /* | |
58 | * Change virtual addresses to physical addresses and vv. | |
59 | */ | |
60 | #ifdef USE_48_BIT_KSEG | |
61 | static inline unsigned long virt_to_phys(void *address) | |
62 | { | |
63 | return (unsigned long)address - IDENT_ADDR; | |
64 | } | |
65 | ||
66 | static inline void * phys_to_virt(unsigned long address) | |
67 | { | |
68 | return (void *) (address + IDENT_ADDR); | |
69 | } | |
70 | #else | |
71 | static inline unsigned long virt_to_phys(void *address) | |
72 | { | |
73 | unsigned long phys = (unsigned long)address; | |
74 | ||
75 | /* Sign-extend from bit 41. */ | |
76 | phys <<= (64 - 41); | |
77 | phys = (long)phys >> (64 - 41); | |
78 | ||
79 | /* Crop to the physical address width of the processor. */ | |
80 | phys &= (1ul << hwrpb->pa_bits) - 1; | |
81 | ||
82 | return phys; | |
83 | } | |
84 | ||
85 | static inline void * phys_to_virt(unsigned long address) | |
86 | { | |
87 | return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1))); | |
88 | } | |
89 | #endif | |
90 | ||
#define page_to_phys(page)	page_to_pa(page)

/* This depends on working iommu.  */
#define BIO_VMERGE_BOUNDARY	(alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)

/* Maximum PIO space address supported?  */
#define IO_SPACE_LIMIT 0xffff
98 | ||
/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the mapping functions in <asm/pci.h>.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long virt_to_bus(void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	/* Physical addresses outside the direct-map window have no bus
	   equivalent; report those as 0.  */
	return phys <= __direct_map_size ? bus : 0;
}
1b75b05b | 116 | #define isa_virt_to_bus virt_to_bus |
1da177e4 LT |
117 | |
118 | static inline void *bus_to_virt(unsigned long address) | |
119 | { | |
120 | void *virt; | |
121 | ||
122 | /* This check is a sanity check but also ensures that bus address 0 | |
123 | maps to virtual address 0 which is useful to detect null pointers | |
124 | (the NCR driver is much simpler if NULL pointers are preserved). */ | |
125 | address -= __direct_map_base; | |
126 | virt = phys_to_virt(address); | |
127 | return (long)address <= 0 ? NULL : virt; | |
128 | } | |
129 | ||
130 | /* | |
131 | * There are different chipsets to interface the Alpha CPUs to the world. | |
132 | */ | |
133 | ||
134 | #define IO_CONCAT(a,b) _IO_CONCAT(a,b) | |
135 | #define _IO_CONCAT(a,b) a ## _ ## b | |
136 | ||
137 | #ifdef CONFIG_ALPHA_GENERIC | |
138 | ||
139 | /* In a generic kernel, we always go through the machine vector. */ | |
140 | ||
141 | #define REMAP1(TYPE, NAME, QUAL) \ | |
142 | static inline TYPE generic_##NAME(QUAL void __iomem *addr) \ | |
143 | { \ | |
144 | return alpha_mv.mv_##NAME(addr); \ | |
145 | } | |
146 | ||
147 | #define REMAP2(TYPE, NAME, QUAL) \ | |
148 | static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \ | |
149 | { \ | |
150 | alpha_mv.mv_##NAME(b, addr); \ | |
151 | } | |
152 | ||
153 | REMAP1(unsigned int, ioread8, /**/) | |
154 | REMAP1(unsigned int, ioread16, /**/) | |
155 | REMAP1(unsigned int, ioread32, /**/) | |
156 | REMAP1(u8, readb, const volatile) | |
157 | REMAP1(u16, readw, const volatile) | |
158 | REMAP1(u32, readl, const volatile) | |
159 | REMAP1(u64, readq, const volatile) | |
160 | ||
161 | REMAP2(u8, iowrite8, /**/) | |
162 | REMAP2(u16, iowrite16, /**/) | |
163 | REMAP2(u32, iowrite32, /**/) | |
164 | REMAP2(u8, writeb, volatile) | |
165 | REMAP2(u16, writew, volatile) | |
166 | REMAP2(u32, writel, volatile) | |
167 | REMAP2(u64, writeq, volatile) | |
168 | ||
169 | #undef REMAP1 | |
170 | #undef REMAP2 | |
171 | ||
172 | static inline void __iomem *generic_ioportmap(unsigned long a) | |
173 | { | |
174 | return alpha_mv.mv_ioportmap(a); | |
175 | } | |
176 | ||
177 | static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s) | |
178 | { | |
179 | return alpha_mv.mv_ioremap(a, s); | |
180 | } | |
181 | ||
182 | static inline void generic_iounmap(volatile void __iomem *a) | |
183 | { | |
184 | return alpha_mv.mv_iounmap(a); | |
185 | } | |
186 | ||
187 | static inline int generic_is_ioaddr(unsigned long a) | |
188 | { | |
189 | return alpha_mv.mv_is_ioaddr(a); | |
190 | } | |
191 | ||
192 | static inline int generic_is_mmio(const volatile void __iomem *a) | |
193 | { | |
194 | return alpha_mv.mv_is_mmio(a); | |
195 | } | |
196 | ||
197 | #define __IO_PREFIX generic | |
198 | #define generic_trivial_rw_bw 0 | |
199 | #define generic_trivial_rw_lq 0 | |
200 | #define generic_trivial_io_bw 0 | |
201 | #define generic_trivial_io_lq 0 | |
202 | #define generic_trivial_iounmap 0 | |
203 | ||
204 | #else | |
205 | ||
206 | #if defined(CONFIG_ALPHA_APECS) | |
207 | # include <asm/core_apecs.h> | |
208 | #elif defined(CONFIG_ALPHA_CIA) | |
209 | # include <asm/core_cia.h> | |
210 | #elif defined(CONFIG_ALPHA_IRONGATE) | |
211 | # include <asm/core_irongate.h> | |
212 | #elif defined(CONFIG_ALPHA_JENSEN) | |
213 | # include <asm/jensen.h> | |
214 | #elif defined(CONFIG_ALPHA_LCA) | |
215 | # include <asm/core_lca.h> | |
216 | #elif defined(CONFIG_ALPHA_MARVEL) | |
217 | # include <asm/core_marvel.h> | |
218 | #elif defined(CONFIG_ALPHA_MCPCIA) | |
219 | # include <asm/core_mcpcia.h> | |
220 | #elif defined(CONFIG_ALPHA_POLARIS) | |
221 | # include <asm/core_polaris.h> | |
222 | #elif defined(CONFIG_ALPHA_T2) | |
223 | # include <asm/core_t2.h> | |
224 | #elif defined(CONFIG_ALPHA_TSUNAMI) | |
225 | # include <asm/core_tsunami.h> | |
226 | #elif defined(CONFIG_ALPHA_TITAN) | |
227 | # include <asm/core_titan.h> | |
228 | #elif defined(CONFIG_ALPHA_WILDFIRE) | |
229 | # include <asm/core_wildfire.h> | |
230 | #else | |
231 | #error "What system is this?" | |
232 | #endif | |
233 | ||
234 | #endif /* GENERIC */ | |
235 | ||
236 | /* | |
237 | * We always have external versions of these routines. | |
238 | */ | |
239 | extern u8 inb(unsigned long port); | |
240 | extern u16 inw(unsigned long port); | |
241 | extern u32 inl(unsigned long port); | |
242 | extern void outb(u8 b, unsigned long port); | |
243 | extern void outw(u16 b, unsigned long port); | |
244 | extern void outl(u32 b, unsigned long port); | |
245 | ||
246 | extern u8 readb(const volatile void __iomem *addr); | |
247 | extern u16 readw(const volatile void __iomem *addr); | |
248 | extern u32 readl(const volatile void __iomem *addr); | |
249 | extern u64 readq(const volatile void __iomem *addr); | |
250 | extern void writeb(u8 b, volatile void __iomem *addr); | |
251 | extern void writew(u16 b, volatile void __iomem *addr); | |
252 | extern void writel(u32 b, volatile void __iomem *addr); | |
253 | extern void writeq(u64 b, volatile void __iomem *addr); | |
254 | ||
255 | extern u8 __raw_readb(const volatile void __iomem *addr); | |
256 | extern u16 __raw_readw(const volatile void __iomem *addr); | |
257 | extern u32 __raw_readl(const volatile void __iomem *addr); | |
258 | extern u64 __raw_readq(const volatile void __iomem *addr); | |
259 | extern void __raw_writeb(u8 b, volatile void __iomem *addr); | |
260 | extern void __raw_writew(u16 b, volatile void __iomem *addr); | |
261 | extern void __raw_writel(u32 b, volatile void __iomem *addr); | |
262 | extern void __raw_writeq(u64 b, volatile void __iomem *addr); | |
263 | ||
264 | /* | |
265 | * Mapping from port numbers to __iomem space is pretty easy. | |
266 | */ | |
267 | ||
268 | /* These two have to be extern inline because of the extern prototype from | |
269 | <asm-generic/iomap.h>. It is not legal to mix "extern" and "static" for | |
270 | the same declaration. */ | |
271 | extern inline void __iomem *ioport_map(unsigned long port, unsigned int size) | |
272 | { | |
273 | return IO_CONCAT(__IO_PREFIX,ioportmap) (port); | |
274 | } | |
275 | ||
276 | extern inline void ioport_unmap(void __iomem *addr) | |
277 | { | |
278 | } | |
279 | ||
280 | static inline void __iomem *ioremap(unsigned long port, unsigned long size) | |
281 | { | |
282 | return IO_CONCAT(__IO_PREFIX,ioremap) (port, size); | |
283 | } | |
284 | ||
285 | static inline void __iomem *__ioremap(unsigned long port, unsigned long size, | |
286 | unsigned long flags) | |
287 | { | |
288 | return ioremap(port, size); | |
289 | } | |
290 | ||
291 | static inline void __iomem * ioremap_nocache(unsigned long offset, | |
292 | unsigned long size) | |
293 | { | |
294 | return ioremap(offset, size); | |
295 | } | |
296 | ||
297 | static inline void iounmap(volatile void __iomem *addr) | |
298 | { | |
299 | IO_CONCAT(__IO_PREFIX,iounmap)(addr); | |
300 | } | |
301 | ||
302 | static inline int __is_ioaddr(unsigned long addr) | |
303 | { | |
304 | return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr); | |
305 | } | |
306 | #define __is_ioaddr(a) __is_ioaddr((unsigned long)(a)) | |
307 | ||
308 | static inline int __is_mmio(const volatile void __iomem *addr) | |
309 | { | |
310 | return IO_CONCAT(__IO_PREFIX,is_mmio)(addr); | |
311 | } | |
312 | ||
313 | ||
314 | /* | |
315 | * If the actual I/O bits are sufficiently trivial, then expand inline. | |
316 | */ | |
317 | ||
318 | #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) | |
319 | extern inline unsigned int ioread8(void __iomem *addr) | |
320 | { | |
321 | unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); | |
322 | mb(); | |
323 | return ret; | |
324 | } | |
325 | ||
326 | extern inline unsigned int ioread16(void __iomem *addr) | |
327 | { | |
328 | unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); | |
329 | mb(); | |
330 | return ret; | |
331 | } | |
332 | ||
333 | extern inline void iowrite8(u8 b, void __iomem *addr) | |
334 | { | |
335 | IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); | |
336 | mb(); | |
337 | } | |
338 | ||
339 | extern inline void iowrite16(u16 b, void __iomem *addr) | |
340 | { | |
341 | IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); | |
342 | mb(); | |
343 | } | |
344 | ||
345 | extern inline u8 inb(unsigned long port) | |
346 | { | |
347 | return ioread8(ioport_map(port, 1)); | |
348 | } | |
349 | ||
350 | extern inline u16 inw(unsigned long port) | |
351 | { | |
352 | return ioread16(ioport_map(port, 2)); | |
353 | } | |
354 | ||
355 | extern inline void outb(u8 b, unsigned long port) | |
356 | { | |
357 | iowrite8(b, ioport_map(port, 1)); | |
358 | } | |
359 | ||
360 | extern inline void outw(u16 b, unsigned long port) | |
361 | { | |
362 | iowrite16(b, ioport_map(port, 2)); | |
363 | } | |
364 | #endif | |
365 | ||
366 | #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) | |
367 | extern inline unsigned int ioread32(void __iomem *addr) | |
368 | { | |
369 | unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); | |
370 | mb(); | |
371 | return ret; | |
372 | } | |
373 | ||
374 | extern inline void iowrite32(u32 b, void __iomem *addr) | |
375 | { | |
376 | IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); | |
377 | mb(); | |
378 | } | |
379 | ||
380 | extern inline u32 inl(unsigned long port) | |
381 | { | |
382 | return ioread32(ioport_map(port, 4)); | |
383 | } | |
384 | ||
385 | extern inline void outl(u32 b, unsigned long port) | |
386 | { | |
387 | iowrite32(b, ioport_map(port, 4)); | |
388 | } | |
389 | #endif | |
390 | ||
391 | #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 | |
392 | extern inline u8 __raw_readb(const volatile void __iomem *addr) | |
393 | { | |
394 | return IO_CONCAT(__IO_PREFIX,readb)(addr); | |
395 | } | |
396 | ||
397 | extern inline u16 __raw_readw(const volatile void __iomem *addr) | |
398 | { | |
399 | return IO_CONCAT(__IO_PREFIX,readw)(addr); | |
400 | } | |
401 | ||
402 | extern inline void __raw_writeb(u8 b, volatile void __iomem *addr) | |
403 | { | |
404 | IO_CONCAT(__IO_PREFIX,writeb)(b, addr); | |
405 | } | |
406 | ||
407 | extern inline void __raw_writew(u16 b, volatile void __iomem *addr) | |
408 | { | |
409 | IO_CONCAT(__IO_PREFIX,writew)(b, addr); | |
410 | } | |
411 | ||
412 | extern inline u8 readb(const volatile void __iomem *addr) | |
413 | { | |
414 | u8 ret = __raw_readb(addr); | |
415 | mb(); | |
416 | return ret; | |
417 | } | |
418 | ||
419 | extern inline u16 readw(const volatile void __iomem *addr) | |
420 | { | |
421 | u16 ret = __raw_readw(addr); | |
422 | mb(); | |
423 | return ret; | |
424 | } | |
425 | ||
426 | extern inline void writeb(u8 b, volatile void __iomem *addr) | |
427 | { | |
428 | __raw_writeb(b, addr); | |
429 | mb(); | |
430 | } | |
431 | ||
432 | extern inline void writew(u16 b, volatile void __iomem *addr) | |
433 | { | |
434 | __raw_writew(b, addr); | |
435 | mb(); | |
436 | } | |
437 | #endif | |
438 | ||
439 | #if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 | |
440 | extern inline u32 __raw_readl(const volatile void __iomem *addr) | |
441 | { | |
442 | return IO_CONCAT(__IO_PREFIX,readl)(addr); | |
443 | } | |
444 | ||
445 | extern inline u64 __raw_readq(const volatile void __iomem *addr) | |
446 | { | |
447 | return IO_CONCAT(__IO_PREFIX,readq)(addr); | |
448 | } | |
449 | ||
450 | extern inline void __raw_writel(u32 b, volatile void __iomem *addr) | |
451 | { | |
452 | IO_CONCAT(__IO_PREFIX,writel)(b, addr); | |
453 | } | |
454 | ||
455 | extern inline void __raw_writeq(u64 b, volatile void __iomem *addr) | |
456 | { | |
457 | IO_CONCAT(__IO_PREFIX,writeq)(b, addr); | |
458 | } | |
459 | ||
460 | extern inline u32 readl(const volatile void __iomem *addr) | |
461 | { | |
462 | u32 ret = __raw_readl(addr); | |
463 | mb(); | |
464 | return ret; | |
465 | } | |
466 | ||
467 | extern inline u64 readq(const volatile void __iomem *addr) | |
468 | { | |
469 | u64 ret = __raw_readq(addr); | |
470 | mb(); | |
471 | return ret; | |
472 | } | |
473 | ||
474 | extern inline void writel(u32 b, volatile void __iomem *addr) | |
475 | { | |
476 | __raw_writel(b, addr); | |
477 | mb(); | |
478 | } | |
479 | ||
480 | extern inline void writeq(u64 b, volatile void __iomem *addr) | |
481 | { | |
482 | __raw_writeq(b, addr); | |
483 | mb(); | |
484 | } | |
485 | #endif | |
486 | ||
/* No PIO slowdown is needed on Alpha, so the _p variants are plain.  */
#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl

/* The relaxed forms map to the barrier-free __raw_ accessors.  */
#define readb_relaxed(addr) __raw_readb(addr)
#define readw_relaxed(addr) __raw_readw(addr)
#define readl_relaxed(addr) __raw_readl(addr)
#define readq_relaxed(addr) __raw_readq(addr)

#define mmiowb()
499 | ||
500 | /* | |
501 | * String version of IO memory access ops: | |
502 | */ | |
503 | extern void memcpy_fromio(void *, const volatile void __iomem *, long); | |
504 | extern void memcpy_toio(volatile void __iomem *, const void *, long); | |
505 | extern void _memset_c_io(volatile void __iomem *, unsigned long, long); | |
506 | ||
507 | static inline void memset_io(volatile void __iomem *addr, u8 c, long len) | |
508 | { | |
509 | _memset_c_io(addr, 0x0101010101010101UL * c, len); | |
510 | } | |
511 | ||
512 | #define __HAVE_ARCH_MEMSETW_IO | |
513 | static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) | |
514 | { | |
515 | _memset_c_io(addr, 0x0001000100010001UL * c, len); | |
516 | } | |
517 | ||
/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
527 | ||
1da177e4 LT |
/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70. Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170+(x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0

/* Caches are coherent with DMA on Alpha, so these are no-ops.  */

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

/*
 * Some mucking forons use if[n]def writeq to check if platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logics and make sure
 * that damn thing is defined and expands to itself.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p
573 | ||
574 | #endif /* __KERNEL__ */ | |
575 | ||
576 | #endif /* __ALPHA_IO_H */ |