/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>

#include <mangle-port.h>

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(x)	(x)
# define __raw_ioswabw(x)	(x)
# define __raw_ioswabl(x)	(x)
# define __raw_ioswabq(x)	(x)
# define ____raw_ioswabq(x)	(x)

/*
 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
 * less sane hardware forces software to fiddle with this...
 *
 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
 * you can't have the numerical value of data and byte addresses within
 * multibyte quantities both preserved at the same time.  Hence two
 * variations of functions: non-prefixed ones that preserve the value
 * and prefixed ones that preserve byte addresses.  The latter are
 * typically used for moving raw data between a peripheral and memory (cf.
 * string I/O functions), hence the "mem_" prefix.
 */
#if defined(CONFIG_SWAP_IO_SPACE)

# define ioswabb(x)		(x)
# define mem_ioswabb(x)		(x)
# ifdef CONFIG_SGI_IP22
/*
 * IP22 seems braindead enough to swap 16-bit values in hardware, but
 * not 32-bit ones.  Go figure... Can't tell without documentation.
 */
# define ioswabw(x)		(x)
# define mem_ioswabw(x)		le16_to_cpu(x)
# else
# define ioswabw(x)		le16_to_cpu(x)
# define mem_ioswabw(x)		(x)
# endif
# define ioswabl(x)		le32_to_cpu(x)
# define mem_ioswabl(x)		(x)
# define ioswabq(x)		le64_to_cpu(x)
# define mem_ioswabq(x)		(x)

#else

# define ioswabb(x)		(x)
# define mem_ioswabb(x)		(x)
# define ioswabw(x)		(x)
# define mem_ioswabw(x)		cpu_to_le16(x)
# define ioswabl(x)		(x)
# define mem_ioswabl(x)		cpu_to_le32(x)
# define ioswabq(x)		(x)
# define mem_ioswabq(x)		cpu_to_le64(x)

#endif
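
/*
 * Illustrative example (added commentary, not part of the original text):
 * with CONFIG_SWAP_IO_SPACE on a big-endian kernel, readl() applies
 * le32_to_cpu() so the numerical value presented by a little-endian
 * PCI/ISA device is preserved, while mem_readl() leaves the bytes
 * untouched so the byte addresses within the word are preserved, which
 * is what block transfers between a peripheral and memory want.
 */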

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

#define set_io_port_base(base) \
	do { * (unsigned long *) &mips_io_port_base = (base); } while (0)
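
/*
 * Added note: mips_io_port_base is declared const so the compiler may
 * treat it as a constant after boot, yet set_io_port_base() deliberately
 * casts the const away so platform setup code can patch the base exactly
 * once, early during boot.
 */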

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 *
 */

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif
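
/*
 * Added note: the *_p ("pause") port accessors generated further down pass
 * SLOW_DOWN_IO as their 'slow' argument.  Since CONF_SLOWDOWN_IO is
 * #undef'd above, SLOW_DOWN_IO normally expands to nothing and e.g.
 * outb_p() behaves exactly like outb(); enabling it inserts one (or, with
 * REALLY_SLOW_IO, four) dummy byte stores to port 0x80 after each access.
 */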

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}
169 | /* | |
170 | * ISA I/O bus memory addresses are 1:1 with the physical address. | |
171 | */ | |
172 | static inline unsigned long isa_virt_to_bus(volatile void * address) | |
173 | { | |
174 | return (unsigned long)address - PAGE_OFFSET; | |
175 | } | |
176 | ||
177 | static inline void * isa_bus_to_virt(unsigned long address) | |
178 | { | |
179 | return (void *)(address + PAGE_OFFSET); | |
180 | } | |
181 | ||
182 | #define isa_page_to_bus page_to_phys | |
183 | ||
184 | /* | |
185 | * However PCI ones are not necessarily 1:1 and therefore these interfaces | |
186 | * are forbidden in portable PCI drivers. | |
187 | * | |
188 | * Allow them for x86 for legacy drivers, though. | |
189 | */ | |
190 | #define virt_to_bus virt_to_phys | |
191 | #define bus_to_virt phys_to_virt | |
192 | ||
193 | /* | |
194 | * isa_slot_offset is the address where E(ISA) busaddress 0 is mapped | |
195 | * for the processor. This implies the assumption that there is only | |
196 | * one of these busses. | |
197 | */ | |
198 | extern unsigned long isa_slot_offset; | |
199 | ||
200 | /* | |
201 | * Change "struct page" to physical address. | |
202 | */ | |
203 | #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) | |
204 | ||
extern void * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(volatile void __iomem *addr);

static inline void * __ioremap_mode(phys_t offset, unsigned long size,
	unsigned long flags)
{
	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2-bit uncached attribute, therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void *) (unsigned long) (base + offset);
	}

	return __ioremap(offset, size, flags);
}
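
/*
 * Added note: on CPUs with 64-bit addressing the "mapping" is computed
 * arithmetically; the physical address is simply offset into an uncached
 * direct window (UNCAC_BASE, or IO_BASE for plain uncached requests), so
 * no page tables or TLB entries are set up.  Only the 32-bit case goes
 * through __ioremap() and creates a real mapping.
 */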

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * These two are MIPS-specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(volatile void __iomem *addr)
{
	if (cpu_has_64bit_addresses)
		return;

	__iounmap(addr);
}
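
/*
 * Added note: iounmap() is intentionally a no-op on 64-bit-address CPUs
 * because the corresponding ioremap() returned a direct window address
 * rather than a real mapping; only mappings created by __ioremap() need
 * to be torn down.
 */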

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
 \
static inline void pfx##write##bwlq(type val, \
				    volatile void __iomem *mem) \
{ \
	volatile type *__mem; \
	type __val; \
 \
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
 \
	__val = pfx##ioswab##bwlq(val); \
 \
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val; \
	else if (cpu_has_64bits) { \
		unsigned long __flags; \
		type __tmp; \
 \
		if (irq) \
			local_irq_save(__flags); \
		__asm__ __volatile__( \
			".set	mips3"		"\t\t# __writeq""\n\t" \
			"dsll32	%L0, %L0, 0"	"\n\t" \
			"dsrl32	%L0, %L0, 0"	"\n\t" \
			"dsll32	%M0, %M0, 0"	"\n\t" \
			"or	%L0, %L0, %M0"	"\n\t" \
			"sd	%L0, %2"	"\n\t" \
			".set	mips0"		"\n" \
			: "=r" (__tmp) \
			: "0" (__val), "m" (*__mem)); \
		if (irq) \
			local_irq_restore(__flags); \
	} else \
		BUG(); \
} \
 \
static inline type pfx##read##bwlq(volatile void __iomem *mem) \
{ \
	volatile type *__mem; \
	type __val; \
 \
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
 \
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem; \
	else if (cpu_has_64bits) { \
		unsigned long __flags; \
 \
		if (irq) \
			local_irq_save(__flags); \
		__asm__ __volatile__( \
			".set	mips3"		"\t\t# __readq"	"\n\t" \
			"ld	%L0, %1"	"\n\t" \
			"dsra32	%M0, %L0, 0"	"\n\t" \
			"sll	%L0, %L0, 0"	"\n\t" \
			".set	mips0"		"\n" \
			: "=r" (__val) \
			: "m" (*__mem)); \
		if (irq) \
			local_irq_restore(__flags); \
	} else { \
		__val = 0; \
		BUG(); \
	} \
 \
	return pfx##ioswab##bwlq(__val); \
}
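
/*
 * Added example (illustrative): __BUILD_MEMORY_SINGLE(, l, u32, 1) expands
 * to readl()/writel().  The 'irq' argument only matters for 64-bit accesses
 * on a 32-bit kernel: the value then lives in a register pair, and the
 * sequence that assembles/splits it around the single ld/sd is protected
 * with local_irq_save()/local_irq_restore() when irq is non-zero.
 */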

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
 \
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
	volatile type *__addr; \
	type __val; \
 \
	port = __swizzle_addr_##bwlq(port); \
	__addr = (void *)(mips_io_port_base + port); \
 \
	__val = pfx##ioswab##bwlq(val); \
 \
	if (sizeof(type) != sizeof(u64)) { \
		*__addr = __val; \
		slow; \
	} else \
		BUILD_BUG(); \
} \
 \
static inline type pfx##in##bwlq##p(unsigned long port) \
{ \
	volatile type *__addr; \
	type __val; \
 \
	port = __swizzle_addr_##bwlq(port); \
	__addr = (void *)(mips_io_port_base + port); \
 \
	if (sizeof(type) != sizeof(u64)) { \
		__val = *__addr; \
		slow; \
	} else { \
		__val = 0; \
		BUILD_BUG(); \
	} \
 \
	return pfx##ioswab##bwlq(__val); \
}
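
/*
 * Added example (illustrative): port I/O is just a memory access at
 * mips_io_port_base + port, so __BUILD_IOPORT_SINGLE(, b, u8, ,) yields
 * outb()/inb() and __BUILD_IOPORT_SINGLE(, b, u8, _p, SLOW_DOWN_IO)
 * yields outb_p()/inb_p().  64-bit port accesses are rejected via
 * BUILD_BUG().
 */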

#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
 \
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
 \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO(bwlq, type) \
 \
__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
__BUILD_MEMORY_PFX(, bwlq, type) \
__BUILD_MEMORY_PFX(mem_, bwlq, type) \
__BUILD_IOPORT_PFX(, bwlq, type) \
__BUILD_IOPORT_PFX(mem_, bwlq, type)

#define __BUILDIO(bwlq, type) \
 \
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

BUILDIO(b, u8)
BUILDIO(w, u16)
BUILDIO(l, u32)
BUILDIO(q, u64)

__BUILDIO(q, u64)
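
/*
 * Added summary: for each width the BUILDIO() invocations above generate
 * readX()/writeX() and __raw_readX()/__raw_writeX() MMIO accessors,
 * mem_readX()/mem_writeX() byte-address-preserving accessors, plus the
 * inX()/outX() and inX_p()/outX_p() port accessors and their mem_
 * counterparts.  __BUILDIO(q, u64) additionally provides
 * ____raw_readq()/____raw_writeq(), which skip the interrupt disabling.
 */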

#define readb_relaxed		readb
#define readw_relaxed		readw
#define readl_relaxed		readl
#define readq_relaxed		readq

/*
 * Some code tests for these symbols
 */
#define readq			readq
#define writeq			writeq

#define __BUILD_MEMORY_STRING(bwlq, type) \
 \
static inline void writes##bwlq(volatile void __iomem *mem, void *addr, \
				unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		mem_write##bwlq(*__addr, mem); \
		__addr++; \
	} \
} \
 \
static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
			       unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		*__addr = mem_read##bwlq(mem); \
		__addr++; \
	} \
}

#define __BUILD_IOPORT_STRING(bwlq, type) \
 \
static inline void outs##bwlq(unsigned long port, void *addr, \
			      unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		mem_out##bwlq(*__addr, port); \
		__addr++; \
	} \
} \
 \
static inline void ins##bwlq(unsigned long port, void *addr, \
			     unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		*__addr = mem_in##bwlq(port); \
		__addr++; \
	} \
}

#define BUILDSTRING(bwlq, type) \
 \
__BUILD_MEMORY_STRING(bwlq, type) \
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
BUILDSTRING(q, u64)
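
/*
 * Added note: the string accessors (readsX()/writesX(), insX()/outsX())
 * deliberately use the mem_ accessor variants, so buffers are transferred
 * with byte addresses preserved rather than values swapped; this is what
 * block transfers between a device FIFO and memory expect.
 */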

/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")

#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))

/*
 * Memory Mapped I/O
 */
#define ioread8(addr)		readb(addr)
#define ioread16(addr)		readw(addr)
#define ioread32(addr)		readl(addr)

#define iowrite8(b,addr)	writeb(b,addr)
#define iowrite16(w,addr)	writew(w,addr)
#define iowrite32(l,addr)	writel(l,addr)

#define ioread8_rep(a,b,c)	readsb(a,b,c)
#define ioread16_rep(a,b,c)	readsw(a,b,c)
#define ioread32_rep(a,b,c)	readsl(a,b,c)

#define iowrite8_rep(a,b,c)	writesb(a,b,c)
#define iowrite16_rep(a,b,c)	writesw(a,b,c)
#define iowrite32_rep(a,b,c)	writesl(a,b,c)

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

/*
 * ISA space is 'always mapped' on currently supported MIPS systems, so
 * there is no need to explicitly ioremap() it.  The fact that the ISA IO
 * space is mapped to PAGE_OFFSET is pure coincidence - it does not mean
 * ISA values are physical addresses.  The following constant pointer can
 * be used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))

#define isa_readb(a)		readb(__ISA_IO_base + (a))
#define isa_readw(a)		readw(__ISA_IO_base + (a))
#define isa_readl(a)		readl(__ISA_IO_base + (a))
#define isa_readq(a)		readq(__ISA_IO_base + (a))
#define isa_writeb(b,a)		writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a)		writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a)		writel(l,__ISA_IO_base + (a))
#define isa_writeq(q,a)		writeq(q,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c)	memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c)	memcpy_toio(__ISA_IO_base + (a),(b),(c))

/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it.  The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused)	memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(b),(c),(d))

/*
 * check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the mmio address io_addr.  This
 * address should have been obtained by ioremap.
 * Returns 1 on a match.
 */
static inline int check_signature(char __iomem *io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
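
/*
 * Illustrative use (added; names are hypothetical, not part of this header):
 *
 *	char __iomem *rom = ioremap(EXAMPLE_ROM_BASE, EXAMPLE_ROM_SIZE);
 *	if (rom && check_signature(rom, "BIOS", 4))
 *		... signature found ...
 */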

/*
 * The caches on some architectures aren't dma-coherent, so this has to
 * be handled in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 * - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *   writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
 * - dma_cache_wback(start, size) makes caches and memory coherent by
 *   writing the content of the caches back to memory, if necessary,
 *   before DMA transfers from memory to outside.
 * - dma_cache_inv(start, size) invalidates the affected parts of the
 *   caches.  Dirty lines of the caches may be written back or simply
 *   be discarded.  This operation is necessary before dma operations
 *   to the memory.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start,size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start,size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start,size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST	4
#else
#define __CSR_32_ADJUST	0
#endif

#define csr_out32(v,a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
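
/*
 * Added example: the macros above assume the register's data sits in the
 * least-significant 32 bits of the 64-bit bus word.  On a big-endian
 * (__MIPSEB__) kernel those bytes live at byte offset 4, so csr_in32(addr)
 * really accesses addr + 4; on a little-endian kernel the adjustment is 0.
 */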

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* _ASM_IO_H */