/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing chat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}
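
/*
 * Illustrative sketch (not part of this header): platform setup code
 * normally points the port base at an uncached window once, early in boot,
 * e.g.
 *
 *	set_io_port_base((unsigned long) CKSEG1ADDR(0x18000000));
 *
 * where 0x18000000 stands in for a board specific physical base of the
 * I/O port window; note that the low 16 bits are zero, as required above.
 */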

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 *
 */

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
}

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
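
/*
 * Illustrative sketch (not part of this header): for directly mapped
 * memory such as a kmalloc'ed buffer the two helpers above are inverses,
 * e.g.
 *
 *	void *buf = kmalloc(256, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *	void *va = phys_to_virt(pa);		now va == buf
 *
 * As noted above, drivers should use the DMA API rather than passing such
 * addresses to devices.
 */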

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void * isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
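
/*
 * Illustrative sketch (not part of this header), using a made-up device
 * with a 4 KiB register window at physical address 0x1f000000:
 *
 *	void __iomem *regs = ioremap_nocache(0x1f000000, 0x1000);
 *
 *	if (regs) {
 *		u32 id = readl(regs);		32-bit MMIO read
 *		writel(1, regs + 0x10);		32-bit MMIO write
 *		iounmap(regs);
 *	}
 *
 * The address, offsets and values are hypothetical; readl/writel and
 * iounmap are provided further down in this file.
 */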

/*
 * ioremap_cachable - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)				\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)			\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define war_octeon_io_reorder_wmb()	wmb()
#else
#define war_octeon_io_reorder_wmb()	do { } while (0)
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	mips3"		"\t\t# __writeq""\n\t"	\
			"dsll32	%L0, %L0, 0"			"\n\t"	\
			"dsrl32	%L0, %L0, 0"			"\n\t"	\
			"dsll32	%M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	mips3"		"\t\t# __readq"	"\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32	%M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
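
/*
 * For each width the BUILDIO_MEM() expansions above generate three
 * families of MMIO accessors.  Sketch of what BUILDIO_MEM(l, u32)
 * provides (see __BUILD_MEMORY_SINGLE for the exact definitions):
 *
 *	u32  __raw_readl(const volatile void __iomem *mem);	no byte swapping
 *	void __raw_writel(u32 val, volatile void __iomem *mem);
 *	u32  readl(const volatile void __iomem *mem);		ioswab applied
 *	void writel(u32 val, volatile void __iomem *mem);
 *	u32  __mem_readl(const volatile void __iomem *mem);	used by string ops
 *	void __mem_writel(u32 val, volatile void __iomem *mem);
 */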

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
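
/*
 * Likewise, BUILDIO_IOPORT(b, u8) above yields the classic port accessors
 * (sketch of the generated prototypes):
 *
 *	void outb(u8 val, unsigned long port);
 *	u8   inb(unsigned long port);
 *	void outb_p(u8 val, unsigned long port);	with SLOW_DOWN_IO
 *	u8   inb_p(unsigned long port);
 *
 * plus __mem_outb/__mem_inb and so on, which the string helpers below use.
 */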

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
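
/*
 * Illustrative sketch: the *_be accessors above are meant for registers
 * with a fixed big-endian layout; they use the __raw_ accessors, so no
 * ioswab mangling is applied.  For a hypothetical big-endian ID register
 * at offset 0x08 of an ioremap()ed window 'regs':
 *
 *	u32 id = readl_be(regs + 0x08);
 *	writel_be(0x1, regs + 0x0c);
 *
 * The offsets and values are made up for illustration.
 */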

/*
 * Some code tests for these symbols
 */
#define readq readq
#define writeq writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
	__BUILD_MEMORY_STRING(bwlq, type)				\
	__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
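
/*
 * Illustrative sketch: the string forms generated above transfer a whole
 * buffer to or from one port or MMIO location, as used by FIFO style
 * devices.  With a hypothetical 16-bit data port 'data_port':
 *
 *	u16 buf[64];
 *
 *	insw(data_port, buf, ARRAY_SIZE(buf));		drain device FIFO
 *	outsw(data_port, buf, ARRAY_SIZE(buf));		refill device FIFO
 *
 * 'data_port' and the transfer size are placeholders, not defined here.
 */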

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
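
/*
 * Illustrative sketch: the helpers above bulk-copy between ordinary kernel
 * memory and an ioremap()ed window, e.g. for a hypothetical device buffer:
 *
 *	memcpy_toio(regs + BUF_OFFSET, data, len);
 *	memcpy_fromio(data, regs + BUF_OFFSET, len);
 *	memset_io(regs + BUF_OFFSET, 0, len);
 *
 * 'regs', BUF_OFFSET, 'data' and 'len' are placeholders for illustration.
 */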

/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
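
/*
 * Illustrative sketch, relevant only to arch-internal code on non-coherent
 * systems: dirty lines are written back before a device reads a buffer the
 * CPU filled, and stale lines are invalidated before the CPU reads a buffer
 * a device wrote:
 *
 *	dma_cache_wback((unsigned long) buf, len);	CPU -> device
 *	dma_cache_inv((unsigned long) buf, len);	device -> CPU
 *
 * Drivers should use the DMA mapping API instead; as noted above this
 * interface is no longer exported.
 */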

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
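
/*
 * Worked example: with __CSR_32_ADJUST == 4 (big-endian), a 32-bit register
 * carried in the 64-bit bus word at a hypothetical 8-byte aligned address
 * 0xb80000f0 is accessed at 0xb80000f4; with __CSR_32_ADJUST == 0
 * (little-endian) the address is used unchanged.
 */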

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* _ASM_IO_H */