From: Arnd Bergmann
Date: Tue, 11 Nov 2014 18:55:45 +0000 (+0100)
Subject: Merge branch 'io' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into asm-generic
X-Git-Url: http://drtracing.org/?a=commitdiff_plain;h=1c8d29696f0d79902962526d6c54ebfeb842c61d;p=deliverable%2Flinux.git

Merge branch 'io' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into asm-generic

* 'io' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux:
  documentation: memory-barriers: clarify relaxed io accessor semantics
  x86: io: implement dummy relaxed accessor macros for writes
  tile: io: implement dummy relaxed accessor macros for writes
  sparc: io: implement dummy relaxed accessor macros for writes
  powerpc: io: implement dummy relaxed accessor macros for writes
  parisc: io: implement dummy relaxed accessor macros for writes
  mn10300: io: implement dummy relaxed accessor macros for writes
  m68k: io: implement dummy relaxed accessor macros for writes
  m32r: io: implement dummy relaxed accessor macros for writes
  ia64: io: implement dummy relaxed accessor macros for writes
  cris: io: implement dummy relaxed accessor macros for writes
  frv: io: implement dummy relaxed accessor macros for writes
  xtensa: io: remove dummy relaxed accessor macros for reads
  s390: io: remove dummy relaxed accessor macros for reads
  microblaze: io: remove dummy relaxed accessor macros
  asm-generic: io: implement relaxed accessor macros as conditional wrappers

Conflicts:
	include/asm-generic/io.h

Signed-off-by: Arnd Bergmann
---

1c8d29696f0d79902962526d6c54ebfeb842c61d
diff --cc include/asm-generic/io.h
index 00483d769d86,fc8dc0eb203c..9db042304df3
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@@ -164,113 -96,42 +164,150 @@@ static inline void writel(u32 value, vo
  }
  #endif
  
 -#define writeb __raw_writeb
 +#ifdef CONFIG_64BIT
 +#ifndef writeq
 +#define writeq writeq
 +static inline void writeq(u64 value, volatile void __iomem *addr)
 +{
 +	__raw_writeq(__cpu_to_le64(value), addr);
 +}
 +#endif
 +#endif /* CONFIG_64BIT */
 +
++/*
++ * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
++ * are not guaranteed to provide ordering against spinlocks or memory
++ * accesses.
++ */
++#ifndef readb_relaxed
++#define readb_relaxed readb
++#endif
++
++#ifndef readw_relaxed
++#define readw_relaxed readw
++#endif
++
++#ifndef readl_relaxed
++#define readl_relaxed readl
++#endif
++
++#ifndef readq_relaxed
++#define readq_relaxed readq
++#endif
++
+ #ifndef writeb_relaxed
+ #define writeb_relaxed writeb
+ #endif
+ 
 -#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
+ #ifndef writew_relaxed
+ #define writew_relaxed writew
+ #endif
+ 
 -#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
+ #ifndef writel_relaxed
+ #define writel_relaxed writel
+ #endif
+ 
++#ifndef writeq_relaxed
++#define writeq_relaxed writeq
++#endif
++
 +/*
 + * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 + * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 + */
 +#ifndef readsb
 +#define readsb readsb
 +static inline void readsb(const volatile void __iomem *addr, void *buffer,
 +			  unsigned int count)
 +{
 +	if (count) {
 +		u8 *buf = buffer;
 +
 +		do {
 +			u8 x = __raw_readb(addr);
 +			*buf++ = x;
 +		} while (--count);
 +	}
 +}
 +#endif
 +
 +#ifndef readsw
 +#define readsw readsw
 +static inline void readsw(const volatile void __iomem *addr, void *buffer,
 +			  unsigned int count)
 +{
 +	if (count) {
 +		u16 *buf = buffer;
 +
 +		do {
 +			u16 x = __raw_readw(addr);
 +			*buf++ = x;
 +		} while (--count);
 +	}
 +}
 +#endif
 +
 +#ifndef readsl
 +#define readsl readsl
 +static inline void readsl(const volatile void __iomem *addr, void *buffer,
 +			  unsigned int count)
 +{
 +	if (count) {
 +		u32 *buf = buffer;
 +
 +		do {
 +			u32 x = __raw_readl(addr);
 +			*buf++ = x;
 +		} while (--count);
 +	}
 +}
 +#endif
 +
  #ifdef CONFIG_64BIT
 -#ifndef __raw_readq
 -static inline u64 __raw_readq(const volatile void __iomem *addr)
 +#ifndef readsq
 +#define readsq readsq
 +static inline void readsq(const volatile void __iomem *addr, void *buffer,
 +			  unsigned int count)
  {
 -	return *(const volatile u64 __force *) addr;
 +	if (count) {
 +		u64 *buf = buffer;
 +
 +		do {
 +			u64 x = __raw_readq(addr);
 +			*buf++ = x;
 +		} while (--count);
 +	}
  }
  #endif
 +#endif /* CONFIG_64BIT */

 -#define readq readq
 -static inline u64 readq(const volatile void __iomem *addr)
 +#ifndef writesb
 +#define writesb writesb
 +static inline void writesb(volatile void __iomem *addr, const void *buffer,
 +			   unsigned int count)
  {
 -	return __le64_to_cpu(__raw_readq(addr));
 +	if (count) {
 +		const u8 *buf = buffer;
 +
 +		do {
 +			__raw_writeb(*buf++, addr);
 +		} while (--count);
 +	}
  }
 -#ifndef readq_relaxed
 -#define readq_relaxed readq
  #endif

 -#ifndef __raw_writeq
 -static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
 +#ifndef writesw
 +#define writesw writesw
 +static inline void writesw(volatile void __iomem *addr, const void *buffer,
 +			   unsigned int count)
  {
 -	*(volatile u64 __force *) addr = b;
 +	if (count) {
 +		const u16 *buf = buffer;
 +
 +		do {
 +			__raw_writew(*buf++, addr);
 +		} while (--count);
 +	}
  }
  #endif
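
For context, the relaxed accessors defined above are meant for drivers that
issue many MMIO accesses needing ordering only against other accesses to the
same device, not against normal memory accesses or spinlocks. A minimal
sketch of that pattern, assuming a hypothetical device with invented
FIFO_DATA and DOORBELL register offsets (not part of this patch):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register offsets, for illustration only. */
#define FIFO_DATA	0x00
#define DOORBELL	0x04

static void fifo_push(void __iomem *base, const u32 *data, unsigned int n)
{
	unsigned int i;

	/*
	 * Relaxed writes: ordered with respect to each other and to
	 * other accesses from this CPU to the same device, but they
	 * carry no barrier against normal memory accesses or locks.
	 */
	for (i = 0; i < n; i++)
		writel_relaxed(data[i], base + FIFO_DATA);

	/* Fully ordered write to kick the device. */
	writel(1, base + DOORBELL);
}

On architectures where writel() implies a barrier, the relaxed variant can
be noticeably cheaper in the loop; on the rest, this merge makes it a plain
alias for the ordered accessor, so the code stays portable.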
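The {read,write}s{b,w,l,q}() string accessors differ from memcpy_fromio()
and memcpy_toio() in that every access targets the same register, which
matches how hardware FIFOs are read. A sketch along the same lines, again
with a made-up register offset:

#include <linux/io.h>
#include <linux/types.h>

#define FIFO_DATA8	0x00	/* hypothetical byte-wide data register */

/* Drain @len bytes from the device FIFO into @buf. */
static void fifo_drain(void __iomem *base, u8 *buf, unsigned int len)
{
	/*
	 * readsb() performs @len byte-sized reads, all from the same
	 * address; the device pops one FIFO entry per read. The data
	 * is transferred in native endianness, as the comment in the
	 * header states.
	 */
	readsb(base + FIFO_DATA8, buf, len);
}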