include/asm-powerpc/eeh.h
/*
 * eeh.h
 * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _PPC64_EEH_H
#define _PPC64_EEH_H
#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/string.h>

struct pci_dev;
struct pci_bus;
struct device_node;

#ifdef CONFIG_EEH

extern int eeh_subsystem_enabled;

/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED      (1<<0)
#define EEH_MODE_NOCHECK        (1<<1)
#define EEH_MODE_ISOLATED       (1<<2)
#define EEH_MODE_RECOVERING     (1<<3)
#define EEH_MODE_IRQ_DISABLED   (1<<4)
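
/*
 * These are independent bit flags in a node's eeh_mode word and may be
 * combined.  An illustrative sketch only; the local variable eeh_mode
 * below is hypothetical:
 *
 *      if ((eeh_mode & EEH_MODE_ISOLATED) &&
 *          !(eeh_mode & EEH_MODE_RECOVERING))
 *              ... the slot is frozen and no recovery is in progress ...
 */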

/* Max number of EEH freezes allowed before we consider the device
 * to be permanently disabled. */
#define EEH_MAX_ALLOWED_FREEZES 5

void __init eeh_init(void);
unsigned long eeh_check_failure(const volatile void __iomem *token,
                                unsigned long val);
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
void __init pci_addr_cache_build(void);

/**
 * eeh_add_device_tree_early
 * eeh_add_device_tree_late
 *
 * Perform eeh initialization for devices added after boot.
 * Call eeh_add_device_tree_early before doing any i/o to the
 * device (including config space i/o).  Call eeh_add_device_tree_late
 * to finish the eeh setup for this device.
 */
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
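
/*
 * A minimal sketch of the intended call order for a device added after
 * boot, based on the description above; the scan step in the middle is
 * hypothetical and stands in for whatever creates the pci devices:
 *
 *      eeh_add_device_tree_early(dn);     before any i/o, including
 *                                         config space i/o
 *      ... scan the slot and create the pci devices on the bus ...
 *      eeh_add_device_tree_late(bus);     finish EEH setup for the bus
 */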

/**
 * eeh_remove_bus_device - undo EEH for device & children.
 * @dev: pci device to be removed
 *
 * As above, this removes the device; it also removes any
 * child pci devices.
 */
void eeh_remove_bus_device(struct pci_dev *);

/**
 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
 *
 * If this macro yields TRUE, the caller relays to eeh_check_failure()
 * which does further tests out of line.
 */
#define EEH_POSSIBLE_ERROR(val, type)   ((val) == (type)~0 && eeh_subsystem_enabled)
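/*
 * For example, EEH_POSSIBLE_ERROR(val, u32) expands to
 * ((val) == (u32)~0 && eeh_subsystem_enabled): it is non-zero only when
 * a 32-bit read returned 0xffffffff and the EEH subsystem is enabled.
 */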

/*
 * Reads from a device which has been isolated by EEH will return
 * all 1s.  This macro gives an all-1s value of the given size (in
 * bytes: 1, 2, or 4) for comparing with the result of a read.
 */
#define EEH_IO_ERROR_VALUE(size)        (~0U >> ((4 - (size)) * 8))
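/*
 * Worked examples: EEH_IO_ERROR_VALUE(1) is 0xff, EEH_IO_ERROR_VALUE(2)
 * is 0xffff, and EEH_IO_ERROR_VALUE(4) is 0xffffffff.
 */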

#else /* !CONFIG_EEH */
static inline void eeh_init(void) { }

static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
        return val;
}

static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
        return 0;
}

static inline void pci_addr_cache_build(void) { }

static inline void eeh_add_device_tree_early(struct device_node *dn) { }

static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }

static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */

/*
 * MMIO read/write operations with EEH support.
 */
static inline u8 eeh_readb(const volatile void __iomem *addr)
{
        u8 val = in_8(addr);
        if (EEH_POSSIBLE_ERROR(val, u8))
                return eeh_check_failure(addr, val);
        return val;
}
static inline void eeh_writeb(u8 val, volatile void __iomem *addr)
{
        out_8(addr, val);
}
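
/*
 * Illustrative driver-side use of the read pattern above; priv->regs and
 * STATUS_REG are hypothetical names used only for this example:
 *
 *      u8 status = eeh_readb(priv->regs + STATUS_REG);
 *
 * If the device has been isolated, the read returns all 1s and
 * eeh_check_failure() performs the out-of-line checks before the value
 * is handed back.
 */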

static inline u16 eeh_readw(const volatile void __iomem *addr)
{
        u16 val = in_le16(addr);
        if (EEH_POSSIBLE_ERROR(val, u16))
                return eeh_check_failure(addr, val);
        return val;
}
static inline void eeh_writew(u16 val, volatile void __iomem *addr)
{
        out_le16(addr, val);
}
static inline u16 eeh_raw_readw(const volatile void __iomem *addr)
{
        u16 val = in_be16(addr);
        if (EEH_POSSIBLE_ERROR(val, u16))
                return eeh_check_failure(addr, val);
        return val;
}
static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr)
{
        volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr;
        out_be16(vaddr, val);
}

static inline u32 eeh_readl(const volatile void __iomem *addr)
{
        u32 val = in_le32(addr);
        if (EEH_POSSIBLE_ERROR(val, u32))
                return eeh_check_failure(addr, val);
        return val;
}
static inline void eeh_writel(u32 val, volatile void __iomem *addr)
{
        out_le32(addr, val);
}
static inline u32 eeh_raw_readl(const volatile void __iomem *addr)
{
        u32 val = in_be32(addr);
        if (EEH_POSSIBLE_ERROR(val, u32))
                return eeh_check_failure(addr, val);
        return val;
}
static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr)
{
        out_be32(addr, val);
}

static inline u64 eeh_readq(const volatile void __iomem *addr)
{
        u64 val = in_le64(addr);
        if (EEH_POSSIBLE_ERROR(val, u64))
                return eeh_check_failure(addr, val);
        return val;
}
static inline void eeh_writeq(u64 val, volatile void __iomem *addr)
{
        out_le64(addr, val);
}
static inline u64 eeh_raw_readq(const volatile void __iomem *addr)
{
        u64 val = in_be64(addr);
        if (EEH_POSSIBLE_ERROR(val, u64))
                return eeh_check_failure(addr, val);
        return val;
}
static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr)
{
        out_be64(addr, val);
}

#define EEH_CHECK_ALIGN(v,a) \
        ((((unsigned long)(v)) & ((a) - 1)) == 0)
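/*
 * For example, with the addresses below chosen only for illustration,
 * EEH_CHECK_ALIGN(0x1004, 4) is 1 while EEH_CHECK_ALIGN(0x1003, 4) is 0.
 */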

static inline void eeh_memset_io(volatile void __iomem *addr, int c,
                                 unsigned long n)
{
        void *p = (void __force *)addr;
        /* replicate the fill byte into all four byte lanes */
        u32 lc = c;
        lc |= lc << 8;
        lc |= lc << 16;

        /* byte stores until the target is 32-bit aligned */
        while (n && !EEH_CHECK_ALIGN(p, 4)) {
                *((volatile u8 *)p) = c;
                p++;
                n--;
        }
        /* bulk of the fill as aligned 32-bit stores */
        while (n >= 4) {
                *((volatile u32 *)p) = lc;
                p += 4;
                n -= 4;
        }
        /* trailing bytes */
        while (n) {
                *((volatile u8 *)p) = c;
                p++;
                n--;
        }
        __asm__ __volatile__ ("sync" : : : "memory");
}
static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
                                     unsigned long n)
{
        void *vsrc = (void __force *) src;
        void *destsave = dest;
        unsigned long nsave = n;

        while (n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
                *((u8 *)dest) = *((volatile u8 *)vsrc);
                __asm__ __volatile__ ("eieio" : : : "memory");
                vsrc++;
                dest++;
                n--;
        }
        while (n > 4) {
                *((u32 *)dest) = *((volatile u32 *)vsrc);
                __asm__ __volatile__ ("eieio" : : : "memory");
                vsrc += 4;
                dest += 4;
                n -= 4;
        }
        while (n) {
                *((u8 *)dest) = *((volatile u8 *)vsrc);
                __asm__ __volatile__ ("eieio" : : : "memory");
                vsrc++;
                dest++;
                n--;
        }
        __asm__ __volatile__ ("sync" : : : "memory");

        /* Look for ffff's here at dest[n].  Assume that at least 4 bytes
         * were copied.  Check all four bytes.  (The offset is in bytes,
         * so index from a byte pointer rather than a u32 pointer.)
         */
        if ((nsave >= 4) &&
            (EEH_POSSIBLE_ERROR((*(u32 *)((u8 *)destsave + nsave - 4)), u32))) {
                eeh_check_failure(src, (*(u32 *)((u8 *)destsave + nsave - 4)));
        }
}

static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
                                   unsigned long n)
{
        void *vdest = (void __force *) dest;

        while (n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
                *((volatile u8 *)vdest) = *((u8 *)src);
                src++;
                vdest++;
                n--;
        }
        while (n > 4) {
                *((volatile u32 *)vdest) = *((volatile u32 *)src);
                src += 4;
                vdest += 4;
                n -= 4;
        }
        while (n) {
                *((volatile u8 *)vdest) = *((u8 *)src);
                src++;
                vdest++;
                n--;
        }
        __asm__ __volatile__ ("sync" : : : "memory");
}

#undef EEH_CHECK_ALIGN

static inline u8 eeh_inb(unsigned long port)
{
        u8 val;
        val = in_8((u8 __iomem *)(port+pci_io_base));
        if (EEH_POSSIBLE_ERROR(val, u8))
                return eeh_check_failure((void __iomem *)(port), val);
        return val;
}

static inline void eeh_outb(u8 val, unsigned long port)
{
        out_8((u8 __iomem *)(port+pci_io_base), val);
}

static inline u16 eeh_inw(unsigned long port)
{
        u16 val;
        val = in_le16((u16 __iomem *)(port+pci_io_base));
        if (EEH_POSSIBLE_ERROR(val, u16))
                return eeh_check_failure((void __iomem *)(port), val);
        return val;
}

static inline void eeh_outw(u16 val, unsigned long port)
{
        out_le16((u16 __iomem *)(port+pci_io_base), val);
}

static inline u32 eeh_inl(unsigned long port)
{
        u32 val;
        val = in_le32((u32 __iomem *)(port+pci_io_base));
        if (EEH_POSSIBLE_ERROR(val, u32))
                return eeh_check_failure((void __iomem *)(port), val);
        return val;
}

static inline void eeh_outl(u32 val, unsigned long port)
{
        out_le32((u32 __iomem *)(port+pci_io_base), val);
}

/* in-string eeh functions */
static inline void eeh_insb(unsigned long port, void *buf, int ns)
{
        _insb((u8 __iomem *)(port+pci_io_base), buf, ns);
        if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
                eeh_check_failure((void __iomem *)(port), *(u8*)buf);
}

static inline void eeh_insw_ns(unsigned long port, void *buf, int ns)
{
        _insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns);
        if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
                eeh_check_failure((void __iomem *)(port), *(u16*)buf);
}

static inline void eeh_insl_ns(unsigned long port, void *buf, int nl)
{
        _insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl);
        if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
                eeh_check_failure((void __iomem *)(port), *(u32*)buf);
}

#endif /* __KERNEL__ */
#endif /* _PPC64_EEH_H */