arch/powerpc/kernel/eeh_driver.c
/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

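/*
 * Devices hot-removed during recovery are collected on edev_list so
 * that removed VFs can be added back once their parent PF recovers;
 * "removed" counts how many devices were taken out.
 */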
struct eeh_rmv_data {
	struct list_head edev_list;
	int removed;
};

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine is used to retrieve the name of the PCI device
 * driver, if one is bound.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function is used to retrieve the PCI device driver for
 * the indicated PCI device. It also takes a reference on the
 * driver's module to prevent it from being unloaded on the fly;
 * otherwise a kernel crash could result.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Dereference on the PCI device driver
 * @pdev: PCI device
 *
 * The function is called to drop the reference taken by
 * eeh_pcid_get() on the driver of the indicated PCI device.
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}

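/*
 * Typical caller pattern for the two helpers above (an illustrative
 * sketch; the eeh_report_*() functions below all follow this shape):
 *
 *	driver = eeh_pcid_get(dev);
 *	if (!driver)
 *		return NULL;
 *	if (driver->err_handler && driver->err_handler->error_detected)
 *		driver->err_handler->error_detected(dev, state);
 *	eeh_pcid_put(dev);
 */
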
/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when reporting a temporary or permanent
 * error to a PCI device, to disable that device's interrupt. If the
 * device uses MSI or MSI-X, no real work is needed because EEH freezes
 * DMA for devices hitting EEH errors, which effectively disables MSI
 * and MSI-X as well.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)
		return;

	if (!irq_has_action(dev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to re-enable the interrupt once the
 * failed device is resumed.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}

static bool eeh_dev_removed(struct eeh_dev *edev)
{
	/* EEH device removed ? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
		return true;

	return false;
}

static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect up and
 * merge the device driver responses. Cumulative response
 * passed back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	edev->in_error = true;
	eeh_pcid_put(dev);
	return NULL;
}

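/*
 * Response merging used above (and in eeh_report_mmio_enabled() below):
 * NEED_RESET from any driver overrides every other answer, while the
 * first concrete answer replaces NONE. For example, with two drivers
 * A and B on the same PE:
 *
 *	A: CAN_RECOVER, B: NEED_RESET  => NEED_RESET
 *	A: NONE,        B: CAN_RECOVER => CAN_RECOVER
 */
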
/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects up and merges the device driver responses.
 * Cumulative response passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called while EEH tries to reset a particular
 * PCI device so that the associated PCI device driver can take
 * some actions, usually saving the data the driver needs so that the
 * driver can work again once the device is recovered.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER) ||
	    (!edev->in_error)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->slot_reset(dev);
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * could resume so that the device driver can do some initialization
 * to make the recovered device work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	bool was_in_error;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	was_in_error = edev->in_error;
	edev->in_error = false;
	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->resume(dev);

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	eeh_pcid_put(dev);
	return NULL;
}

static void *eeh_add_virt_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	if (!(edev->physfn)) {
		pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
			__func__, edev->phb->global_number, pdn->busno,
			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (driver->err_handler)
			return NULL;
	}

#ifdef CONFIG_PPC_POWERNV
	pci_iov_add_virtfn(edev->physfn, pdn->vf_index, 0);
#endif
	return NULL;
}

static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
	int *removed = rmv_data ? &rmv_data->removed : NULL;

	/*
	 * Ideally, the PCI bridges should be removed as well. However,
	 * that adds a lot of complexity, particularly as some of the
	 * devices under a bridge might support EEH. So for simplicity
	 * we only care about plain PCI devices here.
	 */
	if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on the count-based pcibios_release_device() to
	 * detach permanently offlined PEs. Unfortunately, that's
	 * not reliable enough. We might still have the permanently
	 * offlined PEs attached, but we needn't take care of them
	 * and their child devices.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (removed &&
		    eeh_pe_passed(edev->pe))
			return NULL;
		if (removed &&
		    driver->err_handler &&
		    driver->err_handler->error_detected &&
		    driver->err_handler->slot_reset)
			return NULL;
	}

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (removed)
		(*removed)++;

	if (edev->physfn) {
#ifdef CONFIG_PPC_POWERNV
		struct pci_dn *pdn = eeh_dev_to_pdn(edev);

		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index, 0);
		edev->pdev = NULL;

		/*
		 * We have to set the VF PE number to an invalid one,
		 * which is required to plug the VF successfully.
		 */
		pdn->pe_number = IODA_INVALID_PE;
#endif
		if (rmv_data)
			list_add(&edev->rmv_list, &rmv_data->edev_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}

	return NULL;
}

static void *eeh_pe_detach_dev(void *data, void *userdata)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear PE's frozen state for PowerNV, where the PE is
 * kept frozen until BAR restore is completed. It's harmless to clear
 * it for pSeries. To be consistent with PE reset (tried 3 times), we
 * try to clear the frozen state 3 times as well.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool *clear_sw_state = flag;
	int i, rc = 1;

	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	if (rc) {
		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
			__func__, rc, pe->phb->global_number, pe->addr);
		return (void *)pe;
	}

	return NULL;
}

static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
				     bool clear_sw_state)
{
	void *rc;

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	if (!rc)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
}

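/*
 * Run the full recovery sequence against a single PE outside of the
 * normal event flow: save device state, report the error, reset and
 * unfreeze the PE, then restore state and resume the drivers.
 */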
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int result, ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Report error */
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);

	/* Issue reset */
	ret = eeh_reset_pe(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Notify completion of reset */
	eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Resume */
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data)
{
	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
	struct timeval tstamp;
	int cnt, rc;
	struct eeh_dev *edev;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pcibios_add_pci_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (bus) {
		if (pe->type & EEH_PE_VF) {
			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
		} else {
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
			pci_lock_rescan_remove();
			pcibios_remove_pci_devices(bus);
			pci_unlock_rescan_remove();
		}
	} else if (frozen_bus) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_reset_pe(pe);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (bus) {
		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF)
			eeh_add_virt_device(edev, NULL);
		else
			pcibios_add_pci_devices(bus);
	} else if (frozen_bus && rmv_data->removed) {
		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
		ssleep(5);

		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF)
			eeh_add_virt_device(edev, NULL);
		else
			pcibios_add_pci_devices(frozen_bus);
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

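/*
 * Handle a normal EEH event: walk every driver attached to the frozen
 * PE through the error_detected/mmio_enabled/slot_reset/resume
 * callbacks, resetting the slot when any driver asks for it, and
 * permanently disabling the PE once it fails too often within an hour.
 */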
static void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *frozen_bus;
	struct eeh_dev *edev, *tmp;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0};

	frozen_bus = eeh_pe_bus_get(pe);
	if (!frozen_bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return;
	}

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes)
		goto excess_failures;
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
		pe->freeze_count);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary so that we get a
	 * partial hotplug in this case.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
	if ((pe->type & EEH_PE_PHB) &&
	    result != PCI_ERS_RESULT_NONE &&
	    result != PCI_ERS_RESULT_NEED_RESET)
		result = PCI_ERS_RESULT_NEED_RESET;

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, frozen_bus, NULL);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do PE reset for this case. The PE
			 * is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, NULL, &rmv_data);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}

		pr_info("EEH: Notify device drivers "
			"the completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");
		goto hard_fail;
	}

	/*
	 * For those hot-removed VFs, we should add them back after
	 * the PF gets recovered properly.
	 */
	list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) {
		eeh_add_virt_device(edev, NULL);
		list_del(&edev->rmv_list);
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device driver to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	return;

excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
	       "last hour and has been permanently disabled.\n"
	       "Please try reseating or replacing it.\n",
	       pe->phb->global_number, pe->addr,
	       pe->freeze_count);
	goto perm_error;

hard_fail:
	pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
	       "Please try reseating or replacing it\n",
	       pe->phb->global_number, pe->addr);

perm_error:
	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid accessing
	 * their PCI config any more.
	 */
	if (frozen_bus) {
		if (pe->type & EEH_PE_VF) {
			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
		} else {
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

			pci_lock_rescan_remove();
			pcibios_remove_pci_devices(frozen_bus);
			pci_unlock_rescan_remove();
		}
	}
}
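/*
 * Handle a special event reported by the platform via
 * eeh_ops->next_error(): dead IOCs, dead or fenced PHBs, and frozen
 * PEs. Fenced PHBs and frozen PEs are recovered like normal events;
 * dead PHBs and IOCs have all of their devices removed for good.
 */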
static void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe) continue;

				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			else
				eeh_pe_state_mark(pe,
					EEH_PE_ISOLATED | EEH_PE_RECOVERING);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * A fenced PHB or frozen PE is handled as a normal
		 * event. For a dead PHB or IOC, we have to remove the
		 * affected PHBs instead.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_handle_normal_event(pe);
			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				/* Notify all devices to be down */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
				bus = eeh_pe_bus_get(phb_pe);
				eeh_pe_dev_traverse(pe,
					eeh_report_failure, NULL);
				pcibios_remove_pci_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected a dead IOC, we needn't proceed
		 * any more since all PHBs would have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When the PHB detects address or data parity errors on a particular
 * PCI slot, the associated PE will be frozen. Besides, DMA occurring
 * to wild addresses (which usually happens due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH errors. #SERR,
 * #PERR or other misc PCI-related errors can also trigger EEH errors.
 *
 * The recovery process consists of unplugging the device driver (which
 * generates hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which causes a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}