powerpc/powernv: Fix killed EEH event
arch/powerpc/platforms/powernv/eeh-ioda.c
/*
 * This file implements the functions needed by EEH, which is
 * built on IODA-compliant chips. Most of the EEH functionality
 * is built on top of the OPAL APIs.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/tce.h>

#include "powernv.h"
#include "pci.h"

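/*
 * Set once the OPAL event notifier below has been registered;
 * ioda_eeh_post_init() is called once per PHB, but the notifier
 * must only be registered once.
 */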
static int ioda_eeh_nb_init = 0;

static int ioda_eeh_event(struct notifier_block *nb,
                          unsigned long events, void *change)
{
        uint64_t changed_evts = (uint64_t)change;

        /*
         * We simply send a special EEH event if EEH has been
         * enabled. Otherwise, we clear the pending events so
         * they can be handled once EEH is enabled.
         */
        if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
            !(events & OPAL_EVENT_PCI_ERROR))
                return 0;

        if (eeh_enabled())
                eeh_send_failure_event(NULL);
        else
                opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

        return 0;
}

static struct notifier_block ioda_eeh_nb = {
        .notifier_call = ioda_eeh_event,
        .next          = NULL,
        .priority      = 0
};

#ifdef CONFIG_DEBUG_FS
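/*
 * Raw accessors for the PHB error-injection registers exposed via
 * debugfs. The fixed offsets below (0xD10 outbound, 0xD90 inbound A,
 * 0xE10 inbound B) are the PHB register offsets used for error
 * injection; the exact register semantics are chip-specific.
 */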
static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        out_be64(phb->regs + offset, val);
        return 0;
}

static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        *val = in_be64(phb->regs + offset);
        return 0;
}

static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xD10, val);
}

static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xD10, val);
}

static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xD90, val);
}

static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xD90, val);
}

static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xE10, val);
}

static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xE10, val);
}

DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
                        ioda_eeh_outb_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
                        ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
                        ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */

/**
 * ioda_eeh_post_init - Chip dependent post initialization
 * @hose: PCI controller
 *
 * The function is called after the EEH PEs and devices have been
 * built, which means EEH is ready to provide service with the help
 * of the I/O address cache.
 */
static int ioda_eeh_post_init(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        int ret;

        /* Register OPAL event notifier */
        if (!ioda_eeh_nb_init) {
                ret = opal_notifier_register(&ioda_eeh_nb);
                if (ret) {
                        pr_err("%s: Can't register OPAL event notifier (%d)\n",
                               __func__, ret);
                        return ret;
                }

                ioda_eeh_nb_init = 1;
        }

#ifdef CONFIG_DEBUG_FS
        if (!phb->has_dbgfs && phb->dbgfs) {
                phb->has_dbgfs = 1;

                debugfs_create_file("err_injct_outbound", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_outb_dbgfs_ops);
                debugfs_create_file("err_injct_inboundA", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_inbA_dbgfs_ops);
                debugfs_create_file("err_injct_inboundB", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_inbB_dbgfs_ops);
        }
#endif

        /* If EEH is enabled, we're going to rely on it.
         * Otherwise, we fall back to the conventional mechanism
         * of clearing frozen PEs during PCI config accesses.
         */
        if (eeh_enabled())
                phb->flags |= PNV_PHB_FLAG_EEH;
        else
                phb->flags &= ~PNV_PHB_FLAG_EEH;

        return 0;
}

/**
 * ioda_eeh_set_option - Set EEH operation or I/O setting
 * @pe: EEH PE
 * @option: options
 *
 * Enable or disable the EEH option for the indicated PE. The
 * function can also be used to enable I/O or DMA for the PE.
 */
static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
{
        s64 ret;
        u32 pe_no;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        /* Check on PE number */
        if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
                pr_err("%s: PE address %x out of range [0, %x] on PHB#%x\n",
                       __func__, pe->addr, phb->ioda.total_pe,
                       hose->global_number);
                return -EINVAL;
        }

        pe_no = pe->addr;
        switch (option) {
        case EEH_OPT_DISABLE:
                ret = -EEXIST;
                break;
        case EEH_OPT_ENABLE:
                ret = 0;
                break;
        case EEH_OPT_THAW_MMIO:
                ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO);
                if (ret) {
                        pr_warning("%s: Failed to enable MMIO for PHB#%x-PE#%x, err=%lld\n",
                                   __func__, hose->global_number, pe_no, ret);
                        return -EIO;
                }

                break;
        case EEH_OPT_THAW_DMA:
                ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
                if (ret) {
                        pr_warning("%s: Failed to enable DMA for PHB#%x-PE#%x, err=%lld\n",
                                   __func__, hose->global_number, pe_no, ret);
                        return -EIO;
                }

                break;
        default:
                pr_warning("%s: Invalid option %d\n", __func__, option);
                return -EINVAL;
        }

        return ret;
}

static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        long rc;

        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
                                         PNV_PCI_DIAG_BUF_SIZE);
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
                           __func__, hose->global_number, rc);
                return;
        }

        pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
}

/**
 * ioda_eeh_get_state - Retrieve the state of a PE
 * @pe: EEH PE
 *
 * The PE's state should be retrieved from the PEEV and PEST
 * IODA tables. Since OPAL exports a function to do that, it's
 * best to use it.
 */
static int ioda_eeh_get_state(struct eeh_pe *pe)
{
        s64 ret = 0;
        u8 fstate;
        __be16 pcierr;
        u32 pe_no;
        int result;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        /*
         * Sanity check on the PE address. The PHB PE address
         * should be zero.
         */
        if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
                pr_err("%s: PE address %x out of range [0, %x] on PHB#%x\n",
                       __func__, pe->addr, phb->ioda.total_pe,
                       hose->global_number);
                return EEH_STATE_NOT_SUPPORT;
        }

        /*
         * If we're in the middle of a PE reset, return a normal
         * state to keep the EEH core going. For a PHB reset, we
         * still expect the fenced PHB condition to be cleared by
         * the PHB reset itself.
         */
        if (!(pe->type & EEH_PE_PHB) &&
            (pe->state & EEH_PE_RESET)) {
                result = (EEH_STATE_MMIO_ACTIVE |
                          EEH_STATE_DMA_ACTIVE |
                          EEH_STATE_MMIO_ENABLED |
                          EEH_STATE_DMA_ENABLED);
                return result;
        }

        /* Retrieve the PE status through OPAL */
        pe_no = pe->addr;
        ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
                                         &fstate, &pcierr, NULL);
        if (ret) {
                pr_err("%s: Failed to get EEH status on PHB#%x-PE#%x, err=%lld\n",
                       __func__, hose->global_number, pe_no, ret);
                return EEH_STATE_NOT_SUPPORT;
        }

        /* Check PHB status */
        if (pe->type & EEH_PE_PHB) {
                result = 0;
                result &= ~EEH_STATE_RESET_ACTIVE;

                if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
                        result |= EEH_STATE_MMIO_ACTIVE;
                        result |= EEH_STATE_DMA_ACTIVE;
                        result |= EEH_STATE_MMIO_ENABLED;
                        result |= EEH_STATE_DMA_ENABLED;
                } else if (!(pe->state & EEH_PE_ISOLATED)) {
                        eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                        ioda_eeh_phb_diag(hose);
                }

                return result;
        }

        /* Parse the result */
        result = 0;
        switch (fstate) {
        case OPAL_EEH_STOPPED_NOT_FROZEN:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_MMIO_ACTIVE;
                result |= EEH_STATE_DMA_ACTIVE;
                result |= EEH_STATE_MMIO_ENABLED;
                result |= EEH_STATE_DMA_ENABLED;
                break;
        case OPAL_EEH_STOPPED_MMIO_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_DMA_ACTIVE;
                result |= EEH_STATE_DMA_ENABLED;
                break;
        case OPAL_EEH_STOPPED_DMA_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_MMIO_ACTIVE;
                result |= EEH_STATE_MMIO_ENABLED;
                break;
        case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                break;
        case OPAL_EEH_STOPPED_RESET:
                result |= EEH_STATE_RESET_ACTIVE;
                break;
        case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
                result |= EEH_STATE_UNAVAILABLE;
                break;
        case OPAL_EEH_STOPPED_PERM_UNAVAIL:
                result |= EEH_STATE_NOT_SUPPORT;
                break;
        default:
                pr_warning("%s: Unexpected EEH status 0x%x on PHB#%x-PE#%x\n",
                           __func__, fstate, hose->global_number, pe_no);
        }

        /* Dump PHB diag-data for the frozen PE */
        if (result != EEH_STATE_NOT_SUPPORT &&
            (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
            (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
            !(pe->state & EEH_PE_ISOLATED)) {
                eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                ioda_eeh_phb_diag(hose);
        }

        return result;
}

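/*
 * Poll the PHB until a pending reset request completes. By convention
 * in this driver, a positive return value from opal_pci_poll()
 * indicates the number of milliseconds to wait before polling again;
 * zero or a negative value terminates the loop.
 */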
static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
{
        s64 rc = OPAL_HARDWARE;

        while (1) {
                rc = opal_pci_poll(phb->opal_id);
                if (rc <= 0)
                        break;

                if (system_state < SYSTEM_RUNNING)
                        udelay(1000 * rc);
                else
                        msleep(rc);
        }

        return rc;
}

int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
{
        struct pnv_phb *phb = hose->private_data;
        s64 rc = OPAL_HARDWARE;

        pr_debug("%s: Reset PHB#%x, option=%d\n",
                 __func__, hose->global_number, option);

        /* Issue PHB complete reset request */
        if (option == EEH_RESET_FUNDAMENTAL ||
            option == EEH_RESET_HOT)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PHB_COMPLETE,
                                    OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_DEACTIVATE)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PHB_COMPLETE,
                                    OPAL_DEASSERT_RESET);
        if (rc < 0)
                goto out;

        /*
         * Poll the state of the PHB until the request completes
         * successfully. A PHB reset is usually a complete reset
         * followed by a hot reset on the root bus, so we also
         * need the PCI bus settlement delay.
         */
        rc = ioda_eeh_phb_poll(phb);
        if (option == EEH_RESET_DEACTIVATE) {
                if (system_state < SYSTEM_RUNNING)
                        udelay(1000 * EEH_PE_RST_SETTLE_TIME);
                else
                        msleep(EEH_PE_RST_SETTLE_TIME);
        }
out:
        if (rc != OPAL_SUCCESS)
                return -EIO;

        return 0;
}

static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
{
        struct pnv_phb *phb = hose->private_data;
        s64 rc = OPAL_SUCCESS;

        pr_debug("%s: Reset PHB#%x, option=%d\n",
                 __func__, hose->global_number, option);

        /*
         * While deasserting the reset, we needn't care about the
         * reset scope, because the firmware does nothing for
         * fundamental or hot resets during the deassert phase.
         */
        if (option == EEH_RESET_FUNDAMENTAL)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PCI_FUNDAMENTAL_RESET,
                                    OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_HOT)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PCI_HOT_RESET,
                                    OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_DEACTIVATE)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PCI_HOT_RESET,
                                    OPAL_DEASSERT_RESET);
        if (rc < 0)
                goto out;

        /* Poll the state of the PHB until the request completes */
        rc = ioda_eeh_phb_poll(phb);
        if (option == EEH_RESET_DEACTIVATE)
                msleep(EEH_PE_RST_SETTLE_TIME);
out:
        if (rc != OPAL_SUCCESS)
                return -EIO;

        return 0;
}

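/*
 * Reset the secondary bus below the given bridge by toggling the
 * bridge control register. While the reset is asserted the link goes
 * down, so the AER surprise-down error is masked around the reset to
 * avoid spurious linkDown reports.
 */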
static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option)
{
        struct device_node *dn = pci_device_to_OF_node(dev);
        struct eeh_dev *edev = of_node_to_eeh_dev(dn);
        int aer = edev ? edev->aer_cap : 0;
        u32 ctrl;

        pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
                 __func__, pci_domain_nr(dev->bus),
                 dev->bus->number, option);

        switch (option) {
        case EEH_RESET_FUNDAMENTAL:
        case EEH_RESET_HOT:
                /* Don't report linkDown event */
                if (aer) {
                        eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
                                             4, &ctrl);
                        ctrl |= PCI_ERR_UNC_SURPDN;
                        eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
                                              4, ctrl);
                }

                eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
                ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
                eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
                msleep(EEH_PE_RST_HOLD_TIME);

                break;
        case EEH_RESET_DEACTIVATE:
                eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
                ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
                eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
                msleep(EEH_PE_RST_SETTLE_TIME);

                /* Continue reporting linkDown events */
                if (aer) {
                        eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
                                             4, &ctrl);
                        ctrl &= ~PCI_ERR_UNC_SURPDN;
                        eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
                                              4, ctrl);
                }

                break;
        }

        return 0;
}

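/*
 * Reset the secondary bus under @dev. Presumably this is the helper
 * the powernv platform wires into the generic PCI reset path: a root
 * bus is reset through the PHB/root complex via OPAL, anything else
 * by toggling the upstream bridge's secondary bus reset bit.
 */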
void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
        struct pci_controller *hose;

        if (pci_is_root_bus(dev->bus)) {
                hose = pci_bus_to_host(dev->bus);
                ioda_eeh_root_reset(hose, EEH_RESET_HOT);
                ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
        } else {
                ioda_eeh_bridge_reset(dev, EEH_RESET_HOT);
                ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
        }
}

/**
 * ioda_eeh_reset - Reset the indicated PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the indicated PE. For a PCI-bus-sensitive PE, we need to
 * reset the parent p2p bridge. The PHB has to be reinitialized if
 * the p2p bridge is the root bridge. For a PCI-device-sensitive PE,
 * we try to reset the device through FLR. For now, we don't have
 * OPAL APIs to do a HARD reset yet, so all resets are SOFT (HOT)
 * resets.
 */
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
        struct pci_controller *hose = pe->phb;
        struct pci_bus *bus;
        int ret;

        /*
         * For a PHB reset, we always do a complete reset. For PEs whose
         * primary bus is derived from the root complex (root bus) or the
         * root port (usually bus #1), we apply a hot or fundamental reset
         * on the root port. For other PEs, we always do a hot reset on
         * the PE's primary bus.
         *
         * Here, our design differs from pHyp's, which always clears the
         * frozen state during a PE reset. The idea from benh is to keep
         * the frozen state until the PE reset is done completely (until
         * BAR restore). With the frozen state, the hardware drops illegal
         * I/O or MMIO accesses, which could otherwise cause recursive PE
         * freezes during the reset. The side effect is that the EEH core
         * has to clear the frozen state explicitly after BAR restore.
         */
        if (pe->type & EEH_PE_PHB) {
                ret = ioda_eeh_phb_reset(hose, option);
        } else {
                bus = eeh_pe_bus_get(pe);
                if (pci_is_root_bus(bus) ||
                    pci_is_root_bus(bus->parent))
                        ret = ioda_eeh_root_reset(hose, option);
                else
                        ret = ioda_eeh_bridge_reset(bus->self, option);
        }

        return ret;
}

/**
 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
 * @pe: EEH PE
 *
 * A particular PE might include PCI bridges. In order for the PE to
 * work properly, those PCI bridges should be configured correctly.
 * However, nothing needs to be done on P7IOC, since the reset
 * function already covers everything this function would handle.
 */
static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
{
        return 0;
}

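/* Print the GEM and LEM registers common to all P7IOC diag-data types */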
static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
        /* GEM */
        pr_info(" GEM XFIR: %016llx\n", data->gemXfir);
        pr_info(" GEM RFIR: %016llx\n", data->gemRfir);
        pr_info(" GEM RIRQFIR: %016llx\n", data->gemRirqfir);
        pr_info(" GEM Mask: %016llx\n", data->gemMask);
        pr_info(" GEM RWOF: %016llx\n", data->gemRwof);

        /* LEM */
        pr_info(" LEM FIR: %016llx\n", data->lemFir);
        pr_info(" LEM Error Mask: %016llx\n", data->lemErrMask);
        pr_info(" LEM Action 0: %016llx\n", data->lemAction0);
        pr_info(" LEM Action 1: %016llx\n", data->lemAction1);
        pr_info(" LEM WOF: %016llx\n", data->lemWof);
}

static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
        long rc;

        rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
                           __func__, phb->hub_id, rc);
                return;
        }

        switch (data->type) {
        case OPAL_P7IOC_DIAG_TYPE_RGC:
                pr_info("P7IOC diag-data for RGC\n\n");
                ioda_eeh_hub_diag_common(data);
                pr_info(" RGC Status: %016llx\n", data->rgc.rgcStatus);
                pr_info(" RGC LDCP: %016llx\n", data->rgc.rgcLdcp);
                break;
        case OPAL_P7IOC_DIAG_TYPE_BI:
                pr_info("P7IOC diag-data for BI %s\n\n",
                        data->bi.biDownbound ? "Downbound" : "Upbound");
                ioda_eeh_hub_diag_common(data);
                pr_info(" BI LDCP 0: %016llx\n", data->bi.biLdcp0);
                pr_info(" BI LDCP 1: %016llx\n", data->bi.biLdcp1);
                pr_info(" BI LDCP 2: %016llx\n", data->bi.biLdcp2);
                pr_info(" BI Fence Status: %016llx\n", data->bi.biFenceStatus);
                break;
        case OPAL_P7IOC_DIAG_TYPE_CI:
                pr_info("P7IOC diag-data for CI Port %d\n\n",
                        data->ci.ciPort);
                ioda_eeh_hub_diag_common(data);
                pr_info(" CI Port Status: %016llx\n", data->ci.ciPortStatus);
                pr_info(" CI Port LDCP: %016llx\n", data->ci.ciPortLdcp);
                break;
        case OPAL_P7IOC_DIAG_TYPE_MISC:
                pr_info("P7IOC diag-data for MISC\n\n");
                ioda_eeh_hub_diag_common(data);
                break;
        case OPAL_P7IOC_DIAG_TYPE_I2C:
                pr_info("P7IOC diag-data for I2C\n\n");
                ioda_eeh_hub_diag_common(data);
                break;
        default:
                pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n",
                           __func__, phb->hub_id, data->type);
        }
}

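/*
 * Look up the EEH PE on @hose that matches @pe_no. A temporary
 * eeh_dev keyed by the PE configuration address is used so that
 * eeh_pe_get() can perform the search.
 */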
static int ioda_eeh_get_pe(struct pci_controller *hose,
                           u16 pe_no, struct eeh_pe **pe)
{
        struct eeh_pe *phb_pe, *dev_pe;
        struct eeh_dev dev;

        /* Find the PHB PE */
        phb_pe = eeh_phb_pe_get(hose);
        if (!phb_pe)
                return -EEXIST;

        /* Find the PE according to PE# */
        memset(&dev, 0, sizeof(struct eeh_dev));
        dev.phb = hose;
        dev.pe_config_addr = pe_no;
        dev_pe = eeh_pe_get(&dev);
        if (!dev_pe)
                return -EEXIST;

        *pe = dev_pe;
        return 0;
}

/**
 * ioda_eeh_next_error - Retrieve the next error for the EEH core to handle
 * @pe: The affected PE
 *
 * The function is expected to be called by the EEH core when it
 * receives a special EEH event (one without a bound PE). It calls
 * the OPAL APIs to fetch the next error to handle. Informational
 * errors are handled internally by the platform, while the dead IOC,
 * dead PHB, fenced PHB and frozen PE cases should eventually be
 * handled by the EEH core.
 */
static int ioda_eeh_next_error(struct eeh_pe **pe)
{
        struct pci_controller *hose;
        struct pnv_phb *phb;
        struct eeh_pe *phb_pe, *parent_pe;
        __be64 frozen_pe_no;
        __be16 err_type, severity;
        int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
        long rc;
        int state, ret = EEH_NEXT_ERR_NONE;

        /*
         * While running here, it's safe to purge the event queue.
         * And we should keep the cached OPAL notifier event
         * synchronized between the kernel and firmware.
         */
        eeh_remove_event(NULL, false);
        opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

        list_for_each_entry(hose, &hose_list, list_node) {
                /*
                 * If the subordinate PCI buses of the PHB have been
                 * removed or are already under error recovery, we
                 * needn't take care of it any more.
                 */
                phb = hose->private_data;
                phb_pe = eeh_phb_pe_get(hose);
                if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
                        continue;

                rc = opal_pci_next_error(phb->opal_id,
                                         &frozen_pe_no, &err_type, &severity);

                /* If the OPAL API returns an error, we needn't proceed */
                if (rc != OPAL_SUCCESS) {
                        pr_devel("%s: Invalid return value on PHB#%x (0x%lx) from opal_pci_next_error\n",
                                 __func__, hose->global_number, rc);
                        continue;
                }

                /* If the PHB doesn't have an error, stop processing */
                if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
                    be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
                        pr_devel("%s: No error found on PHB#%x\n",
                                 __func__, hose->global_number);
                        continue;
                }

                /*
                 * Process the error. When the specific PHB has multiple
                 * errors, we expect the one with the highest priority
                 * to be reported.
                 */
                pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
                         __func__, be16_to_cpu(err_type), be16_to_cpu(severity),
                         be64_to_cpu(frozen_pe_no), hose->global_number);
                switch (be16_to_cpu(err_type)) {
                case OPAL_EEH_IOC_ERROR:
                        if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
                                pr_err("EEH: dead IOC detected\n");
                                ret = EEH_NEXT_ERR_DEAD_IOC;
                        } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
                                pr_info("EEH: IOC informative error detected\n");
                                ioda_eeh_hub_diag(hose);
                                ret = EEH_NEXT_ERR_NONE;
                        }

                        break;
                case OPAL_EEH_PHB_ERROR:
                        if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
                                *pe = phb_pe;
                                pr_err("EEH: dead PHB#%x detected\n",
                                       hose->global_number);
                                ret = EEH_NEXT_ERR_DEAD_PHB;
                        } else if (be16_to_cpu(severity) ==
                                   OPAL_EEH_SEV_PHB_FENCED) {
                                *pe = phb_pe;
                                pr_err("EEH: fenced PHB#%x detected\n",
                                       hose->global_number);
                                ret = EEH_NEXT_ERR_FENCED_PHB;
                        } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
                                pr_info("EEH: PHB#%x informative error detected\n",
                                        hose->global_number);
                                ioda_eeh_phb_diag(hose);
                                ret = EEH_NEXT_ERR_NONE;
                        }

                        break;
                case OPAL_EEH_PE_ERROR:
                        /*
                         * If we can't find the corresponding PE, we
                         * just try to unfreeze.
                         */
                        if (ioda_eeh_get_pe(hose,
                                            be64_to_cpu(frozen_pe_no), pe)) {
                                /* Try our best to clear it */
                                pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
                                        hose->global_number,
                                        be64_to_cpu(frozen_pe_no));
                                opal_pci_eeh_freeze_clear(phb->opal_id,
                                        be64_to_cpu(frozen_pe_no),
                                        OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
                                ret = EEH_NEXT_ERR_NONE;
                        } else if ((*pe)->state & EEH_PE_ISOLATED) {
                                ret = EEH_NEXT_ERR_NONE;
                        } else {
                                pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
                                       (*pe)->addr, (*pe)->phb->global_number);
                                ret = EEH_NEXT_ERR_FROZEN_PE;
                        }

                        break;
                default:
                        pr_warn("%s: Unexpected error type %d\n",
                                __func__, be16_to_cpu(err_type));
                }

                /*
                 * The EEH core will try to recover from a fenced PHB or
                 * frozen PE. For a frozen PE, the EEH core enables the
                 * I/O path before collecting logs, which destroys the
                 * error site. So we have to dump the diag-data in
                 * advance here.
                 */
                if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
                     ret == EEH_NEXT_ERR_FENCED_PHB) &&
                    !((*pe)->state & EEH_PE_ISOLATED)) {
                        eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
                        ioda_eeh_phb_diag(hose);
                }

                /*
                 * The frozen PE might have frozen parent PEs as well,
                 * and a frozen parent PE has to be handled first.
                 */
                if (ret == EEH_NEXT_ERR_FROZEN_PE) {
                        parent_pe = (*pe)->parent;
                        while (parent_pe) {
                                /* Hit the ceiling? */
                                if (parent_pe->type & EEH_PE_PHB)
                                        break;

                                /* Frozen parent PE? */
                                state = ioda_eeh_get_state(parent_pe);
                                if (state > 0 &&
                                    (state & active_flags) != active_flags)
                                        *pe = parent_pe;

                                /* Next parent level */
                                parent_pe = parent_pe->parent;
                        }

                        /* We may have migrated to another PE */
                        eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
                }

                /*
                 * If we have no errors on the specific PHB, or only
                 * informative errors, we continue polling it. Otherwise,
                 * the upper layer needs to take action.
                 */
                if (ret > EEH_NEXT_ERR_INF)
                        break;
        }

        return ret;
}

struct pnv_eeh_ops ioda_eeh_ops = {
        .post_init        = ioda_eeh_post_init,
        .set_option       = ioda_eeh_set_option,
        .get_state        = ioda_eeh_get_state,
        .reset            = ioda_eeh_reset,
        .configure_bridge = ioda_eeh_configure_bridge,
        .next_error       = ioda_eeh_next_error
};
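
/*
 * A minimal sketch of how a consumer might drive .next_error (the
 * real dispatch lives in the EEH core; the handler names below are
 * illustrative, not the core's actual functions):
 *
 *      struct eeh_pe *pe = NULL;
 *      int rc = eeh_ops->next_error(&pe);
 *
 *      switch (rc) {
 *      case EEH_NEXT_ERR_FROZEN_PE:
 *      case EEH_NEXT_ERR_FENCED_PHB:
 *      case EEH_NEXT_ERR_DEAD_PHB:
 *              recover_pe(pe);                 // illustrative
 *              break;
 *      case EEH_NEXT_ERR_DEAD_IOC:
 *              handle_dead_ioc();              // illustrative
 *              break;
 *      case EEH_NEXT_ERR_NONE:
 *      default:
 *              break;
 *      }
 */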