/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 #define pr_fmt(fmt) "opal: " fmt
14 #include <linux/printk.h>
15 #include <linux/types.h>
17 #include <linux/of_fdt.h>
18 #include <linux/of_platform.h>
19 #include <linux/interrupt.h>
20 #include <linux/notifier.h>
21 #include <linux/slab.h>
22 #include <linux/sched.h>
23 #include <linux/kobject.h>
24 #include <linux/delay.h>
25 #include <linux/memblock.h>
26 #include <linux/kthread.h>
27 #include <linux/freezer.h>
29 #include <asm/machdep.h>
31 #include <asm/firmware.h>
36 /* /sys/firmware/opal */
37 struct kobject
*opal_kobj
;
45 struct mcheck_recoverable_range
{
51 static struct mcheck_recoverable_range
*mc_recoverable_range
;
52 static int mc_recoverable_range_len
;
54 struct device_node
*opal_node
;
55 static DEFINE_SPINLOCK(opal_write_lock
);
56 static struct atomic_notifier_head opal_msg_notifier_head
[OPAL_MSG_TYPE_MAX
];
57 static uint32_t opal_heartbeat
;
58 static struct task_struct
*kopald_tsk
;
60 void opal_configure_cores(void)
62 /* Do the actual re-init, This will clobber all FPRs, VRs, etc...
64 * It will preserve non volatile GPRs and HSPRG0/1. It will
65 * also restore HIDs and other SPRs to their original value
66 * but it might clobber a bunch.
69 opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE
);
71 opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE
);
74 /* Restore some bits */
75 if (cur_cpu_spec
->cpu_restore
)
76 cur_cpu_spec
->cpu_restore();
79 int __init
early_init_dt_scan_opal(unsigned long node
,
80 const char *uname
, int depth
, void *data
)
82 const void *basep
, *entryp
, *sizep
;
83 int basesz
, entrysz
, runtimesz
;
85 if (depth
!= 1 || strcmp(uname
, "ibm,opal") != 0)
88 basep
= of_get_flat_dt_prop(node
, "opal-base-address", &basesz
);
89 entryp
= of_get_flat_dt_prop(node
, "opal-entry-address", &entrysz
);
90 sizep
= of_get_flat_dt_prop(node
, "opal-runtime-size", &runtimesz
);
92 if (!basep
|| !entryp
|| !sizep
)
95 opal
.base
= of_read_number(basep
, basesz
/4);
96 opal
.entry
= of_read_number(entryp
, entrysz
/4);
97 opal
.size
= of_read_number(sizep
, runtimesz
/4);
99 pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n",
100 opal
.base
, basep
, basesz
);
101 pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n",
102 opal
.entry
, entryp
, entrysz
);
103 pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
104 opal
.size
, sizep
, runtimesz
);
106 if (of_flat_dt_is_compatible(node
, "ibm,opal-v3")) {
107 powerpc_firmware_features
|= FW_FEATURE_OPAL
;
108 pr_info("OPAL detected !\n");
110 panic("OPAL != V3 detected, no longer supported.\n");
116 int __init
early_init_dt_scan_recoverable_ranges(unsigned long node
,
117 const char *uname
, int depth
, void *data
)
122 if (depth
!= 1 || strcmp(uname
, "ibm,opal") != 0)
125 prop
= of_get_flat_dt_prop(node
, "mcheck-recoverable-ranges", &psize
);
130 pr_debug("Found machine check recoverable ranges.\n");
133 * Calculate number of available entries.
135 * Each recoverable address range entry is (start address, len,
136 * recovery address), 2 cells each for start and recovery address,
137 * 1 cell for len, totalling 5 cells per entry.
139 mc_recoverable_range_len
= psize
/ (sizeof(*prop
) * 5);
142 if (!mc_recoverable_range_len
)
145 /* Size required to hold all the entries. */
146 size
= mc_recoverable_range_len
*
147 sizeof(struct mcheck_recoverable_range
);
150 * Allocate a buffer to hold the MC recoverable ranges. We would be
151 * accessing them in real mode, hence it needs to be within
154 mc_recoverable_range
=__va(memblock_alloc_base(size
, __alignof__(u64
),
156 memset(mc_recoverable_range
, 0, size
);
158 for (i
= 0; i
< mc_recoverable_range_len
; i
++) {
159 mc_recoverable_range
[i
].start_addr
=
160 of_read_number(prop
+ (i
* 5) + 0, 2);
161 mc_recoverable_range
[i
].end_addr
=
162 mc_recoverable_range
[i
].start_addr
+
163 of_read_number(prop
+ (i
* 5) + 2, 1);
164 mc_recoverable_range
[i
].recover_addr
=
165 of_read_number(prop
+ (i
* 5) + 3, 2);
167 pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
168 mc_recoverable_range
[i
].start_addr
,
169 mc_recoverable_range
[i
].end_addr
,
170 mc_recoverable_range
[i
].recover_addr
);
175 static int __init
opal_register_exception_handlers(void)
177 #ifdef __BIG_ENDIAN__
180 if (!(powerpc_firmware_features
& FW_FEATURE_OPAL
))
183 /* Hookup some exception handlers except machine check. We use the
184 * fwnmi area at 0x7000 to provide the glue space to OPAL
189 * Check if we are running on newer firmware that exports
190 * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
191 * the HMI interrupt and we catch it directly in Linux.
193 * For older firmware (i.e currently released POWER8 System Firmware
194 * as of today <= SV810_087), we fallback to old behavior and let OPAL
195 * patch the HMI vector and handle it inside OPAL firmware.
197 * For newer firmware (in development/yet to be released) we will
198 * start catching/handling HMI directly in Linux.
200 if (!opal_check_token(OPAL_HANDLE_HMI
)) {
201 pr_info("Old firmware detected, OPAL handles HMIs.\n");
202 opal_register_exception_handler(
203 OPAL_HYPERVISOR_MAINTENANCE_HANDLER
,
208 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER
, 0, glue
);
213 machine_early_initcall(powernv
, opal_register_exception_handlers
);
216 * Opal message notifier based on message type. Allow subscribers to get
217 * notified for specific messgae type.
219 int opal_message_notifier_register(enum opal_msg_type msg_type
,
220 struct notifier_block
*nb
)
222 if (!nb
|| msg_type
>= OPAL_MSG_TYPE_MAX
) {
223 pr_warning("%s: Invalid arguments, msg_type:%d\n",
228 return atomic_notifier_chain_register(
229 &opal_msg_notifier_head
[msg_type
], nb
);
231 EXPORT_SYMBOL_GPL(opal_message_notifier_register
);
233 int opal_message_notifier_unregister(enum opal_msg_type msg_type
,
234 struct notifier_block
*nb
)
236 return atomic_notifier_chain_unregister(
237 &opal_msg_notifier_head
[msg_type
], nb
);
239 EXPORT_SYMBOL_GPL(opal_message_notifier_unregister
);
241 static void opal_message_do_notify(uint32_t msg_type
, void *msg
)
243 /* notify subscribers */
244 atomic_notifier_call_chain(&opal_msg_notifier_head
[msg_type
],
248 static void opal_handle_message(void)
252 * TODO: pre-allocate a message buffer depending on opal-msg-size
253 * value in /proc/device-tree.
255 static struct opal_msg msg
;
258 ret
= opal_get_msg(__pa(&msg
), sizeof(msg
));
259 /* No opal message pending. */
260 if (ret
== OPAL_RESOURCE
)
263 /* check for errors. */
265 pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
270 type
= be32_to_cpu(msg
.msg_type
);
273 if (type
>= OPAL_MSG_TYPE_MAX
) {
274 pr_warn_once("%s: Unknown message type: %u\n", __func__
, type
);
277 opal_message_do_notify(type
, (void *)&msg
);
280 static irqreturn_t
opal_message_notify(int irq
, void *data
)
282 opal_handle_message();
286 static int __init
opal_message_init(void)
290 for (i
= 0; i
< OPAL_MSG_TYPE_MAX
; i
++)
291 ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head
[i
]);
293 irq
= opal_event_request(ilog2(OPAL_EVENT_MSG_PENDING
));
295 pr_err("%s: Can't register OPAL event irq (%d)\n",
300 ret
= request_irq(irq
, opal_message_notify
,
301 IRQ_TYPE_LEVEL_HIGH
, "opal-msg", NULL
);
303 pr_err("%s: Can't request OPAL event irq (%d)\n",
311 int opal_get_chars(uint32_t vtermno
, char *buf
, int count
)
318 opal_poll_events(&evt
);
319 if ((be64_to_cpu(evt
) & OPAL_EVENT_CONSOLE_INPUT
) == 0)
321 len
= cpu_to_be64(count
);
322 rc
= opal_console_read(vtermno
, &len
, buf
);
323 if (rc
== OPAL_SUCCESS
)
324 return be64_to_cpu(len
);
328 int opal_put_chars(uint32_t vtermno
, const char *data
, int total_len
)
339 /* We want put_chars to be atomic to avoid mangling of hvsi
340 * packets. To do that, we first test for room and return
341 * -EAGAIN if there isn't enough.
343 * Unfortunately, opal_console_write_buffer_space() doesn't
344 * appear to work on opal v1, so we just assume there is
345 * enough room and be done with it
347 spin_lock_irqsave(&opal_write_lock
, flags
);
348 rc
= opal_console_write_buffer_space(vtermno
, &olen
);
349 len
= be64_to_cpu(olen
);
350 if (rc
|| len
< total_len
) {
351 spin_unlock_irqrestore(&opal_write_lock
, flags
);
352 /* Closed -> drop characters */
355 opal_poll_events(NULL
);
359 /* We still try to handle partial completions, though they
360 * should no longer happen.
363 while(total_len
> 0 && (rc
== OPAL_BUSY
||
364 rc
== OPAL_BUSY_EVENT
|| rc
== OPAL_SUCCESS
)) {
365 olen
= cpu_to_be64(total_len
);
366 rc
= opal_console_write(vtermno
, &olen
, data
);
367 len
= be64_to_cpu(olen
);
369 /* Closed or other error drop */
370 if (rc
!= OPAL_SUCCESS
&& rc
!= OPAL_BUSY
&&
371 rc
!= OPAL_BUSY_EVENT
) {
375 if (rc
== OPAL_SUCCESS
) {
380 /* This is a bit nasty but we need that for the console to
381 * flush when there aren't any interrupts. We will clean
382 * things a bit later to limit that to synchronous path
383 * such as the kernel console and xmon/udbg
386 opal_poll_events(&evt
);
387 while(rc
== OPAL_SUCCESS
&&
388 (be64_to_cpu(evt
) & OPAL_EVENT_CONSOLE_OUTPUT
));
390 spin_unlock_irqrestore(&opal_write_lock
, flags
);
394 static int opal_recover_mce(struct pt_regs
*regs
,
395 struct machine_check_event
*evt
)
398 uint64_t ea
= get_mce_fault_addr(evt
);
400 if (!(regs
->msr
& MSR_RI
)) {
401 /* If MSR_RI isn't set, we cannot recover */
403 } else if (evt
->disposition
== MCE_DISPOSITION_RECOVERED
) {
404 /* Platform corrected itself */
406 } else if (ea
&& !is_kernel_addr(ea
)) {
408 * Faulting address is not in kernel text. We should be fine.
409 * We need to find which process uses this address.
410 * For now, kill the task if we have received exception when
413 * TODO: Queue up this address for hwpoisioning later.
415 if (user_mode(regs
) && !is_global_init(current
)) {
416 _exception(SIGBUS
, regs
, BUS_MCEERR_AR
, regs
->nip
);
420 } else if (user_mode(regs
) && !is_global_init(current
) &&
421 evt
->severity
== MCE_SEV_ERROR_SYNC
) {
423 * If we have received a synchronous error when in userspace
426 _exception(SIGBUS
, regs
, BUS_MCEERR_AR
, regs
->nip
);
432 int opal_machine_check(struct pt_regs
*regs
)
434 struct machine_check_event evt
;
437 if (!get_mce_event(&evt
, MCE_EVENT_RELEASE
))
440 /* Print things out */
441 if (evt
.version
!= MCE_V1
) {
442 pr_err("Machine Check Exception, Unknown event version %d !\n",
446 machine_check_print_event_info(&evt
);
448 if (opal_recover_mce(regs
, &evt
))
452 * Unrecovered machine check, we are heading to panic path.
454 * We may have hit this MCE in very early stage of kernel
455 * initialization even before opal-prd has started running. If
456 * this is the case then this MCE error may go un-noticed or
457 * un-analyzed if we go down panic path. We need to inform
458 * BMC/OCC about this error so that they can collect relevant
459 * data for error analysis before rebooting.
460 * Use opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR) to do so.
461 * This function may not return on BMC based system.
463 ret
= opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR
,
464 "Unrecoverable Machine Check exception");
465 if (ret
== OPAL_UNSUPPORTED
) {
466 pr_emerg("Reboot type %d not supported\n",
467 OPAL_REBOOT_PLATFORM_ERROR
);
471 * We reached here. There can be three possibilities:
472 * 1. We are running on a firmware level that do not support
474 * 2. We are running on a firmware level that do not support
475 * OPAL_REBOOT_PLATFORM_ERROR reboot type.
476 * 3. We are running on FSP based system that does not need opal
477 * to trigger checkstop explicitly for error analysis. The FSP
478 * PRD component would have already got notified about this
479 * error through other channels.
481 * If hardware marked this as an unrecoverable MCE, we are
482 * going to panic anyway. Even if it didn't, it's not safe to
483 * continue at this point, so we should explicitly panic.
486 panic("PowerNV Unrecovered Machine Check");
490 /* Early hmi handler called in real mode. */
491 int opal_hmi_exception_early(struct pt_regs
*regs
)
496 * call opal hmi handler. Pass paca address as token.
497 * The return value OPAL_SUCCESS is an indication that there is
498 * an HMI event generated waiting to pull by Linux.
500 rc
= opal_handle_hmi();
501 if (rc
== OPAL_SUCCESS
) {
502 local_paca
->hmi_event_available
= 1;
508 /* HMI exception handler called in virtual mode during check_irq_replay. */
509 int opal_handle_hmi_exception(struct pt_regs
*regs
)
515 * Check if HMI event is available.
516 * if Yes, then call opal_poll_events to pull opal messages and
519 if (!local_paca
->hmi_event_available
)
522 local_paca
->hmi_event_available
= 0;
523 rc
= opal_poll_events(&evt
);
524 if (rc
== OPAL_SUCCESS
&& evt
)
525 opal_handle_events(be64_to_cpu(evt
));
530 static uint64_t find_recovery_address(uint64_t nip
)
534 for (i
= 0; i
< mc_recoverable_range_len
; i
++)
535 if ((nip
>= mc_recoverable_range
[i
].start_addr
) &&
536 (nip
< mc_recoverable_range
[i
].end_addr
))
537 return mc_recoverable_range
[i
].recover_addr
;
541 bool opal_mce_check_early_recovery(struct pt_regs
*regs
)
543 uint64_t recover_addr
= 0;
545 if (!opal
.base
|| !opal
.size
)
548 if ((regs
->nip
>= opal
.base
) &&
549 (regs
->nip
< (opal
.base
+ opal
.size
)))
550 recover_addr
= find_recovery_address(regs
->nip
);
553 * Setup regs->nip to rfi into fixup address.
556 regs
->nip
= recover_addr
;
559 return !!recover_addr
;
562 static int opal_sysfs_init(void)
564 opal_kobj
= kobject_create_and_add("opal", firmware_kobj
);
566 pr_warn("kobject_create_and_add opal failed\n");
573 static ssize_t
symbol_map_read(struct file
*fp
, struct kobject
*kobj
,
574 struct bin_attribute
*bin_attr
,
575 char *buf
, loff_t off
, size_t count
)
577 return memory_read_from_buffer(buf
, count
, &off
, bin_attr
->private,
581 static BIN_ATTR_RO(symbol_map
, 0);
583 static void opal_export_symmap(void)
587 struct device_node
*fw
;
590 fw
= of_find_node_by_path("/ibm,opal/firmware");
593 syms
= of_get_property(fw
, "symbol-map", &size
);
594 if (!syms
|| size
!= 2 * sizeof(__be64
))
597 /* Setup attributes */
598 bin_attr_symbol_map
.private = __va(be64_to_cpu(syms
[0]));
599 bin_attr_symbol_map
.size
= be64_to_cpu(syms
[1]);
601 rc
= sysfs_create_bin_file(opal_kobj
, &bin_attr_symbol_map
);
603 pr_warn("Error %d creating OPAL symbols file\n", rc
);
606 static void __init
opal_dump_region_init(void)
612 if (!opal_check_token(OPAL_REGISTER_DUMP_REGION
))
615 /* Register kernel log buffer */
616 addr
= log_buf_addr_get();
620 size
= log_buf_len_get();
624 rc
= opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF
,
626 /* Don't warn if this is just an older OPAL that doesn't
627 * know about that call
629 if (rc
&& rc
!= OPAL_UNSUPPORTED
)
630 pr_warn("DUMP: Failed to register kernel log buffer. "
634 static void opal_pdev_init(struct device_node
*opal_node
,
635 const char *compatible
)
637 struct device_node
*np
;
639 for_each_child_of_node(opal_node
, np
)
640 if (of_device_is_compatible(np
, compatible
))
641 of_platform_device_create(np
, NULL
, NULL
);
644 static void opal_i2c_create_devs(void)
646 struct device_node
*np
;
648 for_each_compatible_node(np
, NULL
, "ibm,opal-i2c")
649 of_platform_device_create(np
, NULL
, NULL
);
652 static int kopald(void *unused
)
654 unsigned long timeout
= msecs_to_jiffies(opal_heartbeat
) + 1;
660 opal_poll_events(&events
);
661 opal_handle_events(be64_to_cpu(events
));
662 schedule_timeout_interruptible(timeout
);
663 } while (!kthread_should_stop());
668 void opal_wake_poller(void)
671 wake_up_process(kopald_tsk
);
674 static void opal_init_heartbeat(void)
676 /* Old firwmware, we assume the HVC heartbeat is sufficient */
677 if (of_property_read_u32(opal_node
, "ibm,heartbeat-ms",
678 &opal_heartbeat
) != 0)
682 kopald_tsk
= kthread_run(kopald
, NULL
, "kopald");
685 static int __init
opal_init(void)
687 struct device_node
*np
, *consoles
, *leds
;
690 opal_node
= of_find_node_by_path("/ibm,opal");
692 pr_warn("Device node not found\n");
696 /* Register OPAL consoles if any ports */
697 consoles
= of_find_node_by_path("/ibm,opal/consoles");
699 for_each_child_of_node(consoles
, np
) {
700 if (strcmp(np
->name
, "serial"))
702 of_platform_device_create(np
, NULL
, NULL
);
704 of_node_put(consoles
);
707 /* Initialise OPAL messaging system */
710 /* Initialise OPAL asynchronous completion interface */
711 opal_async_comp_init();
713 /* Initialise OPAL sensor interface */
716 /* Initialise OPAL hypervisor maintainence interrupt handling */
717 opal_hmi_handler_init();
719 /* Create i2c platform devices */
720 opal_i2c_create_devs();
722 /* Setup a heatbeat thread if requested by OPAL */
723 opal_init_heartbeat();
725 /* Create leds platform devices */
726 leds
= of_find_node_by_path("/ibm,opal/leds");
728 of_platform_device_create(leds
, "opal_leds", NULL
);
732 /* Initialise OPAL message log interface */
735 /* Create "opal" kobject under /sys/firmware */
736 rc
= opal_sysfs_init();
738 /* Export symbol map to userspace */
739 opal_export_symmap();
740 /* Setup dump region interface */
741 opal_dump_region_init();
742 /* Setup error log interface */
743 rc
= opal_elog_init();
744 /* Setup code update interface */
745 opal_flash_update_init();
746 /* Setup platform dump extract interface */
747 opal_platform_dump_init();
748 /* Setup system parameters interface */
749 opal_sys_param_init();
750 /* Setup message log sysfs interface. */
751 opal_msglog_sysfs_init();
754 /* Initialize platform devices: IPMI backend, PRD & flash interface */
755 opal_pdev_init(opal_node
, "ibm,opal-ipmi");
756 opal_pdev_init(opal_node
, "ibm,opal-flash");
757 opal_pdev_init(opal_node
, "ibm,opal-prd");
759 /* Initialise platform device: oppanel interface */
760 opal_pdev_init(opal_node
, "ibm,opal-oppanel");
762 /* Initialise OPAL kmsg dumper for flushing console on panic */
767 machine_subsys_initcall(powernv
, opal_init
);
769 void opal_shutdown(void)
773 opal_event_shutdown();
776 * Then sync with OPAL which ensure anything that can
777 * potentially write to our memory has completed such
778 * as an ongoing dump retrieval
780 while (rc
== OPAL_BUSY
|| rc
== OPAL_BUSY_EVENT
) {
781 rc
= opal_sync_host_reboot();
783 opal_poll_events(NULL
);
788 /* Unregister memory dump region */
789 if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION
))
790 opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF
);
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
EXPORT_SYMBOL_GPL(opal_xscom_read);
EXPORT_SYMBOL_GPL(opal_xscom_write);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
EXPORT_SYMBOL_GPL(opal_flash_read);
EXPORT_SYMBOL_GPL(opal_flash_write);
EXPORT_SYMBOL_GPL(opal_flash_erase);
EXPORT_SYMBOL_GPL(opal_prd_msg);
804 /* Convert a region of vmalloc memory to an opal sg list */
805 struct opal_sg_list
*opal_vmalloc_to_sg_list(void *vmalloc_addr
,
806 unsigned long vmalloc_size
)
808 struct opal_sg_list
*sg
, *first
= NULL
;
811 sg
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
817 while (vmalloc_size
> 0) {
818 uint64_t data
= vmalloc_to_pfn(vmalloc_addr
) << PAGE_SHIFT
;
819 uint64_t length
= min(vmalloc_size
, PAGE_SIZE
);
821 sg
->entry
[i
].data
= cpu_to_be64(data
);
822 sg
->entry
[i
].length
= cpu_to_be64(length
);
825 if (i
>= SG_ENTRIES_PER_NODE
) {
826 struct opal_sg_list
*next
;
828 next
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
832 sg
->length
= cpu_to_be64(
833 i
* sizeof(struct opal_sg_entry
) + 16);
835 sg
->next
= cpu_to_be64(__pa(next
));
839 vmalloc_addr
+= length
;
840 vmalloc_size
-= length
;
843 sg
->length
= cpu_to_be64(i
* sizeof(struct opal_sg_entry
) + 16);
848 pr_err("%s : Failed to allocate memory\n", __func__
);
849 opal_free_sg_list(first
);
853 void opal_free_sg_list(struct opal_sg_list
*sg
)
856 uint64_t next
= be64_to_cpu(sg
->next
);
867 int opal_error_code(int rc
)
870 case OPAL_SUCCESS
: return 0;
872 case OPAL_PARAMETER
: return -EINVAL
;
873 case OPAL_ASYNC_COMPLETION
: return -EINPROGRESS
;
874 case OPAL_BUSY_EVENT
: return -EBUSY
;
875 case OPAL_NO_MEM
: return -ENOMEM
;
876 case OPAL_PERMISSION
: return -EPERM
;
878 case OPAL_UNSUPPORTED
: return -EIO
;
879 case OPAL_HARDWARE
: return -EIO
;
880 case OPAL_INTERNAL_ERROR
: return -EIO
;
882 pr_err("%s: unexpected OPAL error %d\n", __func__
, rc
);
EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);
/* Export these symbols for PowerNV LED class driver */
EXPORT_SYMBOL_GPL(opal_leds_get_ind);
EXPORT_SYMBOL_GPL(opal_leds_set_ind);
/* Export this symbol for PowerNV Operator Panel class driver */
EXPORT_SYMBOL_GPL(opal_write_oppanel_async);