/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0)
		printk(KERN_CONT "%s", buffer);
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	if (kstrtoul(arg, 16, &acpi_rsdp))
		return -EINVAL;
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}

	return 0;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
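
/*
 * Example usage (illustrative sketch only, not part of this file): a caller
 * that needs a temporary view of a firmware table could pair
 * acpi_os_map_iomem() with acpi_os_unmap_iomem() (defined below).  The
 * "table_phys"/"table_len" values are hypothetical.
 *
 *	void __iomem *p = acpi_os_map_iomem(table_phys, table_len);
 *
 *	if (p) {
 *		u32 sig = readl(p);
 *		acpi_os_unmap_iomem(p, table_len);
 *	}
 *
 * Repeated mappings of the same range only bump the refcount of the entry on
 * the acpi_ioremaps list, so map and unmap calls must stay balanced.
 */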

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
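
/*
 * Example (illustrative only): ACPICA calls the two helpers above for
 * SystemMemory OperationRegion accesses.  A 32-bit read of a hypothetical
 * register at physical address 0xfed40030 would look like:
 *
 *	u64 val;
 *
 *	if (ACPI_SUCCESS(acpi_os_read_memory(0xfed40030, &val, 32)))
 *		pr_debug("reg = 0x%llx\n", (unsigned long long)val);
 *
 * When the address falls inside one of the permanent mappings tracked in
 * acpi_ioremaps, the RCU-protected lookup avoids an ioremap()/iounmap()
 * round trip on every access.
 */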

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
		       "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
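
/*
 * Example (illustrative sketch): deferring work through acpi_os_execute().
 * The callback runs in workqueue context, so it may sleep.  The hypothetical
 * my_handler()/my_data below are not part of this file.
 *
 *	static void my_handler(void *context)
 *	{
 *		struct my_data *d = context;
 *		// ... do the deferred work ...
 *	}
 *
 *	status = acpi_os_execute(OSL_NOTIFY_HANDLER, my_handler, d);
 *
 * The struct acpi_os_dpc wrapper allocated above is freed by
 * acpi_os_execute_deferred() once the callback has run.
 */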

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
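
/*
 * Example (illustrative sketch): a native driver can ask whether its I/O
 * range collides with an ACPI OperationRegion before claiming it.  The
 * base/length values below are hypothetical.
 *
 *	#define MYDEV_IOBASE	0x0290
 *	#define MYDEV_IOLEN	8
 *
 *	if (acpi_check_region(MYDEV_IOBASE, MYDEV_IOLEN, "mydev"))
 *		return -EBUSY;	// conflict and acpi_enforce_resources=strict
 *	if (!request_region(MYDEV_IOBASE, MYDEV_IOLEN, "mydev"))
 *		return -EBUSY;
 *
 * With acpi_enforce_resources=lax a conflict only logs a warning and the
 * check returns 0, so the driver still loads.
 */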

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif
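
/*
 * Example (illustrative only): how ACPICA typically drives the cache hooks
 * above when ACPI_USE_LOCAL_CACHE is not defined.  "struct my_obj" is a
 * placeholder type, and the allocation-side helper comes from the ACPICA
 * OS layer headers rather than this file.
 *
 *	acpi_cache_t *cache;
 *	void *obj;
 *
 *	acpi_os_create_cache("Acpi-Example", sizeof(struct my_obj), 16, &cache);
 *	obj = acpi_os_acquire_object(cache);
 *	acpi_os_release_object(cache, obj);
 *	acpi_os_delete_cache(cache);
 */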

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
						     u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}