/*
 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
 * infrastructure
 *
 * APEI allows errors (for example from the chipset) to be reported to
 * the operating system. This improves NMI handling especially. In
 * addition it supports error serialization and error injection.
 *
 * For more information about APEI, please refer to ACPI Specification
 * version 4.0, chapter 17.
 *
 * This file contains common functions used by more than one APEI
 * table, including the framework of the interpreter for ERST and EINJ,
 * and resource management for APEI registers.
 *
 * Copyright (C) 2009, Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <acpi/atomicio.h>

#include "apei-internal.h"

#define APEI_PFX "APEI: "

/*
 * APEI ERST (Error Record Serialization Table) and EINJ (Error
 * INJection) interpreter framework.
 */

#define APEI_EXEC_PRESERVE_REGISTER	0x1

void apei_exec_ctx_init(struct apei_exec_context *ctx,
                        struct apei_exec_ins_type *ins_table,
                        u32 instructions,
                        struct acpi_whea_header *action_table,
                        u32 entries)
{
        ctx->ins_table = ins_table;
        ctx->instructions = instructions;
        ctx->action_table = action_table;
        ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);

int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
        int rc;

        rc = acpi_atomic_read(val, &entry->register_region);
        if (rc)
                return rc;
        *val >>= entry->register_region.bit_offset;
        *val &= entry->mask;

        return 0;
}

int apei_exec_read_register(struct apei_exec_context *ctx,
                            struct acpi_whea_header *entry)
{
        int rc;
        u64 val = 0;

        rc = __apei_exec_read_register(entry, &val);
        if (rc)
                return rc;
        ctx->value = val;

        return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);

int apei_exec_read_register_value(struct apei_exec_context *ctx,
                                  struct acpi_whea_header *entry)
{
        int rc;

        rc = apei_exec_read_register(ctx, entry);
        if (rc)
                return rc;
        ctx->value = (ctx->value == entry->value);

        return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);

int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
        int rc;

        val &= entry->mask;
        val <<= entry->register_region.bit_offset;
        if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
                u64 valr = 0;
                rc = acpi_atomic_read(&valr, &entry->register_region);
                if (rc)
                        return rc;
                valr &= ~(entry->mask << entry->register_region.bit_offset);
                val |= valr;
        }
        rc = acpi_atomic_write(val, &entry->register_region);

        return rc;
}
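
/*
 * Worked example (added for exposition; the numbers are made up, not
 * taken from any ACPI table): with entry->mask == 0xff, a register
 * bit_offset of 8 and APEI_EXEC_PRESERVE_REGISTER set, writing 0x12
 * while the register currently reads 0xaabbccdd proceeds as:
 *
 *      val   = 0x12 & 0xff               = 0x12
 *      val   = 0x12 << 8                 = 0x1200
 *      valr  = 0xaabbccdd & ~(0xff << 8) = 0xaabb00dd
 *      val  |= valr                     => 0xaabb12dd is written back
 *
 * i.e. only the bit field described by the GAR is updated; the rest of
 * the register is preserved.
 */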

int apei_exec_write_register(struct apei_exec_context *ctx,
                             struct acpi_whea_header *entry)
{
        return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);

int apei_exec_write_register_value(struct apei_exec_context *ctx,
                                   struct acpi_whea_header *entry)
{
        int rc;

        ctx->value = entry->value;
        rc = apei_exec_write_register(ctx, entry);

        return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);

int apei_exec_noop(struct apei_exec_context *ctx,
                   struct acpi_whea_header *entry)
{
        return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);

/*
 * Interpret the specified action. Go through the whole action table
 * and execute all instructions belonging to the action.
 */
int apei_exec_run(struct apei_exec_context *ctx, u8 action)
{
        int rc;
        u32 i, ip;
        struct acpi_whea_header *entry;
        apei_exec_ins_func_t run;

        ctx->ip = 0;

        /*
         * "ip" is the instruction pointer of the current instruction,
         * "ctx->ip" specifies the next instruction to be executed.  An
         * instruction's "run" function may change "ctx->ip" to
         * implement "goto" semantics.
         */
rewind:
        ip = 0;
        for (i = 0; i < ctx->entries; i++) {
                entry = &ctx->action_table[i];
                if (entry->action != action)
                        continue;
                if (ip == ctx->ip) {
                        if (entry->instruction >= ctx->instructions ||
                            !ctx->ins_table[entry->instruction].run) {
                                pr_warning(FW_WARN APEI_PFX
                        "Invalid action table, unknown instruction type: %d\n",
                                           entry->instruction);
                                return -EINVAL;
                        }
                        run = ctx->ins_table[entry->instruction].run;
                        rc = run(ctx, entry);
                        if (rc < 0)
                                return rc;
                        else if (rc != APEI_EXEC_SET_IP)
                                ctx->ip++;
                }
                ip++;
                if (ctx->ip < ip)
                        goto rewind;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_run);
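
/*
 * Example (added for illustration, not part of the original file): a
 * table driver such as erst.c or einj.c typically builds an instruction
 * table mapping instruction opcodes to the helpers above, then runs a
 * named action through the interpreter.  The table below is a trimmed
 * sketch; the ACPI_ERST_* opcode macros are the ERST definitions from
 * the ACPICA headers, and the function and variable names are
 * illustrative only.
 */
#if 0	/* usage sketch only */
static struct apei_exec_ins_type example_ins_type[] = {
        [ACPI_ERST_READ_REGISTER] = {
                .flags = APEI_EXEC_INS_ACCESS_REGISTER,
                .run = apei_exec_read_register,
        },
        [ACPI_ERST_WRITE_REGISTER_VALUE] = {
                .flags = APEI_EXEC_INS_ACCESS_REGISTER,
                .run = apei_exec_write_register_value,
        },
        [ACPI_ERST_NOOP] = {
                .flags = 0,
                .run = apei_exec_noop,
        },
};

static int example_run_action(struct acpi_whea_header *action_table,
                              u32 entries, u8 action)
{
        struct apei_exec_context ctx;

        apei_exec_ctx_init(&ctx, example_ins_type,
                           ARRAY_SIZE(example_ins_type),
                           action_table, entries);
        return apei_exec_run(&ctx, action);
}
#endif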

typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
                                      struct acpi_whea_header *entry,
                                      void *data);

static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
                                    apei_exec_entry_func_t func,
                                    void *data,
                                    int *end)
{
        u8 ins;
        int i, rc;
        struct acpi_whea_header *entry;
        struct apei_exec_ins_type *ins_table = ctx->ins_table;

        for (i = 0; i < ctx->entries; i++) {
                entry = ctx->action_table + i;
                ins = entry->instruction;
                if (end)
                        *end = i;
                if (ins >= ctx->instructions || !ins_table[ins].run) {
                        pr_warning(FW_WARN APEI_PFX
                        "Invalid action table, unknown instruction type: %d\n",
                                   ins);
                        return -EINVAL;
                }
                rc = func(ctx, entry, data);
                if (rc)
                        return rc;
        }

        return 0;
}

static int pre_map_gar_callback(struct apei_exec_context *ctx,
                                struct acpi_whea_header *entry,
                                void *data)
{
        u8 ins = entry->instruction;

        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
                return acpi_pre_map_gar(&entry->register_region);

        return 0;
}

/*
 * Pre-map all GARs in the action table to make it possible to access
 * them from an NMI handler.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
        int rc, end;

        rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
                                      NULL, &end);
        if (rc) {
                struct apei_exec_context ctx_unmap;
                memcpy(&ctx_unmap, ctx, sizeof(*ctx));
                ctx_unmap.entries = end;
                apei_exec_post_unmap_gars(&ctx_unmap);
        }

        return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);

static int post_unmap_gar_callback(struct apei_exec_context *ctx,
                                   struct acpi_whea_header *entry,
                                   void *data)
{
        u8 ins = entry->instruction;

        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
                acpi_post_unmap_gar(&entry->register_region);

        return 0;
}

/* Post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
        return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
                                        NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
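
/*
 * Illustrative usage (added for exposition): a driver that wants its
 * action table usable from NMI context pre-maps the GARs once at
 * initialization and unmaps them again on the teardown or error path.
 * Apart from the apei_exec_* helpers, the function names here are
 * hypothetical.
 */
#if 0	/* usage sketch only */
static int example_table_init(struct apei_exec_context *ctx)
{
        int rc;

        rc = apei_exec_pre_map_gars(ctx);	/* ioremap for NMI use */
        if (rc)
                return rc;
        /* ... register NMI/notification handlers here ... */
        return 0;
}

static void example_table_exit(struct apei_exec_context *ctx)
{
        /* ... unregister handlers first ... */
        apei_exec_post_unmap_gars(ctx);
}
#endif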

/*
 * Resource management for GARs in APEI
 */
struct apei_res {
        struct list_head list;
        unsigned long start;
        unsigned long end;
};

/* Collect all resources requested, to avoid conflict */
struct apei_resources apei_resources_all = {
        .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
        .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};

static int apei_res_add(struct list_head *res_list,
                        unsigned long start, unsigned long size)
{
        struct apei_res *res, *resn, *res_ins = NULL;
        unsigned long end = start + size;

        if (end <= start)
                return 0;
repeat:
        list_for_each_entry_safe(res, resn, res_list, list) {
                if (res->start > end || res->end < start)
                        continue;
                else if (end <= res->end && start >= res->start) {
                        kfree(res_ins);
                        return 0;
                }
                list_del(&res->list);
                res->start = start = min(res->start, start);
                res->end = end = max(res->end, end);
                kfree(res_ins);
                res_ins = res;
                goto repeat;
        }

        if (res_ins)
                list_add(&res_ins->list, res_list);
        else {
                res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
                if (!res_ins)
                        return -ENOMEM;
                res_ins->start = start;
                res_ins->end = end;
                list_add(&res_ins->list, res_list);
        }

        return 0;
}
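
/*
 * Worked example (added for exposition, the values are arbitrary): on an
 * empty list, apei_res_add(list, 0x1000, 0x10) inserts [0x1000, 0x1010).
 * A later apei_res_add(list, 0x1008, 0x10) overlaps that range, so the
 * existing node is removed, grown to [0x1000, 0x1018) and re-inserted;
 * adding a range that is already fully covered is a no-op.  The list
 * therefore always holds non-overlapping, coalesced ranges.
 */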

static int apei_res_sub(struct list_head *res_list1,
                        struct list_head *res_list2)
{
        struct apei_res *res1, *resn1, *res2, *res;
        res1 = list_entry(res_list1->next, struct apei_res, list);
        resn1 = list_entry(res1->list.next, struct apei_res, list);
        while (&res1->list != res_list1) {
                list_for_each_entry(res2, res_list2, list) {
                        if (res1->start >= res2->end ||
                            res1->end <= res2->start)
                                continue;
                        else if (res1->end <= res2->end &&
                                 res1->start >= res2->start) {
                                list_del(&res1->list);
                                kfree(res1);
                                break;
                        } else if (res1->end > res2->end &&
                                   res1->start < res2->start) {
                                res = kmalloc(sizeof(*res), GFP_KERNEL);
                                if (!res)
                                        return -ENOMEM;
                                res->start = res2->end;
                                res->end = res1->end;
                                res1->end = res2->start;
                                list_add(&res->list, &res1->list);
                                resn1 = res;
                        } else {
                                if (res1->start < res2->start)
                                        res1->end = res2->start;
                                else
                                        res1->start = res2->end;
                        }
                }
                res1 = resn1;
                resn1 = list_entry(resn1->list.next, struct apei_res, list);
        }

        return 0;
}

static void apei_res_clean(struct list_head *res_list)
{
        struct apei_res *res, *resn;

        list_for_each_entry_safe(res, resn, res_list, list) {
                list_del(&res->list);
                kfree(res);
        }
}

void apei_resources_fini(struct apei_resources *resources)
{
        apei_res_clean(&resources->iomem);
        apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);

static int apei_resources_merge(struct apei_resources *resources1,
                                struct apei_resources *resources2)
{
        int rc;
        struct apei_res *res;

        list_for_each_entry(res, &resources2->iomem, list) {
                rc = apei_res_add(&resources1->iomem, res->start,
                                  res->end - res->start);
                if (rc)
                        return rc;
        }
        list_for_each_entry(res, &resources2->ioport, list) {
                rc = apei_res_add(&resources1->ioport, res->start,
                                  res->end - res->start);
                if (rc)
                        return rc;
        }

        return 0;
}

/*
 * EINJ has two groups of GARs (EINJ table entries and trigger table
 * entries), so the resources they have in common are subtracted from
 * the trigger table resources before the second request is made.
 */
int apei_resources_sub(struct apei_resources *resources1,
                       struct apei_resources *resources2)
{
        int rc;

        rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
        if (rc)
                return rc;
        return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);
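
/*
 * Illustrative sketch (added for exposition): this is roughly how a
 * trigger table would be requested after the main EINJ resources have
 * already been registered.  The function name, the variable names and
 * the description string are hypothetical.
 */
#if 0	/* usage sketch only */
static int example_request_trigger(struct apei_resources *trigger_resources,
                                   struct apei_resources *einj_resources)
{
        int rc;

        /* drop ranges already requested for the EINJ table itself */
        rc = apei_resources_sub(trigger_resources, einj_resources);
        if (rc)
                return rc;
        return apei_resources_request(trigger_resources, "APEI EINJ Trigger");
}
#endif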

/*
 * The I/O memory/port resource management mechanism is used to check
 * whether the memory/port areas used by GARs conflict with normal
 * memory or the I/O memory/port regions of devices.
 */
int apei_resources_request(struct apei_resources *resources,
                           const char *desc)
{
        struct apei_res *res, *res_bak;
        struct resource *r;

        apei_resources_sub(resources, &apei_resources_all);

        list_for_each_entry(res, &resources->iomem, list) {
                r = request_mem_region(res->start, res->end - res->start,
                                       desc);
                if (!r) {
                        pr_err(APEI_PFX
                "Can not request iomem region <%016llx-%016llx> for GARs.\n",
                               (unsigned long long)res->start,
                               (unsigned long long)res->end);
                        res_bak = res;
                        goto err_unmap_iomem;
                }
        }

        list_for_each_entry(res, &resources->ioport, list) {
                r = request_region(res->start, res->end - res->start, desc);
                if (!r) {
                        pr_err(APEI_PFX
                "Can not request ioport region <%016llx-%016llx> for GARs.\n",
                               (unsigned long long)res->start,
                               (unsigned long long)res->end);
                        res_bak = res;
                        goto err_unmap_ioport;
                }
        }

        apei_resources_merge(&apei_resources_all, resources);

        return 0;
err_unmap_ioport:
        list_for_each_entry(res, &resources->ioport, list) {
                if (res == res_bak)
                        break;
                release_region(res->start, res->end - res->start);
        }
        res_bak = NULL;
err_unmap_iomem:
        list_for_each_entry(res, &resources->iomem, list) {
                if (res == res_bak)
                        break;
                release_mem_region(res->start, res->end - res->start);
        }
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(apei_resources_request);

void apei_resources_release(struct apei_resources *resources)
{
        struct apei_res *res;

        list_for_each_entry(res, &resources->iomem, list)
                release_mem_region(res->start, res->end - res->start);
        list_for_each_entry(res, &resources->ioport, list)
                release_region(res->start, res->end - res->start);

        apei_resources_sub(&apei_resources_all, resources);
}
EXPORT_SYMBOL_GPL(apei_resources_release);

static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
{
        u32 width, space_id;

        width = reg->bit_width;
        space_id = reg->space_id;
        /* Handle possible alignment issues */
        memcpy(paddr, &reg->address, sizeof(*paddr));
        if (!*paddr) {
                pr_warning(FW_BUG APEI_PFX
                           "Invalid physical address in GAR [0x%llx/%u/%u]\n",
                           *paddr, width, space_id);
                return -EINVAL;
        }

        if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
                pr_warning(FW_BUG APEI_PFX
                           "Invalid bit width in GAR [0x%llx/%u/%u]\n",
                           *paddr, width, space_id);
                return -EINVAL;
        }

        if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
            space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
                pr_warning(FW_BUG APEI_PFX
                           "Invalid address space type in GAR [0x%llx/%u/%u]\n",
                           *paddr, width, space_id);
                return -EINVAL;
        }

        return 0;
}

static int collect_res_callback(struct apei_exec_context *ctx,
                                struct acpi_whea_header *entry,
                                void *data)
{
        struct apei_resources *resources = data;
        struct acpi_generic_address *reg = &entry->register_region;
        u8 ins = entry->instruction;
        u64 paddr;
        int rc;

        if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
                return 0;

        rc = apei_check_gar(reg, &paddr);
        if (rc)
                return rc;

        switch (reg->space_id) {
        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
                return apei_res_add(&resources->iomem, paddr,
                                    reg->bit_width / 8);
        case ACPI_ADR_SPACE_SYSTEM_IO:
                return apei_res_add(&resources->ioport, paddr,
                                    reg->bit_width / 8);
        default:
                return -EINVAL;
        }
}

/*
 * The same register may be used by multiple instructions in the GARs,
 * so resources are collected first and requested afterwards.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
                                struct apei_resources *resources)
{
        return apei_exec_for_each_entry(ctx, collect_res_callback,
                                        resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
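
/*
 * Illustrative sketch (added for exposition): the typical life cycle a
 * table driver goes through with the helpers above at initialization.
 * apei_resources_init() is assumed to come from apei-internal.h; the
 * function name, variable names and description string are
 * hypothetical, and error handling is trimmed.
 */
#if 0	/* usage sketch only */
static struct apei_resources example_resources;

static int example_table_resources_init(struct apei_exec_context *ctx)
{
        int rc;

        apei_resources_init(&example_resources);
        /* gather the GAR ranges referenced by the action table */
        rc = apei_exec_collect_resources(ctx, &example_resources);
        if (rc)
                goto err_fini;
        /* claim them, checking for conflicts with other users */
        rc = apei_resources_request(&example_resources, "APEI EXAMPLE");
        if (rc)
                goto err_fini;
        /* pre-map the GARs so they are usable from NMI context */
        rc = apei_exec_pre_map_gars(ctx);
        if (rc)
                goto err_release;
        return 0;

err_release:
        apei_resources_release(&example_resources);
err_fini:
        apei_resources_fini(&example_resources);
        return rc;
}
#endif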

struct dentry *apei_get_debugfs_dir(void)
{
        static struct dentry *dapei;

        if (!dapei)
                dapei = debugfs_create_dir("apei", NULL);

        return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);