vmcore: allocate buffer for ELF headers on page-size alignment
fs/proc/vmcore.c
/*
 * fs/proc/vmcore.c Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

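/*
 * For reference: each node on vmcore_list is a struct vmcore, declared
 * outside this file (include/linux/kcore.h in this tree); a rough sketch,
 * naming only the members used below:
 *
 *	struct vmcore {
 *		struct list_head list;		// links into vmcore_list
 *		unsigned long long paddr;	// physical start of the chunk
 *		unsigned long long size;	// length of the chunk in bytes
 *		loff_t offset;			// chunk position in /proc/vmcore
 *	};
 *
 * Exact field types may differ from this sketch.
 */
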
/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

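/*
 * Illustrative only, not part of this file: a hypervisor balloon backend
 * (Xen, for instance) can claim the check above by registering a callback:
 *
 *	static int example_pfn_is_ram(unsigned long pfn)
 *	{
 *		// hypothetical helper standing in for the hypervisor query
 *		return example_page_is_populated(pfn) ? 1 : 0;
 *	}
 *
 *	register_oldmem_pfn_is_ram(&example_pfn_is_ram);
 *
 * example_pfn_is_ram() and example_page_is_populated() are made-up names
 * for this sketch.
 */
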
/* Reads up to count bytes from the oldmem device starting at *ppos; a read
 * may span multiple pages. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

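/*
 * Worked example of the split above, assuming PAGE_SIZE == 4096: for
 * *ppos == 0x1234, pfn = 0x1234 / 0x1000 = 1 and offset = 0x234, so the
 * first iteration copies at most 4096 - 0x234 bytes out of page frame 1;
 * each later iteration continues at offset 0 of the next frame.
 */
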
/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = m->offset + m->size - *fpos;
			if (buflen < tsz)
				tsz = buflen;
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, 1);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

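/*
 * Userspace consumes this read path either sequentially or with seeks; a
 * minimal capture, assuming a standard kdump environment, is simply:
 *
 *	cp /proc/vmcore /var/crash/vmcore
 *
 * Filtering tools such as makedumpfile read the same interface selectively,
 * which is why read_from_oldmem() returns zeros for non-RAM pages instead
 * of failing.
 */
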
static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size_elf64(char *elfptr, size_t elfsz)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	size = elfsz;
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr, size_t elfsz)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	size = elfsz;
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

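/*
 * Numeric sketch: with page-aligned headers of 4096 bytes, one merged note
 * segment of 1024 bytes, and two PT_LOAD segments of 512 MiB each, the size
 * computed above is 4096 + 1024 + 2 * 536870912 bytes, just over 1 GiB.
 * This is the value later exported through proc_vmcore->size.
 */
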
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

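/*
 * Note-walk arithmetic above, for illustration: an entry whose n_namesz is
 * 5 ("CORE" plus NUL) and whose n_descsz is 336 (a plausible NT_PRSTATUS
 * size on x86_64) advances by
 *
 *	sz = sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4)
 *	   = 12 + 8 + 336 = 356 bytes,
 *
 * since both the name and the descriptor are padded to 4-byte boundaries.
 */
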
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = elfsz +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

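/*
 * Note on the p_offset reuse above: the crash-time ELF headers prepared by
 * kexec conventionally carry each segment's physical address in p_offset
 * as well as p_paddr, so it is captured into new->paddr before p_offset is
 * rewritten to the segment's position within the /proc/vmcore file.
 */
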
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = elfsz +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header and program headers. */
	vmcore_off = elfsz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

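/*
 * Resulting file layout, taking the 64-bit path as the example:
 *
 *	offset 0:		Elf64_Ehdr + program headers,
 *				zero-padded up to elfcorebuf_sz (page aligned)
 *	offset elfcorebuf_sz:	merged note data chunks
 *	after the notes:	one chunk per PT_LOAD segment
 *
 * The m->offset values assigned here are what read_vmcore() compares *fpos
 * against when walking vmcore_list.
 */
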
static void free_elfcorebuf(void)
{
	/* Must free with the same order used for the allocation, hence
	 * elfcorebuf_sz_orig rather than the possibly shrunken elfcorebuf_sz.
	 */
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

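/*
 * Why __get_free_pages() rather than kmalloc() above: per the commit title,
 * the ELF header buffer is allocated on a page-size boundary and, together
 * with the roundup() in the note merging, zero-padded out to whole pages,
 * so the header region is self-contained in page units. A plausible
 * motivation is to allow the headers to be remapped to user space later;
 * the code as shown only copies them out with copy_to_user().
 */
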
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf, elfcorebuf_sz);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf, elfcorebuf_sz);
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}
	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed in cmdline, then capture the dump. */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);