/*
 * fs/proc/vmcore.c - Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
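/* Each list element is a struct vmcore, declared in <linux/crash_dump.h>;
 * in this era it essentially carries
 *   { struct list_head list; unsigned long long paddr, size; loff_t offset; }
 * i.e. a physical extent of the old kernel's memory plus the offset at which
 * that extent appears in the exported file (field layout recalled from the
 * header, so treat it as indicative).
 */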
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

struct proc_dir_entry *proc_vmcore = NULL;

/* Reads count bytes from the old kernel's memory (oldmem), page by page,
 * starting at the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);
	if (pfn > saved_max_pfn)
		return -EINVAL;

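	/* Copy at most one page per iteration: copy_oldmem_page(), supplied
	 * by the architecture, maps a single pfn and so cannot cross a page
	 * boundary. */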
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/* Maps vmcore file offset to the corresponding physical address in memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
			       struct vmcore **m_ptr)
{
	struct vmcore *m;
	u64 paddr;

	list_for_each_entry(m, vc_list, list) {
		u64 start, end;
		start = m->offset;
		end = m->offset + m->size - 1;
		if (offset >= start && offset <= end) {
			paddr = m->paddr + offset - start;
			*m_ptr = m;
			return paddr;
		}
	}
	*m_ptr = NULL;
	return 0;
}

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
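/* The exported file, as assembled by the init code below, is laid out as
 *   [ELF header][program headers][merged note data][PT_LOAD memory ...]
 * so a single read may begin in the in-kernel elfcorebuf and continue into
 * the old kernel's memory via the vmcore list.
 */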
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* Calculate left bytes in current memory segment. */
	nr_bytes = (curr_m->size - (start - curr_m->paddr));
	if (tsz > nr_bytes)
		tsz = nr_bytes;

	while (buflen) {
		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/* EOF */
			curr_m = list_entry(curr_m->list.next,
					    struct vmcore, list);
			start = curr_m->paddr;
		}
		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
			tsz = buflen;
		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;
	}
	return acc;
}

const struct file_operations proc_vmcore_operations = {
	.read	= read_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

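/* The total file size is the header area plus the sum of every program
 * header's p_memsz; this mirrors the layout the init code below builds,
 * where each segment's data is appended after the headers. */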
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
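		/* Walk the notes: each Elf64_Nhdr is followed by its name and
		 * descriptor, each padded to a 4-byte boundary per the ELF
		 * spec, hence the "(x + 3) & ~3" rounding below. */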
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
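/* After merging, the header area shrinks to
 *   sizeof(Elf64_Ehdr) + (e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr)
 * with a single PT_NOTE program header whose data starts at note_off;
 * the ELF32 variant below is identical except for the header types. */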

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
							size_t elfsz,
							struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
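		/* In the crash headers prepared by kexec, a segment's
		 * p_offset carries its physical address (a convention of the
		 * kexec-built headers, noted here as an assumption), which is
		 * why it seeds paddr before being rewritten just below. */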
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
							size_t elfsz,
							struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						 struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf64_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						 struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf32_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
			" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
			" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		printk(KERN_WARNING "Warning: Core image elf header"
			" not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		printk(KERN_WARNING "Warning: Core image elf header is not"
			" sane\n");
		return -EINVAL;
	}
	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed in cmdline, then capture the dump. */
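	/* elfcorehdr_addr is filled in early from the "elfcorehdr=" boot
	 * parameter by the architecture's crash_dump setup; in a regular,
	 * non-kdump boot it stays at ELFCORE_ADDR_MAX and we bail out. */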
	if (!(elfcorehdr_addr < ELFCORE_ADDR_MAX))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
		return rc;
	}

	/* Initialize /proc/vmcore size if proc is already up. */
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)
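/* In the kdump capture kernel the dump is typically saved by simply copying
 * this file, e.g. "cp /proc/vmcore /var/crash/vmcore"; see the kdump
 * documentation for the surrounding workflow. */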