fs/proc/vmcore.c (cascardo/linux.git, commit 0b1c04e5e2c50eea54d13b7b953700dcd74c399d)
/*
 *      fs/proc/vmcore.c Interface for accessing the crash
 *                               dump from the system's previous life.
 *      Heavily borrowed from fs/proc/kcore.c
 *      Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *      Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
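
/*
 * Usage sketch (not part of this file; all names below are hypothetical):
 * a hypervisor balloon driver running in the kdump kernel could register
 * a callback so that reads of ballooned pages are skipped and returned as
 * zeroes, e.g.:
 *
 *      static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *      {
 *              return example_pfn_is_backed(pfn) ? 1 : 0;
 *      }
 *
 *      register_oldmem_pfn_is_ram(example_oldmem_pfn_is_ram);
 *      ...
 *      unregister_oldmem_pfn_is_ram();
 */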

static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}

/* Reads from the oldmem device, page by page, starting at the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}
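
/*
 * Worked example (assuming PAGE_SIZE == 4096): a read of 0x2000 bytes
 * starting at *ppos == 0x1800 is split into three copy_oldmem_page()
 * calls: pfn 1 at offset 0x800 for 0x800 bytes, pfn 2 for a full
 * 0x1000-byte page, and pfn 3 for the remaining 0x800 bytes.  Pages for
 * which pfn_is_ram() returns 0 are filled with zeroes instead.
 */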

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
                                size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = elfcorebuf_sz - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = m->offset + m->size - *fpos;
                        if (buflen < tsz)
                                tsz = buflen;
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start, 1);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}
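
/*
 * Reading sketch: the first elfcorebuf_sz bytes of /proc/vmcore come
 * straight from elfcorebuf (the merged ELF headers); any remaining *fpos
 * is looked up in vmcore_list, where each struct vmcore maps the file
 * range [m->offset, m->offset + m->size) onto old-memory physical
 * addresses starting at m->paddr.  For example, with elfcorebuf_sz of
 * 0x1000 and a first chunk of size 0x2000 at offset 0x1000 (illustrative
 * numbers), a read at *fpos == 0x1800 is served from physical address
 * m->paddr + 0x800.
 */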

static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
};

static struct vmcore* __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size_elf64(char *elfptr, size_t elfsz)
{
        int i;
        u64 size;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
        size = elfsz;
        for (i = 0; i < ehdr_ptr->e_phnum; i++) {
                size += phdr_ptr->p_memsz;
                phdr_ptr++;
        }
        return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr, size_t elfsz)
{
        int i;
        u64 size;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
        size = elfsz;
        for (i = 0; i < ehdr_ptr->e_phnum; i++) {
                size += phdr_ptr->p_memsz;
                phdr_ptr++;
        }
        return size;
}
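
/*
 * In both variants the reported size is simply
 *
 *      vmcore_size = elfsz + sum of p_memsz over all program headers
 *
 * e.g. headers of 0x1000 bytes plus program-header p_memsz values summing
 * to 0x200000 give a vmcore of 0x201000 bytes (illustrative numbers).
 */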

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                                struct list_head *vc_list)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr, *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                int j;
                void *notes_section;
                struct vmcore *new;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                nr_ptnote++;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                for (j = 0; j < max_sz; j += sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        sz = sizeof(Elf64_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
                }

                /* Add this contiguous chunk of notes section to vmcore list. */
                new = get_new_element();
                if (!new) {
                        kfree(notes_section);
                        return -ENOMEM;
                }
                new->paddr = phdr_ptr->p_offset;
                new->size = real_sz;
                list_add_tail(&new->list, vc_list);
                phdr_sz += real_sz;
                kfree(notes_section);
        }

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset  = note_off;
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}
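
/*
 * Note entry walk, illustrated: each entry occupies
 *
 *      sizeof(Elf64_Nhdr) + roundup(n_namesz, 4) + roundup(n_descsz, 4)
 *
 * bytes, so an entry with n_namesz == 5 and n_descsz == 0x150
 * (illustrative values) advances nhdr_ptr by 12 + 8 + 0x150 bytes.
 * After merging, e_phnum shrinks from, say, 10 headers including 4
 * PT_NOTEs down to 7: the 4 notes are replaced by the single merged
 * PT_NOTE header prepared above.
 */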

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                                struct list_head *vc_list)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr, *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                int j;
                void *notes_section;
                struct vmcore *new;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                nr_ptnote++;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                for (j = 0; j < max_sz; j += sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        sz = sizeof(Elf32_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
                }

                /* Add this contiguous chunk of notes section to vmcore list. */
                new = get_new_element();
                if (!new) {
                        kfree(notes_section);
                        return -ENOMEM;
                }
                new->paddr = phdr_ptr->p_offset;
                new->size = real_sz;
                list_add_tail(&new->list, vc_list);
                phdr_sz += real_sz;
                kfree(notes_section);
        }

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = note_off;
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* First program header is PT_NOTE header. */
        vmcore_off = elfsz +
                        phdr_ptr->p_memsz; /* Note sections */

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = phdr_ptr->p_offset;
                new->size = phdr_ptr->p_memsz;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off;
                vmcore_off = vmcore_off + phdr_ptr->p_memsz;
        }
        return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* First program header is PT_NOTE header. */
        vmcore_off = elfsz +
                        phdr_ptr->p_memsz; /* Note sections */

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = phdr_ptr->p_offset;
                new->size = phdr_ptr->p_memsz;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset */
                phdr_ptr->p_offset = vmcore_off;
                vmcore_off = vmcore_off + phdr_ptr->p_memsz;
        }
        return 0;
}
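
/*
 * Offset rewrite, illustrated: vmcore_off starts right after the ELF
 * headers plus the merged note data.  With elfsz == 0x2000 and a note
 * segment of 0x1000 bytes (illustrative numbers), the first PT_LOAD gets
 * p_offset == 0x3000, and each following PT_LOAD starts where the
 * previous one's p_memsz ended.
 */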

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz,
                                           struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip Elf header and program headers. */
        vmcore_off = elfsz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
        int rc=0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                        &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf32_headers(void)
{
        int rc=0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = read_from_oldmem((char*)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !elf_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                                &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc=0;

        addr = elfcorehdr_addr;
        rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;

                /* Determine vmcore size. */
                vmcore_size = get_vmcore_size_elf64(elfcorebuf, elfcorebuf_sz);
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;

                /* Determine vmcore size. */
                vmcore_size = get_vmcore_size_elf32(elfcorebuf, elfcorebuf_sz);
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }
        return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* If elfcorehdr= has been passed in cmdline, then capture the dump. */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
module_init(vmcore_init)
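
/*
 * Consumption sketch: once registered, /proc/vmcore behaves as an
 * ordinary read-only ELF core file of size vmcore_size; the capture
 * environment typically saves it with tools such as cp or makedumpfile,
 * e.g. "makedumpfile -c -d 31 /proc/vmcore /var/crash/vmcore"
 * (illustrative command line).
 */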

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        struct list_head *pos, *next;

        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        list_for_each_safe(pos, next, &vmcore_list) {
                struct vmcore *m;

                m = list_entry(pos, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);