/*
 *      fs/proc/vmcore.c   Interface for accessing the crash
 *                         dump from the system's previous life.
 *      Heavily borrowed from fs/proc/kcore.c
 *      Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *      Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

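/*
 * Decide whether a pfn from the old kernel should actually be read.
 * Defaults to "is RAM"; if a hook has been registered (e.g. by a balloon
 * driver), its verdict is used instead.
 */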
static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}

/* Read from the oldmem device, page by page, starting at the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
                                size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = elfcorebuf_sz - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = m->offset + m->size - *fpos;
                        if (buflen < tsz)
                                tsz = buflen;
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start, 1);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}

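/* /proc/vmcore is read-only; reads are served by read_vmcore(). */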
static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
};

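/* Allocate a zeroed element for the vmcore list. */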
static struct vmcore* __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

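/*
 * Total size of the vmcore file: the ELF header, all program headers, plus
 * the memory size of every segment described by those headers.
 */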
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
        int i;
        u64 size;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
        size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++) {
                size += phdr_ptr->p_memsz;
                phdr_ptr++;
        }
        return size;
}

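/* 32-bit counterpart of get_vmcore_size_elf64(). */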
static u64 __init get_vmcore_size_elf32(char *elfptr)
{
        int i;
        u64 size;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
        size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++) {
                size += phdr_ptr->p_memsz;
                phdr_ptr++;
        }
        return size;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                                struct list_head *vc_list)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr, *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                int j;
                void *notes_section;
                struct vmcore *new;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                nr_ptnote++;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                for (j = 0; j < max_sz; j += sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        sz = sizeof(Elf64_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
                }

                /* Add this contiguous chunk of notes section to vmcore list. */
                new = get_new_element();
                if (!new) {
                        kfree(notes_section);
                        return -ENOMEM;
                }
                new->paddr = phdr_ptr->p_offset;
                new->size = real_sz;
                list_add_tail(&new->list, vc_list);
                phdr_sz += real_sz;
                kfree(notes_section);
        }

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type = PT_NOTE;
        phdr.p_flags = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset = note_off;
        phdr.p_vaddr = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                                struct list_head *vc_list)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr, *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                int j;
                void *notes_section;
                struct vmcore *new;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                nr_ptnote++;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                for (j = 0; j < max_sz; j += sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        sz = sizeof(Elf32_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
                }

                /* Add this contiguous chunk of notes section to vmcore list. */
                new = get_new_element();
                if (!new) {
                        kfree(notes_section);
                        return -ENOMEM;
                }
                new->paddr = phdr_ptr->p_offset;
                new->size = real_sz;
                list_add_tail(&new->list, vc_list);
                phdr_sz += real_sz;
                kfree(notes_section);
        }

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type = PT_NOTE;
        phdr.p_flags = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset = note_off;
        phdr.p_vaddr = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* First program header is PT_NOTE header. */
        vmcore_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
                        phdr_ptr->p_memsz; /* Note sections */

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = phdr_ptr->p_offset;
                new->size = phdr_ptr->p_memsz;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off;
                vmcore_off = vmcore_off + phdr_ptr->p_memsz;
        }
        return 0;
}

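/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. 32-bit variant. */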
static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* First program header is PT_NOTE header. */
        vmcore_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
                        phdr_ptr->p_memsz; /* Note sections */

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = phdr_ptr->p_offset;
                new->size = phdr_ptr->p_memsz;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off;
                vmcore_off = vmcore_off + phdr_ptr->p_memsz;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
                                                struct list_head *vc_list)
{
        loff_t vmcore_off;
        Elf64_Ehdr *ehdr_ptr;
        struct vmcore *m;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        /* Skip Elf header and program headers. */
        vmcore_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
                                                struct list_head *vc_list)
{
        loff_t vmcore_off;
        Elf32_Ehdr *ehdr_ptr;
        struct vmcore *m;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        /* Skip Elf header and program headers. */
        vmcore_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

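/*
 * Read and validate the ELF64 header exported by the crashed kernel, copy
 * all ELF headers into elfcorebuf, merge the PT_NOTE headers and build the
 * vmcore list from the PT_LOAD headers.
 */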
static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
        if (rc < 0) {
                kfree(elfcorebuf);
                return rc;
        }

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                        &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
        return 0;
}

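/* Same as parse_crash_elf64_headers(), but for an ELF32 core header. */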
static int __init parse_crash_elf32_headers(void)
{
        int rc = 0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !elf_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
        if (rc < 0) {
                kfree(elfcorebuf);
                return rc;
        }

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                        &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
        return 0;
}

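/*
 * Dispatch to the ELF32 or ELF64 parser based on the e_ident class of the
 * exported core header, then record the resulting vmcore size.
 */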
static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;

                /* Determine vmcore size. */
                vmcore_size = get_vmcore_size_elf64(elfcorebuf);
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;

                /* Determine vmcore size. */
                vmcore_size = get_vmcore_size_elf32(elfcorebuf);
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }
        return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* If elfcorehdr= has been passed in cmdline, then capture the dump. */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
module_init(vmcore_init)

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        struct list_head *pos, *next;

        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        list_for_each_safe(pos, next, &vmcore_list) {
                struct vmcore *m;

                m = list_entry(pos, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        kfree(elfcorebuf);
        elfcorebuf = NULL;
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);