/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
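
/*
 * Example (editor's sketch, not part of the original file): a hypervisor
 * guest backend can register a callback here so that pages it knows carry
 * no data (e.g. ballooned-out pages) are skipped while dumping.  The
 * helper names below are hypothetical:
 *
 *	static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return example_pfn_is_ballooned(pfn) ? 0 : 1;
 *	}
 *
 *	register_oldmem_pfn_is_ram(&example_oldmem_pfn_is_ram);
 *	...
 *	unregister_oldmem_pfn_is_ram();
 */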

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads a page from the oldmem device at the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
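
/*
 * Worked example for the loop above (editor's illustration, assuming
 * PAGE_SIZE == 4096): a read of count = 100 starting at *ppos = 4090
 * gives pfn = 0, offset = 4090.  The first iteration copies
 * PAGE_SIZE - offset = 6 bytes from pfn 0; the second iteration starts
 * at offset 0 of pfn 1 and copies the remaining 94 bytes.
 */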

/* Maps vmcore file offset to respective physical address in memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
					struct vmcore **m_ptr)
{
	struct vmcore *m;
	u64 paddr;

	list_for_each_entry(m, vc_list, list) {
		u64 start, end;
		start = m->offset;
		end = m->offset + m->size - 1;
		if (offset >= start && offset <= end) {
			paddr = m->paddr + offset - start;
			*m_ptr = m;
			return paddr;
		}
	}
	*m_ptr = NULL;
	return 0;
}
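
/*
 * Resulting /proc/vmcore layout that the offset fields describe
 * (editor's sketch):
 *
 *	file offset 0:	ELF header
 *			program headers (merged PT_NOTE + PT_LOADs)
 *			merged note data	<- one struct vmcore per
 *			memory of PT_LOAD 0	   contiguous chunk, holding
 *			memory of PT_LOAD 1	   m->offset, m->paddr and
 *			...			   m->size
 */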

/* Read from the ELF header and then the crash dump.  On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;

	while (buflen) {
		tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK));

		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;

		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/* EOF */
			curr_m = list_entry(curr_m->list.next,
						struct vmcore, list);
			start = curr_m->paddr;
		}
	}
	return acc;
}

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
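
/*
 * Worked example for the note-size arithmetic above (editor's
 * illustration): a note named "CORE" has n_namesz = 5 ("CORE" plus its
 * NUL), so the name is padded to (5 + 3) & ~3 = 8 bytes; with a
 * hypothetical n_descsz = 336 (already 4-byte aligned) the note occupies
 * sizeof(Elf64_Nhdr) + 8 + 336 = 356 bytes, and real_sz advances by that
 * amount per note.
 */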

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

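/*
 * Worked example for the offset arithmetic above (editor's sketch,
 * hypothetical numbers): with one merged PT_NOTE of 0x1000 bytes of note
 * data and two PT_LOAD segments, e_phnum is 3, so the first PT_LOAD's
 * data lands at sizeof(Elf64_Ehdr) + 3 * sizeof(Elf64_Phdr) + 0x1000 =
 * 64 + 168 + 4096 = 4328 bytes into /proc/vmcore, and the second follows
 * at 4328 plus the first segment's p_memsz.
 */
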
/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf64_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf32_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char*)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}
	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)
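
/*
 * Usage from userspace (editor's sketch, not part of this file): after the
 * kdump kernel boots with elfcorehdr= on its command line, the dump is
 * captured by copying /proc/vmcore, e.g. with a minimal reader along these
 * lines (out_fd is a hypothetical destination descriptor):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(out_fd, buf, n);
 *	close(fd);
 */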

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	kfree(elfcorebuf);
	elfcorebuf = NULL;
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);