author    | Vivek Goyal <vgoyal@in.ibm.com>       | 2006-01-09 20:51:49 -0800
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-10 08:01:28 -0800
commit    | ec9ce0dbaa734bc95ec73cf5c13f202f1adb219d
tree      | 26d799567fe0325ae918e4f0bffa5080a03faeff
parent    | cffe632a25b017dac4b6f060cad31940c6c167b4
[PATCH] kdump: x86_64 save cpu registers upon crash
- Save the cpu registers of all cpus before booting into the crash
  kernel.
- crash_setup_regs() saves the registers of the cpu on which the panic
  occurred.  One concern the ppc64 folks raised is that after capturing the
  register state, one should not pop the current call frame and push a new
  one; hence it has been inlined.  More call frames do get pushed onto the
  stack later (machine_crash_shutdown() and machine_kexec()), but nobody
  will want to backtrace those.  (A rough sketch of the calling path is
  included below.)
- Not very sure about the CFI annotations.  With this patch I am getting a
  decent backtrace with gdb, assuming the compiler has generated enough
  debugging information for crash_kexec().  Coding crash_setup_regs() in
  pure assembly is tricky because it could then not be inlined, and we do
  not want to return (and pop this call frame) after capturing the register
  state.
- The registers of the non-panicking cpus are saved in the NMI handler
  while machine_crash_shutdown() shoots them down.
- Introduce a CRASH_DUMP option in Kconfig for x86_64.
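For context, here is a rough sketch (not part of this patch) of how the
generic crash path in kernel/kexec.c is expected to drive the pieces added
here.  The function name crash_kexec_sketch and the omission of locking and
error handling are simplifications for illustration only:

/* Rough sketch only: the real crash_kexec() in kernel/kexec.c also
 * serializes against a normal kexec and differs in detail. */
#include <linux/kexec.h>	/* struct kimage, machine_kexec() */
#include <asm/kexec.h>		/* crash_setup_regs(), added by this patch */

extern struct kimage *kexec_crash_image;	/* capture kernel, if loaded */
extern void machine_crash_shutdown(struct pt_regs *regs);

void crash_kexec_sketch(struct pt_regs *regs)
{
	struct pt_regs fixed_regs;

	if (!kexec_crash_image)		/* no capture kernel loaded */
		return;

	/* Inlined, so the registers are captured while this frame is
	 * still the top of the stack; nothing pushed after this point
	 * needs to show up in the backtrace. */
	crash_setup_regs(&fixed_regs, regs);

	/* NMIs the other cpus; each saves its own registers in
	 * crash_nmi_callback() before halting. */
	machine_crash_shutdown(&fixed_regs);

	/* Jump into the pre-loaded capture kernel. */
	machine_kexec(kexec_crash_image);
}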
Signed-off-by: Murali M Chakravarthy <muralim@in.ibm.com>
Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com>
Cc: Andi Kleen <ak@muc.de>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/x86_64/Kconfig        |  7
-rw-r--r--  arch/x86_64/kernel/crash.c | 70
-rw-r--r--  include/asm-x86_64/kexec.h | 36
3 files changed, 113 insertions, 0 deletions
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 4f3e925962c3..dd2d116b9ab8 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -427,6 +427,13 @@ config KEXEC
 	  support.  As of this writing the exact hardware interface is
 	  strongly in flux, so no good recommendation can be made.
 
+config CRASH_DUMP
+	bool "kernel crash dumps (EXPERIMENTAL)"
+	depends on EMBEDDED
+	depends on EXPERIMENTAL
+	help
+		Generate crash dump after being started by kexec.
+
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index abc601f36b6e..4e6c3b729e39 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -11,9 +11,12 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/smp.h>
+#include <linux/irq.h>
 #include <linux/reboot.h>
 #include <linux/kexec.h>
 #include <linux/delay.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
 
 #include <asm/processor.h>
 #include <asm/hardirq.h>
@@ -24,6 +27,71 @@
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type,
+						void *data, size_t data_len)
+{
+	struct elf_note note;
+
+	note.n_namesz = strlen(name) + 1;
+	note.n_descsz = data_len;
+	note.n_type = type;
+	memcpy(buf, &note, sizeof(note));
+	buf += (sizeof(note) +3)/4;
+	memcpy(buf, name, note.n_namesz);
+	buf += (note.n_namesz + 3)/4;
+	memcpy(buf, data, note.n_descsz);
+	buf += (note.n_descsz + 3)/4;
+
+	return buf;
+}
+
+static void final_note(u32 *buf)
+{
+	struct elf_note note;
+
+	note.n_namesz = 0;
+	note.n_descsz = 0;
+	note.n_type = 0;
+	memcpy(buf, &note, sizeof(note));
+}
+
+static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
+{
+	struct elf_prstatus prstatus;
+	u32 *buf;
+
+	if ((cpu < 0) || (cpu >= NR_CPUS))
+		return;
+
+	/* Using ELF notes here is opportunistic.
+	 * I need a well defined structure format
+	 * for the data I pass, and I need tags
+	 * on the data to indicate what information I have
+	 * squirrelled away. ELF notes happen to provide
+	 * all of that that no need to invent something new.
+	 */
+
+	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
+
+	if (!buf)
+		return;
+
+	memset(&prstatus, 0, sizeof(prstatus));
+	prstatus.pr_pid = current->pid;
+	elf_core_copy_regs(&prstatus.pr_reg, regs);
+	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+				sizeof(prstatus));
+	final_note(buf);
+}
+
+static void crash_save_self(struct pt_regs *regs)
+{
+	int cpu;
+
+	cpu = smp_processor_id();
+	crash_save_this_cpu(regs, cpu);
+}
+
 #ifdef CONFIG_SMP
 static atomic_t waiting_for_crash_ipi;
 
@@ -38,6 +106,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
 		return 1;
 	local_irq_disable();
 
+	crash_save_this_cpu(regs, cpu);
 	disable_local_APIC();
 	atomic_dec(&waiting_for_crash_ipi);
 	/* Assume hlt works */
@@ -113,4 +182,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
 
 	disable_IO_APIC();
 #endif
+	crash_save_self(regs);
 }
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index cea78543a574..ae28cd44bcd3 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -3,6 +3,7 @@
 
 #include <asm/page.h>
 #include <asm/proto.h>
+#include <asm/ptrace.h>
 
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -27,4 +28,39 @@
 
 #define MAX_NOTE_BYTES 1024
 
+/*
+ * Saving the registers of the cpu on which panic occured in
+ * crash_kexec to save a valid sp. The registers of other cpus
+ * will be saved in machine_crash_shutdown while shooting down them.
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+						struct pt_regs *oldregs)
+{
+	if (oldregs)
+		memcpy(newregs, oldregs, sizeof(*newregs));
+	else {
+		__asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
+		__asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
+		__asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
+		__asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
+		__asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
+		__asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
+		__asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
+		__asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
+		__asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
+		__asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
+		__asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
+		__asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
+		__asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
+		__asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
+		__asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
+		__asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
+		__asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
+		__asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
+		__asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags));
+
+		newregs->rip = (unsigned long)current_text_addr();
+	}
+}
 #endif /* _X86_64_KEXEC_H */
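The per-cpu buffer filled by append_elf_note() and final_note() above is just
a sequence of standard ELF notes followed by an all-zero terminator.  As a
minimal, hedged sketch (plain userspace C; the names note_hdr, NOTE_ALIGN and
walk_notes are illustrative and do not exist in the kernel), this is how a
dump tool could walk such a buffer once the capture kernel has exposed it,
e.g. through /proc/vmcore; how the buffer gets there is outside the scope of
this patch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct elf_note: three 32-bit fields, then name, then desc. */
struct note_hdr {
	uint32_t n_namesz;
	uint32_t n_descsz;
	uint32_t n_type;
};

/* Same 4-byte rounding that append_elf_note() applies when advancing buf. */
#define NOTE_ALIGN(x) (((x) + 3u) & ~3u)

void walk_notes(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct note_hdr) <= len) {
		struct note_hdr hdr;
		const char *name;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.n_namesz == 0 && hdr.n_descsz == 0 && hdr.n_type == 0)
			break;	/* the all-zero header written by final_note() */

		name = (const char *)(buf + off + sizeof(hdr));
		printf("note: name=%.*s type=%u desc=%u bytes\n",
		       (int)hdr.n_namesz, name,
		       (unsigned)hdr.n_type, (unsigned)hdr.n_descsz);

		/* Name and descriptor are each padded to a 4-byte boundary. */
		off += sizeof(hdr) + NOTE_ALIGN(hdr.n_namesz)
				   + NOTE_ALIGN(hdr.n_descsz);
	}
}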