Commit fe7cacc1 authored by Jan Beulich's avatar Jan Beulich Committed by Linus Torvalds

[PATCH] i386: reliable stack trace support i386 entry.S

To increase the usefulness of reliable stack unwinding, this adds CFI
unwind annotations to many low-level i386 routines.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 176a2718
...@@ -48,6 +48,7 @@ ...@@ -48,6 +48,7 @@
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h" #include "irq_vectors.h"
#define nr_syscalls ((syscall_table_size)/4) #define nr_syscalls ((syscall_table_size)/4)
...@@ -85,31 +86,67 @@ VM_MASK = 0x00020000 ...@@ -85,31 +86,67 @@ VM_MASK = 0x00020000
#define SAVE_ALL \ #define SAVE_ALL \
cld; \ cld; \
pushl %es; \ pushl %es; \
CFI_ADJUST_CFA_OFFSET 4;\
/*CFI_REL_OFFSET es, 0;*/\
pushl %ds; \ pushl %ds; \
CFI_ADJUST_CFA_OFFSET 4;\
/*CFI_REL_OFFSET ds, 0;*/\
pushl %eax; \ pushl %eax; \
CFI_ADJUST_CFA_OFFSET 4;\
CFI_REL_OFFSET eax, 0;\
pushl %ebp; \ pushl %ebp; \
CFI_ADJUST_CFA_OFFSET 4;\
CFI_REL_OFFSET ebp, 0;\
pushl %edi; \ pushl %edi; \
CFI_ADJUST_CFA_OFFSET 4;\
CFI_REL_OFFSET edi, 0;\
pushl %esi; \ pushl %esi; \
CFI_ADJUST_CFA_OFFSET 4;\
CFI_REL_OFFSET esi, 0;\
pushl %edx; \ pushl %edx; \
CFI_ADJUST_CFA_OFFSET 4;\
CFI_REL_OFFSET edx, 0;\
pushl %ecx; \ pushl %ecx; \
CFI_ADJUST_CFA_OFFSET 4;\
CFI_REL_OFFSET ecx, 0;\
pushl %ebx; \ pushl %ebx; \
CFI_ADJUST_CFA_OFFSET 4;\
CFI_REL_OFFSET ebx, 0;\
movl $(__USER_DS), %edx; \ movl $(__USER_DS), %edx; \
movl %edx, %ds; \ movl %edx, %ds; \
movl %edx, %es; movl %edx, %es;
#define RESTORE_INT_REGS \ #define RESTORE_INT_REGS \
popl %ebx; \ popl %ebx; \
CFI_ADJUST_CFA_OFFSET -4;\
CFI_RESTORE ebx;\
popl %ecx; \ popl %ecx; \
CFI_ADJUST_CFA_OFFSET -4;\
CFI_RESTORE ecx;\
popl %edx; \ popl %edx; \
CFI_ADJUST_CFA_OFFSET -4;\
CFI_RESTORE edx;\
popl %esi; \ popl %esi; \
CFI_ADJUST_CFA_OFFSET -4;\
CFI_RESTORE esi;\
popl %edi; \ popl %edi; \
CFI_ADJUST_CFA_OFFSET -4;\
CFI_RESTORE edi;\
popl %ebp; \ popl %ebp; \
popl %eax CFI_ADJUST_CFA_OFFSET -4;\
CFI_RESTORE ebp;\
popl %eax; \
CFI_ADJUST_CFA_OFFSET -4;\
CFI_RESTORE eax
#define RESTORE_REGS \ #define RESTORE_REGS \
RESTORE_INT_REGS; \ RESTORE_INT_REGS; \
1: popl %ds; \ 1: popl %ds; \
CFI_ADJUST_CFA_OFFSET -4;\
/*CFI_RESTORE ds;*/\
2: popl %es; \ 2: popl %es; \
CFI_ADJUST_CFA_OFFSET -4;\
/*CFI_RESTORE es;*/\
.section .fixup,"ax"; \ .section .fixup,"ax"; \
3: movl $0,(%esp); \ 3: movl $0,(%esp); \
jmp 1b; \ jmp 1b; \
...@@ -122,13 +159,43 @@ VM_MASK = 0x00020000 ...@@ -122,13 +159,43 @@ VM_MASK = 0x00020000
.long 2b,4b; \ .long 2b,4b; \
.previous .previous
#define RING0_INT_FRAME \
CFI_STARTPROC simple;\
CFI_DEF_CFA esp, 3*4;\
/*CFI_OFFSET cs, -2*4;*/\
CFI_OFFSET eip, -3*4
#define RING0_EC_FRAME \
CFI_STARTPROC simple;\
CFI_DEF_CFA esp, 4*4;\
/*CFI_OFFSET cs, -2*4;*/\
CFI_OFFSET eip, -3*4
#define RING0_PTREGS_FRAME \
CFI_STARTPROC simple;\
CFI_DEF_CFA esp, OLDESP-EBX;\
/*CFI_OFFSET cs, CS-OLDESP;*/\
CFI_OFFSET eip, EIP-OLDESP;\
/*CFI_OFFSET es, ES-OLDESP;*/\
/*CFI_OFFSET ds, DS-OLDESP;*/\
CFI_OFFSET eax, EAX-OLDESP;\
CFI_OFFSET ebp, EBP-OLDESP;\
CFI_OFFSET edi, EDI-OLDESP;\
CFI_OFFSET esi, ESI-OLDESP;\
CFI_OFFSET edx, EDX-OLDESP;\
CFI_OFFSET ecx, ECX-OLDESP;\
CFI_OFFSET ebx, EBX-OLDESP
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
CFI_STARTPROC
pushl %eax pushl %eax
CFI_ADJUST_CFA_OFFSET -4
call schedule_tail call schedule_tail
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
popl %eax popl %eax
CFI_ADJUST_CFA_OFFSET -4
jmp syscall_exit jmp syscall_exit
CFI_ENDPROC
/* /*
* Return to user mode is not as complex as all this looks, * Return to user mode is not as complex as all this looks,
...@@ -139,6 +206,7 @@ ENTRY(ret_from_fork) ...@@ -139,6 +206,7 @@ ENTRY(ret_from_fork)
# userspace resumption stub bypassing syscall exit tracing # userspace resumption stub bypassing syscall exit tracing
ALIGN ALIGN
RING0_PTREGS_FRAME
ret_from_exception: ret_from_exception:
preempt_stop preempt_stop
ret_from_intr: ret_from_intr:
...@@ -171,20 +239,33 @@ need_resched: ...@@ -171,20 +239,33 @@ need_resched:
call preempt_schedule_irq call preempt_schedule_irq
jmp need_resched jmp need_resched
#endif #endif
CFI_ENDPROC
/* SYSENTER_RETURN points to after the "sysenter" instruction in /* SYSENTER_RETURN points to after the "sysenter" instruction in
the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
# sysenter call handler stub # sysenter call handler stub
ENTRY(sysenter_entry) ENTRY(sysenter_entry)
CFI_STARTPROC simple
CFI_DEF_CFA esp, 0
CFI_REGISTER esp, ebp
movl TSS_sysenter_esp0(%esp),%esp movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp: sysenter_past_esp:
sti sti
pushl $(__USER_DS) pushl $(__USER_DS)
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ss, 0*/
pushl %ebp pushl %ebp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET esp, 0
pushfl pushfl
CFI_ADJUST_CFA_OFFSET 4
pushl $(__USER_CS) pushl $(__USER_CS)
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET cs, 0*/
pushl $SYSENTER_RETURN pushl $SYSENTER_RETURN
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eip, 0
/* /*
* Load the potential sixth argument from user stack. * Load the potential sixth argument from user stack.
...@@ -199,6 +280,7 @@ sysenter_past_esp: ...@@ -199,6 +280,7 @@ sysenter_past_esp:
.previous .previous
pushl %eax pushl %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
...@@ -219,11 +301,14 @@ sysenter_past_esp: ...@@ -219,11 +301,14 @@ sysenter_past_esp:
xorl %ebp,%ebp xorl %ebp,%ebp
sti sti
sysexit sysexit
CFI_ENDPROC
# system call handler stub # system call handler stub
ENTRY(system_call) ENTRY(system_call)
RING0_INT_FRAME # can't unwind into user space anyway
pushl %eax # save orig_eax pushl %eax # save orig_eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
testl $TF_MASK,EFLAGS(%esp) testl $TF_MASK,EFLAGS(%esp)
...@@ -256,10 +341,12 @@ restore_all: ...@@ -256,10 +341,12 @@ restore_all:
movb CS(%esp), %al movb CS(%esp), %al
andl $(VM_MASK | (4 << 8) | 3), %eax andl $(VM_MASK | (4 << 8) | 3), %eax
cmpl $((4 << 8) | 3), %eax cmpl $((4 << 8) | 3), %eax
CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS je ldt_ss # returning to user-space with LDT SS
restore_nocheck: restore_nocheck:
RESTORE_REGS RESTORE_REGS
addl $4, %esp addl $4, %esp
CFI_ADJUST_CFA_OFFSET -4
1: iret 1: iret
.section .fixup,"ax" .section .fixup,"ax"
iret_exc: iret_exc:
...@@ -273,6 +360,7 @@ iret_exc: ...@@ -273,6 +360,7 @@ iret_exc:
.long 1b,iret_exc .long 1b,iret_exc
.previous .previous
CFI_RESTORE_STATE
ldt_ss: ldt_ss:
larl OLDSS(%esp), %eax larl OLDSS(%esp), %eax
jnz restore_nocheck jnz restore_nocheck
...@@ -285,11 +373,13 @@ ldt_ss: ...@@ -285,11 +373,13 @@ ldt_ss:
* CPUs, which we can try to work around to make * CPUs, which we can try to work around to make
* dosemu and wine happy. */ * dosemu and wine happy. */
subl $8, %esp # reserve space for switch16 pointer subl $8, %esp # reserve space for switch16 pointer
CFI_ADJUST_CFA_OFFSET 8
cli cli
movl %esp, %eax movl %esp, %eax
/* Set up the 16bit stack frame with switch32 pointer on top, /* Set up the 16bit stack frame with switch32 pointer on top,
* and a switch16 pointer on top of the current frame. */ * and a switch16 pointer on top of the current frame. */
call setup_x86_bogus_stack call setup_x86_bogus_stack
CFI_ADJUST_CFA_OFFSET -8 # frame has moved
RESTORE_REGS RESTORE_REGS
lss 20+4(%esp), %esp # switch to 16bit stack lss 20+4(%esp), %esp # switch to 16bit stack
1: iret 1: iret
...@@ -297,9 +387,11 @@ ldt_ss: ...@@ -297,9 +387,11 @@ ldt_ss:
.align 4 .align 4
.long 1b,iret_exc .long 1b,iret_exc
.previous .previous
CFI_ENDPROC
# perform work that needs to be done immediately before resumption # perform work that needs to be done immediately before resumption
ALIGN ALIGN
RING0_PTREGS_FRAME # can't unwind into user space anyway
work_pending: work_pending:
testb $_TIF_NEED_RESCHED, %cl testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig jz work_notifysig
...@@ -329,8 +421,10 @@ work_notifysig: # deal with pending signals and ...@@ -329,8 +421,10 @@ work_notifysig: # deal with pending signals and
work_notifysig_v86: work_notifysig_v86:
#ifdef CONFIG_VM86 #ifdef CONFIG_VM86
pushl %ecx # save ti_flags for do_notify_resume pushl %ecx # save ti_flags for do_notify_resume
CFI_ADJUST_CFA_OFFSET 4
call save_v86_state # %eax contains pt_regs pointer call save_v86_state # %eax contains pt_regs pointer
popl %ecx popl %ecx
CFI_ADJUST_CFA_OFFSET -4
movl %eax, %esp movl %eax, %esp
xorl %edx, %edx xorl %edx, %edx
call do_notify_resume call do_notify_resume
...@@ -363,19 +457,21 @@ syscall_exit_work: ...@@ -363,19 +457,21 @@ syscall_exit_work:
movl $1, %edx movl $1, %edx
call do_syscall_trace call do_syscall_trace
jmp resume_userspace jmp resume_userspace
CFI_ENDPROC
ALIGN RING0_INT_FRAME # can't unwind into user space anyway
syscall_fault: syscall_fault:
pushl %eax # save orig_eax pushl %eax # save orig_eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
movl $-EFAULT,EAX(%esp) movl $-EFAULT,EAX(%esp)
jmp resume_userspace jmp resume_userspace
ALIGN
syscall_badsys: syscall_badsys:
movl $-ENOSYS,EAX(%esp) movl $-ENOSYS,EAX(%esp)
jmp resume_userspace jmp resume_userspace
CFI_ENDPROC
#define FIXUP_ESPFIX_STACK \ #define FIXUP_ESPFIX_STACK \
movl %esp, %eax; \ movl %esp, %eax; \
...@@ -387,16 +483,21 @@ syscall_badsys: ...@@ -387,16 +483,21 @@ syscall_badsys:
movl %eax, %esp; movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \ #define UNWIND_ESPFIX_STACK \
pushl %eax; \ pushl %eax; \
CFI_ADJUST_CFA_OFFSET 4; \
movl %ss, %eax; \ movl %ss, %eax; \
/* see if on 16bit stack */ \ /* see if on 16bit stack */ \
cmpw $__ESPFIX_SS, %ax; \ cmpw $__ESPFIX_SS, %ax; \
jne 28f; \ je 28f; \
movl $__KERNEL_DS, %edx; \ 27: popl %eax; \
movl %edx, %ds; \ CFI_ADJUST_CFA_OFFSET -4; \
movl %edx, %es; \ .section .fixup,"ax"; \
28: movl $__KERNEL_DS, %eax; \
movl %eax, %ds; \
movl %eax, %es; \
/* switch to 32bit stack */ \ /* switch to 32bit stack */ \
FIXUP_ESPFIX_STACK \ FIXUP_ESPFIX_STACK; \
28: popl %eax; jmp 27b; \
.previous
/* /*
* Build the entry stubs and pointer table with * Build the entry stubs and pointer table with
...@@ -408,9 +509,14 @@ ENTRY(interrupt) ...@@ -408,9 +509,14 @@ ENTRY(interrupt)
vector=0 vector=0
ENTRY(irq_entries_start) ENTRY(irq_entries_start)
RING0_INT_FRAME
.rept NR_IRQS .rept NR_IRQS
ALIGN ALIGN
.if vector
CFI_ADJUST_CFA_OFFSET -4
.endif
1: pushl $vector-256 1: pushl $vector-256
CFI_ADJUST_CFA_OFFSET 4
jmp common_interrupt jmp common_interrupt
.data .data
.long 1b .long 1b
...@@ -424,60 +530,99 @@ common_interrupt: ...@@ -424,60 +530,99 @@ common_interrupt:
movl %esp,%eax movl %esp,%eax
call do_IRQ call do_IRQ
jmp ret_from_intr jmp ret_from_intr
CFI_ENDPROC
#define BUILD_INTERRUPT(name, nr) \ #define BUILD_INTERRUPT(name, nr) \
ENTRY(name) \ ENTRY(name) \
RING0_INT_FRAME; \
pushl $nr-256; \ pushl $nr-256; \
SAVE_ALL \ CFI_ADJUST_CFA_OFFSET 4; \
SAVE_ALL; \
movl %esp,%eax; \ movl %esp,%eax; \
call smp_/**/name; \ call smp_/**/name; \
jmp ret_from_intr; jmp ret_from_intr; \
CFI_ENDPROC
/* The include is where all of the SMP etc. interrupts come from */ /* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h" #include "entry_arch.h"
ENTRY(divide_error) ENTRY(divide_error)
RING0_INT_FRAME
pushl $0 # no error code pushl $0 # no error code
CFI_ADJUST_CFA_OFFSET 4
pushl $do_divide_error pushl $do_divide_error
CFI_ADJUST_CFA_OFFSET 4
ALIGN ALIGN
error_code: error_code:
pushl %ds pushl %ds
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ds, 0*/
pushl %eax pushl %eax
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eax, 0
xorl %eax, %eax xorl %eax, %eax
pushl %ebp pushl %ebp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebp, 0
pushl %edi pushl %edi
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edi, 0
pushl %esi pushl %esi
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET esi, 0
pushl %edx pushl %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx, 0
decl %eax # eax = -1 decl %eax # eax = -1
pushl %ecx pushl %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx, 0
pushl %ebx pushl %ebx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebx, 0
cld cld
pushl %es pushl %es
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET es, 0*/
UNWIND_ESPFIX_STACK UNWIND_ESPFIX_STACK
popl %ecx popl %ecx
CFI_ADJUST_CFA_OFFSET -4
/*CFI_REGISTER es, ecx*/
movl ES(%esp), %edi # get the function address movl ES(%esp), %edi # get the function address
movl ORIG_EAX(%esp), %edx # get the error code movl ORIG_EAX(%esp), %edx # get the error code
movl %eax, ORIG_EAX(%esp) movl %eax, ORIG_EAX(%esp)
movl %ecx, ES(%esp) movl %ecx, ES(%esp)
/*CFI_REL_OFFSET es, ES*/
movl $(__USER_DS), %ecx movl $(__USER_DS), %ecx
movl %ecx, %ds movl %ecx, %ds
movl %ecx, %es movl %ecx, %es
movl %esp,%eax # pt_regs pointer movl %esp,%eax # pt_regs pointer
call *%edi call *%edi
jmp ret_from_exception jmp ret_from_exception
CFI_ENDPROC
ENTRY(coprocessor_error) ENTRY(coprocessor_error)
RING0_INT_FRAME
pushl $0 pushl $0
CFI_ADJUST_CFA_OFFSET 4
pushl $do_coprocessor_error pushl $do_coprocessor_error
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
ENTRY(simd_coprocessor_error) ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
pushl $0 pushl $0
CFI_ADJUST_CFA_OFFSET 4
pushl $do_simd_coprocessor_error pushl $do_simd_coprocessor_error
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
ENTRY(device_not_available) ENTRY(device_not_available)
RING0_INT_FRAME
pushl $-1 # mark this as an int pushl $-1 # mark this as an int
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
movl %cr0, %eax movl %cr0, %eax
testl $0x4, %eax # EM (math emulation bit) testl $0x4, %eax # EM (math emulation bit)
...@@ -487,9 +632,12 @@ ENTRY(device_not_available) ...@@ -487,9 +632,12 @@ ENTRY(device_not_available)
jmp ret_from_exception jmp ret_from_exception
device_not_available_emulate: device_not_available_emulate:
pushl $0 # temporary storage for ORIG_EIP pushl $0 # temporary storage for ORIG_EIP
CFI_ADJUST_CFA_OFFSET 4
call math_emulate call math_emulate
addl $4, %esp addl $4, %esp
CFI_ADJUST_CFA_OFFSET -4
jmp ret_from_exception jmp ret_from_exception
CFI_ENDPROC
/* /*
* Debug traps and NMI can happen at the one SYSENTER instruction * Debug traps and NMI can happen at the one SYSENTER instruction
...@@ -514,16 +662,19 @@ label: \ ...@@ -514,16 +662,19 @@ label: \
pushl $sysenter_past_esp pushl $sysenter_past_esp
KPROBE_ENTRY(debug) KPROBE_ENTRY(debug)
RING0_INT_FRAME
cmpl $sysenter_entry,(%esp) cmpl $sysenter_entry,(%esp)
jne debug_stack_correct jne debug_stack_correct
FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct: debug_stack_correct:
pushl $-1 # mark this as an int pushl $-1 # mark this as an int
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
xorl %edx,%edx # error code 0 xorl %edx,%edx # error code 0
movl %esp,%eax # pt_regs pointer movl %esp,%eax # pt_regs pointer
call do_debug call do_debug
jmp ret_from_exception jmp ret_from_exception
CFI_ENDPROC
.previous .text .previous .text
/* /*
* NMI is doubly nasty. It can happen _while_ we're handling * NMI is doubly nasty. It can happen _while_ we're handling
...@@ -534,14 +685,18 @@ debug_stack_correct: ...@@ -534,14 +685,18 @@ debug_stack_correct:
* fault happened on the sysenter path. * fault happened on the sysenter path.
*/ */
ENTRY(nmi) ENTRY(nmi)
RING0_INT_FRAME
pushl %eax pushl %eax
CFI_ADJUST_CFA_OFFSET 4
movl %ss, %eax movl %ss, %eax
cmpw $__ESPFIX_SS, %ax cmpw $__ESPFIX_SS, %ax
popl %eax popl %eax
CFI_ADJUST_CFA_OFFSET -4
je nmi_16bit_stack je nmi_16bit_stack
cmpl $sysenter_entry,(%esp) cmpl $sysenter_entry,(%esp)
je nmi_stack_fixup je nmi_stack_fixup
pushl %eax pushl %eax
CFI_ADJUST_CFA_OFFSET 4
movl %esp,%eax movl %esp,%eax
/* Do not access memory above the end of our stack page, /* Do not access memory above the end of our stack page,
* it might not exist. * it might not exist.
...@@ -549,16 +704,19 @@ ENTRY(nmi) ...@@ -549,16 +704,19 @@ ENTRY(nmi)
andl $(THREAD_SIZE-1),%eax andl $(THREAD_SIZE-1),%eax
cmpl $(THREAD_SIZE-20),%eax cmpl $(THREAD_SIZE-20),%eax
popl %eax popl %eax
CFI_ADJUST_CFA_OFFSET -4
jae nmi_stack_correct jae nmi_stack_correct
cmpl $sysenter_entry,12(%esp) cmpl $sysenter_entry,12(%esp)
je nmi_debug_stack_check je nmi_debug_stack_check
nmi_stack_correct: nmi_stack_correct:
pushl %eax pushl %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
xorl %edx,%edx # zero error code xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer movl %esp,%eax # pt_regs pointer
call do_nmi call do_nmi
jmp restore_all jmp restore_all
CFI_ENDPROC
nmi_stack_fixup: nmi_stack_fixup:
FIX_STACK(12,nmi_stack_correct, 1) FIX_STACK(12,nmi_stack_correct, 1)
...@@ -574,97 +732,150 @@ nmi_debug_stack_check: ...@@ -574,97 +732,150 @@ nmi_debug_stack_check:
jmp nmi_stack_correct jmp nmi_stack_correct
nmi_16bit_stack: nmi_16bit_stack:
RING0_INT_FRAME
/* create the pointer to lss back */ /* create the pointer to lss back */
pushl %ss pushl %ss
CFI_ADJUST_CFA_OFFSET 4
pushl %esp pushl %esp
CFI_ADJUST_CFA_OFFSET 4
movzwl %sp, %esp movzwl %sp, %esp
addw $4, (%esp) addw $4, (%esp)
/* copy the iret frame of 12 bytes */ /* copy the iret frame of 12 bytes */
.rept 3 .rept 3
pushl 16(%esp) pushl 16(%esp)
CFI_ADJUST_CFA_OFFSET 4
.endr .endr
pushl %eax pushl %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp FIXUP_ESPFIX_STACK # %eax == %esp
CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
xorl %edx,%edx # zero error code xorl %edx,%edx # zero error code
call do_nmi call do_nmi
RESTORE_REGS RESTORE_REGS
lss 12+4(%esp), %esp # back to 16bit stack lss 12+4(%esp), %esp # back to 16bit stack
1: iret 1: iret
CFI_ENDPROC
.section __ex_table,"a" .section __ex_table,"a"
.align 4 .align 4
.long 1b,iret_exc .long 1b,iret_exc
.previous .previous
KPROBE_ENTRY(int3) KPROBE_ENTRY(int3)
RING0_INT_FRAME
pushl $-1 # mark this as an int pushl $-1 # mark this as an int
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
xorl %edx,%edx # zero error code xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer movl %esp,%eax # pt_regs pointer
call do_int3 call do_int3
jmp ret_from_exception jmp ret_from_exception
CFI_ENDPROC
.previous .text .previous .text
ENTRY(overflow) ENTRY(overflow)
RING0_INT_FRAME
pushl $0 pushl $0
CFI_ADJUST_CFA_OFFSET 4
pushl $do_overflow pushl $do_overflow
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
ENTRY(bounds) ENTRY(bounds)
RING0_INT_FRAME
pushl $0 pushl $0
CFI_ADJUST_CFA_OFFSET 4
pushl $do_bounds pushl $do_bounds
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
ENTRY(invalid_op) ENTRY(invalid_op)
RING0_INT_FRAME
pushl $0 pushl $0
CFI_ADJUST_CFA_OFFSET 4
pushl $do_invalid_op pushl $do_invalid_op
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
ENTRY(coprocessor_segment_overrun) ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
pushl $0 pushl $0
CFI_ADJUST_CFA_OFFSET 4
pushl $do_coprocessor_segment_overrun pushl $do_coprocessor_segment_overrun
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
ENTRY(invalid_TSS) ENTRY(invalid_TSS)
RING0_EC_FRAME
pushl $do_invalid_TSS pushl $do_invalid_TSS
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
ENTRY(segment_not_present) ENTRY(segment_not_present)
RING0_EC_FRAME
pushl $do_segment_not_present pushl $do_segment_not_present
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
ENTRY(stack_segment) ENTRY(stack_segment)
RING0_EC_FRAME
pushl $do_stack_segment pushl $do_stack_segment
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
KPROBE_ENTRY(general_protection) KPROBE_ENTRY(general_protection)
RING0_EC_FRAME
pushl $do_general_protection pushl $do_general_protection
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
.previous .text .previous .text
ENTRY(alignment_check) ENTRY(alignment_check)
RING0_EC_FRAME
pushl $do_alignment_check pushl $do_alignment_check
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
KPROBE_ENTRY(page_fault) KPROBE_ENTRY(page_fault)
RING0_EC_FRAME
pushl $do_page_fault pushl $do_page_fault
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
.previous .text .previous .text
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
ENTRY(machine_check) ENTRY(machine_check)
RING0_INT_FRAME
pushl $0 pushl $0
CFI_ADJUST_CFA_OFFSET 4
pushl machine_check_vector pushl machine_check_vector
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
#endif #endif
ENTRY(spurious_interrupt_bug) ENTRY(spurious_interrupt_bug)
RING0_INT_FRAME
pushl $0 pushl $0
CFI_ADJUST_CFA_OFFSET 4
pushl $do_spurious_interrupt_bug pushl $do_spurious_interrupt_bug
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC
#ifdef CONFIG_STACK_UNWIND #ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running) ENTRY(arch_unwind_init_running)
CFI_STARTPROC
movl 4(%esp), %edx movl 4(%esp), %edx
movl (%esp), %ecx movl (%esp), %ecx
leal 4(%esp), %eax leal 4(%esp), %eax
...@@ -689,6 +900,7 @@ ENTRY(arch_unwind_init_running) ...@@ -689,6 +900,7 @@ ENTRY(arch_unwind_init_running)
movl EBX(%edx), %ebx movl EBX(%edx), %ebx
movl $__KERNEL_DS, OLDSS(%edx) movl $__KERNEL_DS, OLDSS(%edx)
jmpl *%eax jmpl *%eax
CFI_ENDPROC
ENDPROC(arch_unwind_init_running) ENDPROC(arch_unwind_init_running)
#endif #endif
......
#ifndef _DWARF2_H
#define _DWARF2_H
#include <linux/config.h>
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
/*
Macros for dwarf2 CFI unwind table entries.
See "as.info" for details on these pseudo ops. Unfortunately
they are only supported in very new binutils, so define them
away for older version.
*/
#ifdef CONFIG_UNWIND_INFO
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#else
/* Due to the structure of pre-existing code, don't use assembler line
comment character # to ignore the arguments. Instead, use a dummy macro. */
.macro ignore a=0, b=0, c=0, d=0
.endm
#define CFI_STARTPROC ignore
#define CFI_ENDPROC ignore
#define CFI_DEF_CFA ignore
#define CFI_DEF_CFA_REGISTER ignore
#define CFI_DEF_CFA_OFFSET ignore
#define CFI_ADJUST_CFA_OFFSET ignore
#define CFI_OFFSET ignore
#define CFI_REL_OFFSET ignore
#define CFI_REGISTER ignore
#define CFI_RESTORE ignore
#define CFI_REMEMBER_STATE ignore
#define CFI_RESTORE_STATE ignore
#endif
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment