Commit 2601e64d authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] lockdep: irqtrace subsystem, x86_64 support

Add irqflags-tracing support to x86_64.

[akpm@osdl.org: build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c8558fcd
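The annotations in the diff below follow one rule: wherever the entry code disables or enables interrupts by hand (cli/sti), lockdep must be told, so that its per-task software copy of the hardirq state never disagrees with the real CPU flag. Expressed in C, the convention looks roughly like this (trace_hardirqs_on()/trace_hardirqs_off() are the real hooks added by the companion irqtrace core patch, and the raw_* primitives are the ones introduced in the new asm-x86_64/irqflags.h further down; the wrapper function itself is only an illustration):

	/* Illustration of the annotation ordering -- not code from this patch. */
	static inline void example_irqs_off_section(void)
	{
		raw_local_irq_disable();	/* the "cli" */
		trace_hardirqs_off();		/* tell lockdep: irqs are now off */

		/* ... work that must run with interrupts disabled ... */

		trace_hardirqs_on();		/* tell lockdep: irqs are about to go on */
		raw_local_irq_enable();		/* the "sti" */
	}

In the assembly entry paths the same ordering shows up as "cli; TRACE_IRQS_OFF" and "TRACE_IRQS_ON; sti", while TRACE_IRQS_IRETQ covers returns where iretq itself may re-enable interrupts.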
 menu "Kernel hacking"
+config TRACE_IRQFLAGS_SUPPORT
+	bool
+	default y
 source "lib/Kconfig.debug"
 config DEBUG_RODATA
@@ -13,6 +13,7 @@
 #include <asm/thread_info.h>
 #include <asm/segment.h>
 #include <asm/vsyscall32.h>
+#include <asm/irqflags.h>
 #include <linux/linkage.h>
 #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
@@ -75,6 +76,10 @@ ENTRY(ia32_sysenter_target)
 	swapgs
 	movq	%gs:pda_kernelstack, %rsp
 	addq	$(PDA_STACKOFFSET),%rsp
+	/*
+	 * No need to follow this irqs on/off section: the syscall
+	 * disabled irqs, here we enable it straight after entry:
+	 */
 	sti
 	movl	%ebp,%ebp		/* zero extension */
 	pushq	$__USER32_DS
@@ -118,6 +123,7 @@ sysenter_do_call:
 	movq	%rax,RAX-ARGOFFSET(%rsp)
 	GET_THREAD_INFO(%r10)
 	cli
+	TRACE_IRQS_OFF
 	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
 	jnz	int_ret_from_sys_call
 	andl	$~TS_COMPAT,threadinfo_status(%r10)
@@ -132,6 +138,7 @@ sysenter_do_call:
 	CFI_REGISTER rsp,rcx
 	movl	$VSYSCALL32_SYSEXIT,%edx	/* User %eip */
 	CFI_REGISTER rip,rdx
+	TRACE_IRQS_ON
 	swapgs
 	sti		/* sti only takes effect after the next instruction */
 	/* sysexit */
@@ -186,6 +193,10 @@ ENTRY(ia32_cstar_target)
 	movl	%esp,%r8d
 	CFI_REGISTER	rsp,r8
 	movq	%gs:pda_kernelstack,%rsp
+	/*
+	 * No need to follow this irqs on/off section: the syscall
+	 * disabled irqs and here we enable it straight after entry:
+	 */
 	sti
 	SAVE_ARGS 8,1,1
 	movl	%eax,%eax		/* zero extension */
@@ -220,6 +231,7 @@ cstar_do_call:
 	movq	%rax,RAX-ARGOFFSET(%rsp)
 	GET_THREAD_INFO(%r10)
 	cli
+	TRACE_IRQS_OFF
 	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
 	jnz	int_ret_from_sys_call
 	andl	$~TS_COMPAT,threadinfo_status(%r10)
@@ -228,6 +240,7 @@ cstar_do_call:
 	CFI_REGISTER rip,rcx
 	movl	EFLAGS-ARGOFFSET(%rsp),%r11d
 	/*CFI_REGISTER rflags,r11*/
+	TRACE_IRQS_ON
 	movl	RSP-ARGOFFSET(%rsp),%esp
 	CFI_RESTORE rsp
 	swapgs
@@ -287,6 +300,10 @@ ENTRY(ia32_syscall)
 	/*CFI_REL_OFFSET cs,CS-RIP*/
 	CFI_REL_OFFSET rip,RIP-RIP
 	swapgs
+	/*
+	 * No need to follow this irqs on/off section: the syscall
+	 * disabled irqs and here we enable it straight after entry:
+	 */
 	sti
 	movl	%eax,%eax
 	pushq	%rax
@@ -41,6 +41,7 @@
 #include <asm/thread_info.h>
 #include <asm/hw_irq.h>
 #include <asm/page.h>
+#include <asm/irqflags.h>
 	.code64
@@ -48,6 +49,16 @@
 #define retint_kernel retint_restore_args
 #endif
+.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bt	$9,EFLAGS-\offset(%rsp)	/* interrupts off? */
+	jnc	1f
+	TRACE_IRQS_ON
+1:
+#endif
+.endm
 /*
  * C code is not supposed to know about undefined top of stack. Every time
  * a C function with an pt_regs argument is called from the SYSCALL based
@@ -194,6 +205,10 @@ ENTRY(system_call)
 	swapgs
 	movq	%rsp,%gs:pda_oldrsp
 	movq	%gs:pda_kernelstack,%rsp
+	/*
+	 * No need to follow this irqs off/on section - it's straight
+	 * and short:
+	 */
 	sti
 	SAVE_ARGS 8,1
 	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
@@ -219,10 +234,15 @@ ret_from_sys_call:
 sysret_check:
 	GET_THREAD_INFO(%rcx)
 	cli
+	TRACE_IRQS_OFF
 	movl	threadinfo_flags(%rcx),%edx
 	andl	%edi,%edx
 	CFI_REMEMBER_STATE
 	jnz	sysret_careful
+	/*
+	 * sysretq will re-enable interrupts:
+	 */
+	TRACE_IRQS_ON
 	movq	RIP-ARGOFFSET(%rsp),%rcx
 	CFI_REGISTER rip,rcx
 	RESTORE_ARGS 0,-ARG_SKIP,1
@@ -237,6 +257,7 @@ sysret_careful:
 	CFI_RESTORE_STATE
 	bt	$TIF_NEED_RESCHED,%edx
 	jnc	sysret_signal
+	TRACE_IRQS_ON
 	sti
 	pushq	%rdi
 	CFI_ADJUST_CFA_OFFSET 8
@@ -247,6 +268,7 @@ sysret_careful:
 	/* Handle a signal */
 sysret_signal:
+	TRACE_IRQS_ON
 	sti
 	testl	$(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
 	jz	1f
@@ -261,6 +283,7 @@ sysret_signal:
 	/* Use IRET because user could have changed frame. This
 	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
 	cli
+	TRACE_IRQS_OFF
 	jmp int_with_check
 badsys:
@@ -309,6 +332,7 @@ ENTRY(int_ret_from_sys_call)
 	CFI_REL_OFFSET	r10,R10-ARGOFFSET
 	CFI_REL_OFFSET	r11,R11-ARGOFFSET
 	cli
+	TRACE_IRQS_OFF
 	testl	$3,CS-ARGOFFSET(%rsp)
 	je	retint_restore_args
 	movl	$_TIF_ALLWORK_MASK,%edi
@@ -327,6 +351,7 @@ int_with_check:
 int_careful:
 	bt	$TIF_NEED_RESCHED,%edx
 	jnc	int_very_careful
+	TRACE_IRQS_ON
 	sti
 	pushq	%rdi
 	CFI_ADJUST_CFA_OFFSET 8
@@ -334,10 +359,12 @@ int_careful:
 	popq	%rdi
 	CFI_ADJUST_CFA_OFFSET -8
 	cli
+	TRACE_IRQS_OFF
 	jmp int_with_check
 	/* handle signals and tracing -- both require a full stack frame */
 int_very_careful:
+	TRACE_IRQS_ON
 	sti
 	SAVE_REST
 	/* Check for syscall exit trace */
@@ -351,6 +378,7 @@ int_very_careful:
 	CFI_ADJUST_CFA_OFFSET -8
 	andl	$~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
 	cli
+	TRACE_IRQS_OFF
 	jmp int_restore_rest
 int_signal:
@@ -363,6 +391,7 @@ int_signal:
 int_restore_rest:
 	RESTORE_REST
 	cli
+	TRACE_IRQS_OFF
 	jmp int_with_check
 	CFI_ENDPROC
 END(int_ret_from_sys_call)
@@ -484,6 +513,10 @@ END(stub_rt_sigreturn)
 	swapgs
 1:	incl	%gs:pda_irqcount	# RED-PEN should check preempt count
 	cmoveq	%gs:pda_irqstackptr,%rsp
+	/*
+	 * We entered an interrupt context - irqs are off:
+	 */
+	TRACE_IRQS_OFF
 	call \func
 	.endm
@@ -493,6 +526,7 @@ ENTRY(common_interrupt)
 	/* 0(%rsp): oldrsp-ARGOFFSET */
 ret_from_intr:
 	cli
+	TRACE_IRQS_OFF
 	decl	%gs:pda_irqcount
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
@@ -515,9 +549,21 @@ retint_check:
 	CFI_REMEMBER_STATE
 	jnz	retint_careful
 retint_swapgs:
+	/*
+	 * The iretq could re-enable interrupts:
+	 */
+	cli
+	TRACE_IRQS_IRETQ
 	swapgs
+	jmp restore_args
 retint_restore_args:
 	cli
+	/*
+	 * The iretq could re-enable interrupts:
+	 */
+	TRACE_IRQS_IRETQ
+restore_args:
 	RESTORE_ARGS 0,8,0
 iret_label:
 	iretq
@@ -530,6 +576,7 @@ iret_label:
 	/* running with kernel gs */
 bad_iret:
 	movq $11,%rdi	/* SIGSEGV */
+	TRACE_IRQS_ON
 	sti
 	jmp do_exit
 	.previous
@@ -539,6 +586,7 @@ retint_careful:
 	CFI_RESTORE_STATE
 	bt	$TIF_NEED_RESCHED,%edx
 	jnc	retint_signal
+	TRACE_IRQS_ON
 	sti
 	pushq	%rdi
 	CFI_ADJUST_CFA_OFFSET	8
@@ -547,11 +595,13 @@ retint_careful:
 	CFI_ADJUST_CFA_OFFSET	-8
 	GET_THREAD_INFO(%rcx)
 	cli
+	TRACE_IRQS_OFF
 	jmp retint_check
 retint_signal:
 	testl	$(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
 	jz	retint_swapgs
+	TRACE_IRQS_ON
 	sti
 	SAVE_REST
 	movq	$-1,ORIG_RAX(%rsp)
@@ -560,6 +610,7 @@ retint_signal:
 	call do_notify_resume
 	RESTORE_REST
 	cli
+	TRACE_IRQS_OFF
 	movl	$_TIF_NEED_RESCHED,%edi
 	GET_THREAD_INFO(%rcx)
 	jmp retint_check
@@ -666,7 +717,7 @@ END(spurious_interrupt)
 	/* error code is on the stack already */
 	/* handle NMI like exceptions that can happen everywhere */
-	.macro paranoidentry sym, ist=0
+	.macro paranoidentry sym, ist=0, irqtrace=1
 	SAVE_ALL
 	cld
 	movl $1,%ebx
@@ -691,6 +742,71 @@ END(spurious_interrupt)
 	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
 	.endif
 	cli
+	.if \irqtrace
+	TRACE_IRQS_OFF
+	.endif
+	.endm
+	/*
+	 * "Paranoid" exit path from exception stack.
+	 * Paranoid because this is used by NMIs and cannot take
+	 * any kernel state for granted.
+	 * We don't do kernel preemption checks here, because only
+	 * NMI should be common and it does not enable IRQs and
+	 * cannot get reschedule ticks.
+	 *
+	 * "trace" is 0 for the NMI handler only, because irq-tracing
+	 * is fundamentally NMI-unsafe. (we cannot change the soft and
+	 * hard flags at once, atomically)
+	 */
+	.macro paranoidexit trace=1
+	/* ebx:	no swapgs flag */
+paranoid_exit\trace:
+	testl %ebx,%ebx				/* swapgs needed? */
+	jnz paranoid_restore\trace
+	testl $3,CS(%rsp)
+	jnz paranoid_userspace\trace
+paranoid_swapgs\trace:
+	TRACE_IRQS_IRETQ 0
+	swapgs
+paranoid_restore\trace:
+	RESTORE_ALL 8
+	iretq
+paranoid_userspace\trace:
+	GET_THREAD_INFO(%rcx)
+	movl threadinfo_flags(%rcx),%ebx
+	andl $_TIF_WORK_MASK,%ebx
+	jz paranoid_swapgs\trace
+	movq %rsp,%rdi			/* &pt_regs */
+	call sync_regs
+	movq %rax,%rsp			/* switch stack for scheduling */
+	testl $_TIF_NEED_RESCHED,%ebx
+	jnz paranoid_schedule\trace
+	movl %ebx,%edx			/* arg3: thread flags */
+	.if \trace
+	TRACE_IRQS_ON
+	.endif
+	sti
+	xorl %esi,%esi			/* arg2: oldset */
+	movq %rsp,%rdi			/* arg1: &pt_regs */
+	call do_notify_resume
+	cli
+	.if \trace
+	TRACE_IRQS_OFF
+	.endif
+	jmp paranoid_userspace\trace
+paranoid_schedule\trace:
+	.if \trace
+	TRACE_IRQS_ON
+	.endif
+	sti
+	call schedule
+	cli
+	.if \trace
+	TRACE_IRQS_OFF
+	.endif
+	jmp paranoid_userspace\trace
+	CFI_ENDPROC
 	.endm
 /*
@@ -748,6 +864,7 @@ error_exit:
 	movl %ebx,%eax
 	RESTORE_REST
 	cli
+	TRACE_IRQS_OFF
 	GET_THREAD_INFO(%rcx)
 	testl %eax,%eax
 	jne	retint_kernel
@@ -755,6 +872,10 @@ error_exit:
 	movl	$_TIF_WORK_MASK,%edi
 	andl	%edi,%edx
 	jnz	retint_careful
+	/*
+	 * The iret might restore flags:
+	 */
+	TRACE_IRQS_IRETQ
 	swapgs
 	RESTORE_ARGS 0,8,0
 	jmp iret_label
@@ -916,8 +1037,7 @@ KPROBE_ENTRY(debug)
 	pushq $0
 	CFI_ADJUST_CFA_OFFSET 8
 	paranoidentry do_debug, DEBUG_STACK
-	jmp paranoid_exit
-	CFI_ENDPROC
+	paranoidexit
 END(debug)
 	.previous .text
@@ -926,49 +1046,13 @@ KPROBE_ENTRY(nmi)
 	INTR_FRAME
 	pushq $-1
 	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_nmi
-	/*
-	 * "Paranoid" exit path from exception stack.
-	 * Paranoid because this is used by NMIs and cannot take
-	 * any kernel state for granted.
-	 * We don't do kernel preemption checks here, because only
-	 * NMI should be common and it does not enable IRQs and
-	 * cannot get reschedule ticks.
-	 */
-	/* ebx:	no swapgs flag */
-paranoid_exit:
-	testl %ebx,%ebx				/* swapgs needed? */
-	jnz paranoid_restore
-	testl $3,CS(%rsp)
-	jnz paranoid_userspace
-paranoid_swapgs:
-	swapgs
-paranoid_restore:
-	RESTORE_ALL 8
-	iretq
-paranoid_userspace:
-	GET_THREAD_INFO(%rcx)
-	movl threadinfo_flags(%rcx),%ebx
-	andl $_TIF_WORK_MASK,%ebx
-	jz paranoid_swapgs
-	movq %rsp,%rdi			/* &pt_regs */
-	call sync_regs
-	movq %rax,%rsp			/* switch stack for scheduling */
-	testl $_TIF_NEED_RESCHED,%ebx
-	jnz paranoid_schedule
-	movl %ebx,%edx			/* arg3: thread flags */
-	sti
-	xorl %esi,%esi			/* arg2: oldset */
-	movq %rsp,%rdi			/* arg1: &pt_regs */
-	call do_notify_resume
-	cli
-	jmp paranoid_userspace
-paranoid_schedule:
-	sti
-	call schedule
-	cli
-	jmp paranoid_userspace
+	paranoidentry do_nmi, 0, 0
+#ifdef CONFIG_TRACE_IRQFLAGS
+	paranoidexit 0
+#else
+	jmp paranoid_exit1
 	CFI_ENDPROC
+#endif
 END(nmi)
 	.previous .text
@@ -977,7 +1061,7 @@ KPROBE_ENTRY(int3)
 	pushq $0
 	CFI_ADJUST_CFA_OFFSET 8
 	paranoidentry do_int3, DEBUG_STACK
-	jmp paranoid_exit
+	jmp paranoid_exit1
 	CFI_ENDPROC
 END(int3)
 	.previous .text
@@ -1006,7 +1090,7 @@ END(reserved)
 ENTRY(double_fault)
 	XCPT_FRAME
 	paranoidentry do_double_fault
-	jmp paranoid_exit
+	jmp paranoid_exit1
 	CFI_ENDPROC
 END(double_fault)
@@ -1022,7 +1106,7 @@ END(segment_not_present)
 ENTRY(stack_segment)
 	XCPT_FRAME
 	paranoidentry do_stack_segment
-	jmp paranoid_exit
+	jmp paranoid_exit1
 	CFI_ENDPROC
 END(stack_segment)
@@ -1050,7 +1134,7 @@ ENTRY(machine_check)
 	pushq $0
 	CFI_ADJUST_CFA_OFFSET 8
 	paranoidentry do_machine_check
-	jmp paranoid_exit
+	jmp paranoid_exit1
 	CFI_ENDPROC
 END(machine_check)
 #endif
@@ -177,8 +177,10 @@ asmlinkage void do_softirq(void)
 	local_irq_save(flags);
 	pending = local_softirq_pending();
 	/* Switch to interrupt stack */
-	if (pending)
+	if (pending) {
 		call_softirq();
+		WARN_ON_ONCE(softirq_count());
+	}
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(do_softirq);
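The WARN_ON_ONCE() added above checks that, once call_softirq() has returned from the interrupt stack, preempt_count() no longer carries any softirq nesting; if it did, lockdep's softirq-state tracking would disagree with reality. Roughly what the helper checks (softirq_count() is the existing helper from <linux/hardirq.h>; the shift/mask values are quoted from memory of that era's headers and should be treated as illustrative):

	/* softirq nesting lives in bits 8..15 of preempt_count() */
	#define SOFTIRQ_SHIFT	8
	#define SOFTIRQ_MASK	(((1UL << 8) - 1) << SOFTIRQ_SHIFT)
	#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)

	if (pending) {
		call_softirq();
		WARN_ON_ONCE(softirq_count());	/* expect: no softirq context left over */
	}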
/*
* include/asm-x86_64/irqflags.h
*
* IRQ flags handling
*
* This file gets included from lowlevel asm headers too, to provide
* wrapped versions of the local_irq_*() APIs, based on the
* raw_local_irq_*() macros from the lowlevel headers.
*/
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#ifndef __ASSEMBLY__
/* interrupt control.. */
#define raw_local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define raw_local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#ifdef CONFIG_X86_VSMP
/* Interrupt control for VSMP architecture */
#define raw_local_irq_disable() do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
#define raw_local_irq_enable() do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
#define raw_irqs_disabled_flags(flags) \
({ \
(flags & (1<<18)) || !(flags & (1<<9)); \
})
/* For spinlocks etc */
#define raw_local_irq_save(x) do { raw_local_save_flags(x); raw_local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
#else /* CONFIG_X86_VSMP */
#define raw_local_irq_disable() __asm__ __volatile__("cli": : :"memory")
#define raw_local_irq_enable() __asm__ __volatile__("sti": : :"memory")
#define raw_irqs_disabled_flags(flags) \
({ \
!(flags & (1<<9)); \
})
/* For spinlocks etc */
#define raw_local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# raw_local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#endif
#define raw_irqs_disabled() \
({ \
unsigned long flags; \
raw_local_save_flags(flags); \
raw_irqs_disabled_flags(flags); \
})
/* used in the idle loop; sti takes one instruction cycle to complete */
#define raw_safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shutdown the processor */
#define halt() __asm__ __volatile__("hlt": : :"memory")
#else /* __ASSEMBLY__: */
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#endif
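The __ASSEMBLY__ branch quoted above only shows the empty stubs. For the entry-code annotations to do anything when CONFIG_TRACE_IRQFLAGS is enabled, TRACE_IRQS_ON/OFF have to expand to calls into the lockdep hooks; on x86-64 this is expected to go through register-preserving thunks rather than direct C calls. A sketch of what that branch presumably looks like (the thunk names mirror the trace_hardirqs_on()/off() hooks from the companion core patch; treat the exact text as an assumption, since it is not visible in this excerpt):

	#else /* __ASSEMBLY__: */
	/* assumed: the thunks save/restore caller-clobbered registers around the C hook */
	# ifdef CONFIG_TRACE_IRQFLAGS
	#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk
	#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
	# else
	#  define TRACE_IRQS_ON
	#  define TRACE_IRQS_OFF
	# endif
	#endif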
@@ -244,43 +244,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-/* interrupt control.. */
-#define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
-#ifdef CONFIG_X86_VSMP
-/* Interrupt control for VSMP architecture */
-#define local_irq_disable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
-#define local_irq_enable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
-#define irqs_disabled()					\
-({							\
-	unsigned long flags;				\
-	local_save_flags(flags);			\
-	(flags & (1<<18)) || !(flags & (1<<9));		\
-})
-/* For spinlocks etc */
-#define local_irq_save(x)	do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
-#else /* CONFIG_X86_VSMP */
-#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	!(flags & (1<<9));		\
-})
-/* For spinlocks etc */
-#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
-#endif
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()			__asm__ __volatile__("hlt": : :"memory")
+#include <linux/irqflags.h>
 void cpu_idle_wait(void);
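With the raw x86-64 macros removed from system.h, local_irq_disable(), local_irq_enable(), local_irq_save() and local_irq_restore() now come from <linux/irqflags.h>, which wraps the raw_* primitives from the new asm-x86_64/irqflags.h and notifies lockdep on every transition when CONFIG_TRACE_IRQFLAGS is set. The rough shape of those wrappers, as introduced by the companion irqtrace core patch (quoted from memory; the exact text may differ):

	#define local_irq_disable() \
		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

	#define local_irq_enable() \
		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

	#define local_irq_save(flags) \
		do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)

	#define local_irq_restore(flags)				\
		do {							\
			if (raw_irqs_disabled_flags(flags)) {		\
				raw_local_irq_restore(flags);		\
				trace_hardirqs_off();			\
			} else {					\
				trace_hardirqs_on();			\
				raw_local_irq_restore(flags);		\
			}						\
		} while (0)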