Commit fe1b4ba4 authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] cio: Call cancel_halt_clear even when actl == 0.
  [S390] cio: Use path verification to check for path state.
  [S390] cio: Fix locking when calling notify function.
  [S390] Fixed handling of access register mode faults.
  [S390] dasd: Use default recovery for SNSS requests
  [S390] check_bugs() should be inline.
  [S390] tape: Compression overwrites crypto setting
  [S390] nss: disable kexec.
  [S390] reipl: move dump_prefix_page out of text section.
  [S390] smp: disable preemption in smp_call_function/smp_call_function_on
  [S390] kprobes breaks BUG_ON
parents ae5dd8e3 2470b648
@@ -376,6 +376,8 @@ config SHARED_KERNEL
 	  Select this option, if you want to share the text segment of the
 	  Linux kernel between different VM guests. This reduces memory
 	  usage with lots of guests but greatly increases kernel size.
+	  Also if a kernel was IPL'ed from a shared segment the kexec system
+	  call will not work.
 	  You should only select this option if you know what you are
 	  doing and want to exploit this feature.
......
@@ -121,7 +121,7 @@ startup_continue:
 	.long	.Lduct			# cr2: dispatchable unit control table
 	.long	0			# cr3: instruction authorization
 	.long	0			# cr4: instruction authorization
-	.long	0xffffffff		# cr5: primary-aste origin
+	.long	.Lduct			# cr5: primary-aste origin
 	.long	0			# cr6: I/O interrupts
 	.long	0			# cr7: secondary space segment table
 	.long	0			# cr8: access registers translation
@@ -132,8 +132,6 @@ startup_continue:
 	.long	0			# cr13: home space segment table
 	.long	0xc0000000		# cr14: machine check handling off
 	.long	0			# cr15: linkage stack operations
-.Lduct:	.long	0,0,0,0,0,0,0,0
-	.long	0,0,0,0,0,0,0,0
 .Lpcfpu:.long	0x00080000,0x80000000 + .Lchkfpu
 .Lpccsp:.long	0x00080000,0x80000000 + .Lchkcsp
 .Lpcmvpg:.long	0x00080000,0x80000000 + .Lchkmvpg
@@ -147,6 +145,13 @@ startup_continue:
 .Linittu: .long	init_thread_union
 .Lstartup_init:
 	.long	startup_init
+	.align	64
+.Lduct:	.long	0,0,0,0,.Lduald,0,0,0
+	.long	0,0,0,0,0,0,0,0
+	.align	128
+.Lduald:.rept	8
+	.long	0x80000000,0,0,0	# invalid access-list entries
+	.endr
 	.org	0x12000
 	.globl	_ehead
......
@@ -134,7 +134,7 @@ startup_continue:
 	.quad	.Lduct			# cr2: dispatchable unit control table
 	.quad	0			# cr3: instruction authorization
 	.quad	0			# cr4: instruction authorization
-	.quad	0xffffffffffffffff	# cr5: primary-aste origin
+	.quad	.Lduct			# cr5: primary-aste origin
 	.quad	0			# cr6: I/O interrupts
 	.quad	0			# cr7: secondary space segment table
 	.quad	0			# cr8: access registers translation
@@ -145,14 +145,19 @@ startup_continue:
 	.quad	0			# cr13: home space segment table
 	.quad	0xc0000000		# cr14: machine check handling off
 	.quad	0			# cr15: linkage stack operations
-.Lduct:	.long	0,0,0,0,0,0,0,0
-	.long	0,0,0,0,0,0,0,0
 .Lpcmsk:.quad	0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad	0x80000000 + 0x20000 - 8	# 2GB + 128K - 8
 .Lnop:	.long	0x07000700
 .Lparmaddr:
 	.quad	PARMAREA
+	.align	64
+.Lduct:	.long	0,0,0,0,.Lduald,0,0,0
+	.long	0,0,0,0,0,0,0,0
+	.align	128
+.Lduald:.rept	8
+	.long	0x80000000,0,0,0	# invalid access-list entries
+	.endr
 	.org	0x12000
 	.globl	_ehead
......
@@ -1066,7 +1066,7 @@ static void do_reset_calls(void)
 		reset->fn();
 }
 
-extern __u32 dump_prefix_page;
+u32 dump_prefix_page;
 
 void s390_reset_system(void)
 {
@@ -1078,7 +1078,7 @@ void s390_reset_system(void)
 	lc->panic_stack = S390_lowcore.panic_stack;
 
 	/* Save prefix page address for dump case */
-	dump_prefix_page = (unsigned long) lc;
+	dump_prefix_page = (u32)(unsigned long) lc;
 
 	/* Disable prefixing */
 	set_prefix(0);
......
@@ -337,21 +337,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	}
 	p = get_kprobe(addr);
-	if (!p) {
-		if (*addr != BREAKPOINT_INSTRUCTION) {
-			/*
-			 * The breakpoint instruction was removed right
-			 * after we hit it. Another cpu has removed
-			 * either a probepoint or a debugger breakpoint
-			 * at this address. In either case, no further
-			 * handling of this interrupt is appropriate.
-			 *
-			 */
-			ret = 1;
-		}
-		/* Not one of ours: let kernel handle it */
+	if (!p)
+		/*
+		 * No kprobe at this address. The fault has not been
+		 * caused by a kprobe breakpoint. The race of breakpoint
+		 * vs. kprobe remove does not exist because on s390 we
+		 * use stop_machine_run to arm/disarm the breakpoints.
+		 */
 		goto no_kprobe;
-	}
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 	set_current_kprobe(p, regs, kcb);
......
@@ -19,6 +19,7 @@
 #include <asm/system.h>
 #include <asm/smp.h>
 #include <asm/reset.h>
+#include <asm/ipl.h>
 
 typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
@@ -29,6 +30,10 @@ int machine_kexec_prepare(struct kimage *image)
 {
 	void *reboot_code_buffer;
 
+	/* Can't replace kernel image since it is read-only. */
+	if (ipl_flags & IPL_NSS_VALID)
+		return -ENOSYS;
+
 	/* We don't support anything but the default image type for now. */
 	if (image->type != KEXEC_TYPE_DEFAULT)
 		return -EINVAL;
......
@@ -8,6 +8,10 @@
 
 #include <asm/lowcore.h>
 
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
 		.globl	do_reipl_asm
 do_reipl_asm:	basr	%r13,0
 .Lpg0:		lpsw	.Lnewpsw-.Lpg0(%r13)
@@ -16,12 +20,12 @@ do_reipl_asm:	basr	%r13,0
 		stm	%r0,%r15,__LC_GPREGS_SAVE_AREA
 		stctl	%c0,%c15,__LC_CREGS_SAVE_AREA
 		stam	%a0,%a15,__LC_AREGS_SAVE_AREA
-		mvc	__LC_PREFIX_SAVE_AREA(4),dump_prefix_page-.Lpg0(%r13)
+		l	%r10,.Ldump_pfx-.Lpg0(%r13)
+		mvc	__LC_PREFIX_SAVE_AREA(4),0(%r10)
 		stckc	.Lclkcmp-.Lpg0(%r13)
 		mvc	__LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
 		stpt	__LC_CPU_TIMER_SAVE_AREA
 		st	%r13, __LC_PSW_SAVE_AREA+4
 		lctl	%c6,%c6,.Lall-.Lpg0(%r13)
 		lr	%r1,%r2
 		mvc	__LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
@@ -55,6 +59,7 @@ do_reipl_asm:	basr	%r13,0
 		.align	8
 .Lclkcmp:	.quad	0x0000000000000000
 .Lall:		.long	0xff000000
+.Ldump_pfx:	.long	dump_prefix_page
 		.align	8
 .Lnewpsw:	.long	0x00080000,0x80000000+.Lpg1
 .Lpcnew:	.long	0x00080000,0x80000000+.Lecs
@@ -79,7 +84,3 @@ do_reipl_asm:	basr	%r13,0
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
-		.globl	dump_prefix_page
-dump_prefix_page:
-		.long	0x00000000
@@ -8,6 +8,12 @@
  */
 
 #include <asm/lowcore.h>
 
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
 		.globl	do_reipl_asm
 do_reipl_asm:	basr	%r13,0
 .Lpg0:		lpswe	.Lnewpsw-.Lpg0(%r13)
@@ -20,7 +26,8 @@ do_reipl_asm:	basr	%r13,0
 		stg	%r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
 		stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
 		stam	%a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
-		mvc	__LC_PREFIX_SAVE_AREA-0x1000(4,%r1),dump_prefix_page-.Lpg0(%r13)
+		lg	%r10,.Ldump_pfx-.Lpg0(%r13)
+		mvc	__LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
 		stfpc	__LC_FP_CREG_SAVE_AREA-0x1000(%r1)
 		stckc	.Lclkcmp-.Lpg0(%r13)
 		mvc	__LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
@@ -64,6 +71,7 @@ do_reipl_asm:	basr	%r13,0
 		.align	8
 .Lclkcmp:	.quad	0x0000000000000000
 .Lall:		.quad	0x00000000ff000000
+.Ldump_pfx:	.quad	dump_prefix_page
 .Lregsave:	.quad	0x0000000000000000
 		.align	16
 /*
@@ -103,6 +111,3 @@ do_reipl_asm:	basr	%r13,0
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
-		.globl	dump_prefix_page
-dump_prefix_page:
-		.long	0x00000000
@@ -94,10 +94,9 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
 	int cpu, local = 0;
 
 	/*
-	 * Can deadlock when interrupts are disabled or if in wrong context,
-	 * caller must disable preemption
+	 * Can deadlock when interrupts are disabled or if in wrong context.
 	 */
-	WARN_ON(irqs_disabled() || in_irq() || preemptible());
+	WARN_ON(irqs_disabled() || in_irq());
 
 	/*
 	 * Check for local function call. We have to have the same call order
@@ -152,17 +151,18 @@ out:
  * Run a function on all other CPUs.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
  */
 int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
 		      int wait)
 {
 	cpumask_t map;
 
+	preempt_disable();
 	map = cpu_online_map;
 	cpu_clear(smp_processor_id(), map);
 	__smp_call_function_map(func, info, nonatomic, wait, map);
+	preempt_enable();
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
@@ -178,16 +178,17 @@ EXPORT_SYMBOL(smp_call_function);
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
 */
 int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
 			  int wait, int cpu)
 {
 	cpumask_t map = CPU_MASK_NONE;
 
+	preempt_disable();
 	cpu_set(cpu, map);
 	__smp_call_function_map(func, info, nonatomic, wait, map);
+	preempt_enable();
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_on);
......
@@ -108,53 +108,40 @@ void bust_spinlocks(int yes)
 }
 
 /*
- * Check which address space is addressed by the access
- * register in S390_lowcore.exc_access_id.
- * Returns 1 for user space and 0 for kernel space.
+ * Returns the address space associated with the fault.
+ * Returns 0 for kernel space, 1 for user space and
+ * 2 for code execution in user space with noexec=on.
  */
-static int __check_access_register(struct pt_regs *regs, int error_code)
-{
-	int areg = S390_lowcore.exc_access_id;
-
-	if (areg == 0)
-		/* Access via access register 0 -> kernel address */
-		return 0;
-	save_access_regs(current->thread.acrs);
-	if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1)
-		/*
-		 * access register contains 0 -> kernel address,
-		 * access register contains 1 -> user space address
-		 */
-		return current->thread.acrs[areg];
-
-	/* Something unhealthy was done with the access registers... */
-	die("page fault via unknown access register", regs, error_code);
-	do_exit(SIGKILL);
-	return 0;
-}
-
-/*
- * Check which address space the address belongs to.
- * May return 1 or 2 for user space and 0 for kernel space.
- * Returns 2 for user space in primary addressing mode with
- * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
- */
-static inline int check_user_space(struct pt_regs *regs, int error_code)
+static inline int check_space(struct task_struct *tsk)
 {
 	/*
-	 * The lowest two bits of S390_lowcore.trans_exc_code indicate
-	 * which paging table was used:
-	 *   0: Primary Segment Table Descriptor
-	 *   1: STD determined via access register
-	 *   2: Secondary Segment Table Descriptor
-	 *   3: Home Segment Table Descriptor
+	 * The lowest two bits of S390_lowcore.trans_exc_code
+	 * indicate which paging table was used.
 	 */
-	int descriptor = S390_lowcore.trans_exc_code & 3;
-	if (unlikely(descriptor == 1))
-		return __check_access_register(regs, error_code);
-	if (descriptor == 2)
-		return current->thread.mm_segment.ar4;
-	return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
+	int desc = S390_lowcore.trans_exc_code & 3;
+
+	if (desc == 3)	/* Home Segment Table Descriptor */
+		return switch_amode == 0;
+	if (desc == 2)	/* Secondary Segment Table Descriptor */
+		return tsk->thread.mm_segment.ar4;
+#ifdef CONFIG_S390_SWITCH_AMODE
+	if (unlikely(desc == 1)) {	/* STD determined via access register */
+		/* %a0 always indicates primary space. */
+		if (S390_lowcore.exc_access_id != 0) {
+			save_access_regs(tsk->thread.acrs);
+			/*
+			 * An alet of 0 indicates primary space.
+			 * An alet of 1 indicates secondary space.
+			 * Any other alet values generate an
+			 * alen-translation exception.
+			 */
+			if (tsk->thread.acrs[S390_lowcore.exc_access_id])
+				return tsk->thread.mm_segment.ar4;
+		}
+	}
+#endif
+	/* Primary Segment Table Descriptor */
+	return switch_amode << s390_noexec;
 }
 /*
@@ -265,16 +252,16 @@ out_fault:
  * 11 Page translation -> Not present (nullification)
  * 3b Region third trans. -> Not present (nullification)
  */
-static inline void __kprobes
+static inline void
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long address;
-	int user_address;
 	const struct exception_table_entry *fixup;
-	int si_code = SEGV_MAPERR;
+	int si_code;
+	int space;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -294,7 +281,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	   NULL pointer write access in kernel mode. */
 	if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
 		address = 0;
-		user_address = 0;
+		space = 0;
 		goto no_context;
 	}
@@ -309,15 +296,15 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	 * the address
 	 */
 	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	user_address = check_user_space(regs, error_code);
+	space = check_space(tsk);
 
 	/*
 	 * Verify that the fault happened in user space, that
 	 * we are not in an interrupt and that there is a
 	 * user context.
 	 */
-	if (user_address == 0 || in_atomic() || !mm)
+	if (unlikely(space == 0 || in_atomic() || !mm))
 		goto no_context;
 
 	/*
 	 * When we get here, the fault happened in the current
@@ -328,12 +315,13 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	down_read(&mm->mmap_sem);
 
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
+	si_code = SEGV_MAPERR;
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-	if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
+	if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
 		if (!signal_return(mm, regs, address, error_code))
 			/*
 			 * signal_return() has done an up_read(&mm->mmap_sem)
@@ -389,7 +377,7 @@ survive:
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
-	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
 	return;
 
 /*
@@ -419,7 +407,7 @@ no_context:
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	if (user_address == 0)
+	if (space == 0)
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
@@ -462,13 +450,14 @@ do_sigbus:
 	goto no_context;
 }
 
-void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs,
+				       unsigned long error_code)
 {
 	regs->psw.addr -= (error_code >> 16);
 	do_exception(regs, 4, 1);
 }
 
-void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
 {
 	do_exception(regs, error_code & 0xff, 0);
 }
......
@@ -461,6 +461,7 @@ int dasd_eer_enable(struct dasd_device *device)
 	cqr->device = device;
 	cqr->retries = 255;
 	cqr->expires = 10 * HZ;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 
 	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
 	cqr->cpaddr->count = SNSS_DATA_SIZE;
......
@@ -647,7 +647,10 @@ tape_std_mtcompression(struct tape_device *device, int mt_count)
 		return PTR_ERR(request);
 	request->op = TO_NOP;
 	/* setup ccws */
-	*device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
+	if (mt_count == 0)
+		*device->modeset_byte &= ~0x08;
+	else
+		*device->modeset_byte |= 0x08;
 	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
 	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
 	/* execute it */
......
@@ -144,8 +144,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
 	ret = stsch(sch->schid, &sch->schib);
 	if (ret || !sch->schib.pmcw.dnv)
 		return -ENODEV;
-	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
-		/* Not operational or no activity -> done. */
+	if (!sch->schib.pmcw.ena)
+		/* Not operational -> done. */
 		return 0;
 	/* Stage 1: cancel io. */
 	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
@@ -334,20 +334,29 @@ ccw_device_oper_notify(struct work_struct *work)
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
+	unsigned long flags;
 
 	priv = container_of(work, struct ccw_device_private, kick_work);
 	cdev = priv->cdev;
+	spin_lock_irqsave(cdev->ccwlock, flags);
 	sch = to_subchannel(cdev->dev.parent);
-	ret = (sch->driver && sch->driver->notify) ?
-		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
-	if (!ret)
-		/* Driver doesn't want device back. */
-		ccw_device_do_unreg_rereg(work);
-	else {
+	if (sch->driver && sch->driver->notify) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		ret = sch->driver->notify(&sch->dev, CIO_OPER);
+		spin_lock_irqsave(cdev->ccwlock, flags);
+	} else
+		ret = 0;
+	if (ret) {
 		/* Reenable channel measurements, if needed. */
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
 		cmf_reenable(cdev);
+		spin_lock_irqsave(cdev->ccwlock, flags);
 		wake_up(&cdev->private->wait_q);
 	}
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (!ret)
+		/* Driver doesn't want device back. */
+		ccw_device_do_unreg_rereg(work);
 }
 
 /*
@@ -534,15 +543,21 @@ ccw_device_nopath_notify(struct work_struct *work)
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
+	unsigned long flags;
 
 	priv = container_of(work, struct ccw_device_private, kick_work);
 	cdev = priv->cdev;
+	spin_lock_irqsave(cdev->ccwlock, flags);
 	sch = to_subchannel(cdev->dev.parent);
 	/* Extra sanity. */
 	if (sch->lpm)
-		return;
-	ret = (sch->driver && sch->driver->notify) ?
-		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
+		goto out_unlock;
+	if (sch->driver && sch->driver->notify) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
+		spin_lock_irqsave(cdev->ccwlock, flags);
+	} else
+		ret = 0;
 	if (!ret) {
 		if (get_device(&sch->dev)) {
 			/* Driver doesn't want to keep device. */
@@ -562,6 +577,8 @@ ccw_device_nopath_notify(struct work_struct *work)
 		cdev->private->state = DEV_STATE_DISCONNECTED;
 		wake_up(&cdev->private->wait_q);
 	}
+out_unlock:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
 }
 
 void
@@ -607,10 +624,13 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 	default:
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify);
-		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		if (cdev->online) {
+			PREPARE_WORK(&cdev->private->kick_work,
+				     ccw_device_nopath_notify);
+			queue_work(ccw_device_notify_work,
+				   &cdev->private->kick_work);
+		} else
+			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
 	}
 }
@@ -756,15 +776,22 @@ static void
 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct subchannel *sch;
+	int ret;
 
 	sch = to_subchannel(cdev->dev.parent);
-	if (sch->driver->notify &&
-	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
-		ccw_device_set_timeout(cdev, 0);
-		cdev->private->flags.fake_irb = 0;
-		cdev->private->state = DEV_STATE_DISCONNECTED;
-		wake_up(&cdev->private->wait_q);
-		return;
+	if (sch->driver->notify) {
+		spin_unlock_irq(cdev->ccwlock);
+		ret = sch->driver->notify(&sch->dev,
+					  sch->lpm ? CIO_GONE : CIO_NO_PATH);
+		spin_lock_irq(cdev->ccwlock);
+	} else
+		ret = 0;
+	if (ret) {
+		ccw_device_set_timeout(cdev, 0);
+		cdev->private->flags.fake_irb = 0;
+		cdev->private->state = DEV_STATE_DISCONNECTED;
+		wake_up(&cdev->private->wait_q);
+		return;
 	}
 	cdev->private->state = DEV_STATE_NOT_OPER;
 	cio_disable_subchannel(sch);
@@ -969,18 +996,12 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	ccw_device_set_timeout(cdev, 0);
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
 	/* OK, i/o is dead now. Call interrupt handler. */
-	cdev->private->state = DEV_STATE_ONLINE;
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
 			      ERR_PTR(-EIO));
-	if (!sch->lpm) {
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify);
-		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-	} else if (cdev->private->flags.doverify)
-		/* Start delayed path verification. */
-		ccw_device_online_verify(cdev, 0);
 }
 
 static void
@@ -993,21 +1014,8 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 		ccw_device_set_timeout(cdev, 3*HZ);
 		return;
 	}
-	if (ret == -ENODEV) {
-		struct subchannel *sch;
-
-		sch = to_subchannel(cdev->dev.parent);
-		if (!sch->lpm) {
-			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify);
-			queue_work(ccw_device_notify_work,
-				   &cdev->private->kick_work);
-		} else
-			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
-		return;
-	}
-	//FIXME: Can we get here?
-	cdev->private->state = DEV_STATE_ONLINE;
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
 			      ERR_PTR(-EIO));
@@ -1025,26 +1033,11 @@ void device_kill_io(struct subchannel *sch)
 		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
 		return;
 	}
-	if (ret == -ENODEV) {
-		if (!sch->lpm) {
-			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify);
-			queue_work(ccw_device_notify_work,
-				   &cdev->private->kick_work);
-		} else
-			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
-		return;
-	}
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
 			      ERR_PTR(-EIO));
-	if (!sch->lpm) {
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify);
-		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-	} else
-		/* Start delayed path verification. */
-		ccw_device_online_verify(cdev, 0);
 }
 
 static void
......
@@ -16,7 +16,7 @@
  *    void check_bugs(void);
  */
 
-static void __init check_bugs(void)
+static inline void check_bugs(void)
 {
 	/* s390 has no bugs ... */
 }
@@ -74,6 +74,7 @@ struct ipl_parameter_block {
 extern u32 ipl_flags;
 extern u16 ipl_devno;
 
+extern u32 dump_prefix_page;
 extern void do_reipl(void);
 extern void ipl_save_parameters(void);
......