Commit 357d596b authored by Linus Torvalds

Merge branch 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6

parents e6c69bd3 d67eb16f
@@ -4,6 +4,7 @@
  */
 #include <asm/asmmacro.h>
+#include <asm/pal.h>
 	.bss
 	.align 16
@@ -49,7 +50,11 @@ GLOBAL_ENTRY(jmp_to_kernel)
 	br.sptk.few b7
 END(jmp_to_kernel)
+/*
+ * r28 contains the index of the PAL function
+ * r29--31 the args
+ * Return values in ret0--3 (r8--11)
+ */
 GLOBAL_ENTRY(pal_emulator_static)
 	mov r8=-1
 	mov r9=256
@@ -62,7 +67,7 @@ GLOBAL_ENTRY(pal_emulator_static)
 	cmp.gtu p6,p7=r9,r28
 (p6)	br.cond.sptk.few stacked
 	;;
-static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_INFO */
+static:	cmp.eq p6,p7=PAL_PTCE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	;;
 	mov r8=0			/* status = 0 */
@@ -70,21 +75,21 @@ static:	cmp.eq p6,p7=PAL_PTCE_INFO,r28
 	movl r10=0x0000000200000003	/* count[0], count[1] */
 	movl r11=0x1000000000002000	/* stride[0], stride[1] */
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */
+1:	cmp.eq p6,p7=PAL_FREQ_RATIOS,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	movl r9 =0x100000064		/* proc_ratio (1/100) */
 	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
 	movl r11=0x100000064		/* itc_ratio<<32 (1/100) */
 	;;
-1:	cmp.eq p6,p7=19,r28		/* PAL_RSE_INFO */
+1:	cmp.eq p6,p7=PAL_RSE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	mov r9=96			/* num phys stacked */
 	mov r10=0			/* hints */
 	mov r11=0
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=1,r28		/* PAL_CACHE_FLUSH */
+1:	cmp.eq p6,p7=PAL_CACHE_FLUSH,r28	/* PAL_CACHE_FLUSH */
 (p7)	br.cond.sptk.few 1f
 	mov r9=ar.lc
 	movl r8=524288			/* flush 512k million cache lines (16MB) */
@@ -102,7 +107,7 @@ static:	cmp.eq p6,p7=PAL_PTCE_INFO,r28
 	mov ar.lc=r9
 	mov r8=r0
 	;;
-1:	cmp.eq p6,p7=15,r28		/* PAL_PERF_MON_INFO */
+1:	cmp.eq p6,p7=PAL_PERF_MON_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	movl r9 =0x08122f04		/* generic=4 width=47 retired=8 cycles=18 */
@@ -138,6 +143,20 @@ static:	cmp.eq p6,p7=PAL_PTCE_INFO,r28
 	st8 [r29]=r0,16			/* clear remaining bits */
 	st8 [r18]=r0,16			/* clear remaining bits */
 	;;
+1:	cmp.eq p6,p7=PAL_VM_SUMMARY,r28
+(p7)	br.cond.sptk.few 1f
+	mov r8=0			/* status = 0 */
+	movl r9=0x2044040020F1865	/* num_tc_levels=2, num_unique_tcs=4 */
+					/* max_itr_entry=64, max_dtr_entry=64 */
+					/* hash_tag_id=2, max_pkr=15 */
+					/* key_size=24, phys_add_size=50, vw=1 */
+	movl r10=0x183C			/* rid_size=24, impl_va_msb=60 */
+	;;
+1:	cmp.eq p6,p7=PAL_MEM_ATTRIB,r28
+(p7)	br.cond.sptk.few 1f
+	mov r8=0			/* status = 0 */
+	mov r9=0x80|0x01		/* NatPage|WB */
+	;;
 1:	br.cond.sptk.few rp
 stacked:
 	br.ret.sptk.few rp
......
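The emulator's calling convention maps naturally onto C. Below is a hypothetical C model of the dispatch above, useful for following the assembly; the struct, function, and enum names are illustrative, not kernel API, and the index values for PAL_VM_SUMMARY and PAL_MEM_ATTRIB are not shown in this diff and are assumed from the architected PAL numbering.

	#include <stdint.h>

	/* Models the four PAL return registers r8..r11 (ret0..ret3). */
	struct pal_retval {
		int64_t  status;	/* r8: 0 = success, -1 = unimplemented */
		uint64_t v0, v1, v2;	/* r9, r10, r11 */
	};

	enum {				/* PAL procedure indices (r28) */
		PAL_CACHE_FLUSH   = 1,
		PAL_MEM_ATTRIB    = 5,	/* assumed, not shown in the diff */
		PAL_PTCE_INFO     = 6,
		PAL_VM_SUMMARY    = 8,	/* assumed, not shown in the diff */
		PAL_FREQ_RATIOS   = 14,
		PAL_RSE_INFO      = 19,
	};

	static struct pal_retval pal_emulator_model(uint64_t index)
	{
		struct pal_retval r = { .status = -1, .v0 = 0, .v1 = 0, .v2 = 0 };

		switch (index) {
		case PAL_FREQ_RATIOS:
			r.status = 0;
			r.v0 = 0x100000064;		/* proc_ratio (1/100) */
			r.v1 = 0x100000100;		/* bus_ratio<<32 (1/256) */
			r.v2 = 0x100000064;		/* itc_ratio<<32 (1/100) */
			break;
		case PAL_RSE_INFO:
			r.status = 0;
			r.v0 = 96;			/* num phys stacked */
			break;
		case PAL_VM_SUMMARY:
			r.status = 0;
			r.v0 = 0x2044040020F1865;	/* packed vm_info_1 fields */
			r.v1 = 0x183C;			/* rid_size=24, impl_va_msb=60 */
			break;
		case PAL_MEM_ATTRIB:
			r.status = 0;
			r.v0 = 0x80 | 0x01;		/* NatPage|WB */
			break;
		}
		return r;
	}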
@@ -211,17 +211,41 @@ void foo(void)
 #endif
 	BLANK();
-	DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
-	       offsetof (struct ia64_mca_cpu, proc_state_dump));
-	DEFINE(IA64_MCA_CPU_STACK_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stack));
-	DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stackframe));
-	DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
-	       offsetof (struct ia64_mca_cpu, rbstore));
+	DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET,
+	       offsetof (struct ia64_mca_cpu, mca_stack));
 	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
 	       offsetof (struct ia64_mca_cpu, init_stack));
 	BLANK();
+	DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
+	       offsetof (struct ia64_sal_os_state, sal_ra));
+	DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
+	       offsetof (struct ia64_sal_os_state, os_gp));
+	DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+	       offsetof (struct ia64_sal_os_state, pal_min_state));
+	DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
+	       offsetof (struct ia64_sal_os_state, proc_state_param));
+	DEFINE(IA64_SAL_OS_STATE_SIZE,
+	       sizeof (struct ia64_sal_os_state));
+	DEFINE(IA64_PMSA_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_gr));
+	DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_bank1_gr));
+	DEFINE(IA64_PMSA_PR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_pr));
+	DEFINE(IA64_PMSA_BR0_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_br0));
+	DEFINE(IA64_PMSA_RSC_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_rsc));
+	DEFINE(IA64_PMSA_IIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_iip));
+	DEFINE(IA64_PMSA_IPSR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ipsr));
+	DEFINE(IA64_PMSA_IFS_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ifs));
+	DEFINE(IA64_PMSA_XIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_xip));
+	BLANK();
 	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
 	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
 	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
......
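The DEFINE() entries above exist so that assembly code like mca_asm.S can reference C structure layouts by name. A minimal sketch of the conventional asm-offsets mechanism behind them (the exact macro body in this tree may differ; treat this as illustrative):

	/* asm-offsets.c is only compiled to assembly; each DEFINE() emits a
	 * "->SYMBOL value" marker that a build script rewrites into a plain
	 * #define in asm-offsets.h, where assembly code can use it. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	#define BLANK() asm volatile("\n->" : : )

	/* So IA64_SAL_OS_STATE_SIZE above becomes, for example:
	 *	#define IA64_SAL_OS_STATE_SIZE <n>
	 * where <n> is whatever sizeof(struct ia64_sal_os_state) is. */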
@@ -69,7 +69,6 @@
 # define DBG_FAULT(i)
 #endif
-#define MINSTATE_VIRT	/* needed by minstate.h */
 #include "minstate.h"
 #define FAULT(n)	\
......
@@ -4,6 +4,8 @@
  *
  * Copyright (C) 2004 FUJITSU LIMITED
  * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  */
 #include <linux/config.h>
 #include <linux/types.h>
@@ -38,10 +40,6 @@
 /* max size of SAL error record (default) */
 static int sal_rec_max = 10000;
-/* from mca.c */
-static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state;
-static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state;
-
 /* from mca_drv_asm.S */
 extern void *mca_handler_bhhook(void);
@@ -316,7 +314,8 @@ init_record_index_pools(void)
  */
 static mca_type_t
-is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+	      struct ia64_sal_os_state *sos)
 {
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
@@ -327,7 +326,7 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
 	 * Therefore it is local MCA when rendezvous has not been requested.
 	 * Failed to rendezvous, the system must be down.
 	 */
-	switch (sal_to_os_handoff_state->imsto_rendez_state) {
+	switch (sos->rv_rc) {
	case -1: /* SAL rendezvous unsuccessful */
 		return MCA_IS_GLOBAL;
	case 0: /* SAL rendezvous not required */
@@ -388,7 +387,8 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
  */
 static int
-recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			struct ia64_sal_os_state *sos)
 {
 	sal_log_mod_error_info_t *smei;
 	pal_min_state_area_t *pmsa;
@@ -426,7 +426,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
 		 * setup for resume to bottom half of MCA,
 		 * "mca_handler_bhhook"
 		 */
-		pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61));
+		pmsa = sos->pal_min_state;
 		/* pass to bhhook as 1st argument (gr8) */
 		pmsa->pmsa_gr[8-1] = smei->target_identifier;
 		/* set interrupted return address (but no use) */
@@ -459,7 +459,8 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
  */
 static int
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			    struct ia64_sal_os_state *sos)
 {
 	int status = 0;
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
@@ -469,7 +470,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
		case 1: /* partial read */
		case 3: /* full line(cpu) read */
		case 9: /* I/O space read */
-			status = recover_from_read_error(slidx, peidx, pbci);
+			status = recover_from_read_error(slidx, peidx, pbci, sos);
			break;
		case 0: /* unknown */
		case 2: /* partial write */
@@ -508,7 +509,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
  */
 static int
-recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			     struct ia64_sal_os_state *sos)
 {
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
@@ -545,7 +547,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 	 * This means "there are some platform errors".
 	 */
	if (platform)
-		return recover_from_platform_error(slidx, peidx, pbci);
+		return recover_from_platform_error(slidx, peidx, pbci, sos);
 	/*
 	 * On account of strange SAL error record, we cannot recover.
 	 */
@@ -562,8 +564,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 static int
 mca_try_to_recover(void *rec,
-	ia64_mca_sal_to_os_state_t *sal_to_os_state,
-	ia64_mca_os_to_sal_state_t *os_to_sal_state)
+	struct ia64_sal_os_state *sos)
 {
 	int platform_err;
 	int n_proc_err;
@@ -571,10 +572,6 @@ mca_try_to_recover(void *rec,
 	peidx_table_t peidx;
 	pal_bus_check_info_t pbci;
-	/* handoff state from/to mca.c */
-	sal_to_os_handoff_state = sal_to_os_state;
-	os_to_sal_handoff_state = os_to_sal_state;
-
 	/* Make index of SAL error record */
 	platform_err = mca_make_slidx(rec, &slidx);
@@ -597,11 +594,11 @@ mca_try_to_recover(void *rec,
 	*((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
 	/* Check whether MCA is global or not */
-	if (is_mca_global(&peidx, &pbci))
+	if (is_mca_global(&peidx, &pbci, sos))
 		return 0;
 	/* Try to recover a processor error */
-	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci);
+	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
 }
 /*
......
@@ -4,73 +4,6 @@
 #include "entry.h"
-/*
- * For ivt.s we want to access the stack virtually so we don't have to disable translation
- * on interrupts.
- *
- * On entry:
- *	r1:	pointer to current task (ar.k6)
- */
-#define MINSTATE_START_SAVE_MIN_VIRT \
-(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-	;; \
-(pUStk)	mov.m r24=ar.rnat; \
-(pUStk)	addl r22=IA64_RBS_OFFSET,r1;			/* compute base of RBS */ \
-(pKStk) mov r1=sp;					/* get sp  */ \
-	;; \
-(pUStk) lfetch.fault.excl.nt1 [r22]; \
-(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */ \
-(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */ \
-	;; \
-(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */ \
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */ \
-	;; \
-(pUStk)	mov r18=ar.bsp; \
-(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */
-
-#define MINSTATE_END_SAVE_MIN_VIRT \
-	bsw.1;			/* switch back to bank 1 (must be last in insn group) */ \
-	;;
-
-/*
- * For mca_asm.S we want to access the stack physically since the state is saved before we
- * go virtual and don't want to destroy the iip or ipsr.
- */
-#define MINSTATE_START_SAVE_MIN_PHYS \
-(pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
-(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
-(pKStk) ld8 r3 = [r3];; \
-(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
-(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
-(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-(pUStk)	addl r22=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */ \
-	;; \
-(pUStk)	mov r24=ar.rnat; \
-(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */ \
-(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */ \
-(pUStk)	dep r22=-1,r22,61,3;			/* compute kernel virtual addr of RBS */ \
-	;; \
-(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */ \
-	;; \
-(pUStk)	mov r18=ar.bsp; \
-(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */ \
-
-#define MINSTATE_END_SAVE_MIN_PHYS \
-	dep r12=-1,r12,61,3;		/* make sp a kernel virtual address */ \
-	;;
-
-#ifdef MINSTATE_VIRT
-# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT)
-# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
-# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
-#endif
-
-#ifdef MINSTATE_PHYS
-# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT);; tpa reg=reg
-# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
-# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
-#endif
-
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -97,7 +30,7 @@
  * we can pass interruption state as arguments to a handler.
  */
 #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
-	MINSTATE_GET_CURRENT(r16);	/* M (or M;;I) */ \
+	mov r16=IA64_KR(CURRENT);	/* M */ \
 	mov r27=ar.rsc;			/* M */ \
 	mov r20=r1;			/* A */ \
 	mov r25=ar.unat;		/* M */ \
@@ -118,7 +51,21 @@
 	SAVE_IFS; \
 	cmp.eq pKStk,pUStk=r0,r17;	/* are we in kernel mode already? */ \
 	;; \
-	MINSTATE_START_SAVE_MIN \
+(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
+	;; \
+(pUStk)	mov.m r24=ar.rnat; \
+(pUStk)	addl r22=IA64_RBS_OFFSET,r1;			/* compute base of RBS */ \
+(pKStk) mov r1=sp;					/* get sp  */ \
+	;; \
+(pUStk) lfetch.fault.excl.nt1 [r22]; \
+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */ \
+(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */ \
+	;; \
+(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */ \
+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */ \
+	;; \
+(pUStk)	mov r18=ar.bsp; \
+(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */ \
 	adds r17=2*L1_CACHE_BYTES,r1;		/* really: biggest cache-line size */ \
 	adds r16=PT(CR_IPSR),r1; \
 	;; \
@@ -181,7 +128,8 @@
 	EXTRA; \
 	movl r1=__gp;		/* establish kernel global pointer */ \
 	;; \
-	MINSTATE_END_SAVE_MIN
+	bsw.1;			/* switch back to bank 1 (must be last in insn group) */ \
+	;;
 /*
  * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
......
@@ -307,9 +307,7 @@ vm_info(char *page)
 	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
 		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
-		return 0;
-	}
+	} else {
 	p += sprintf(p,
 		     "Physical Address Space         : %d bits\n"
@@ -319,13 +317,14 @@ vm_info(char *page)
 		     "Hash Tag ID                    : 0x%x\n"
 		     "Size of RR.rid                 : %d\n",
 		     vm_info_1.pal_vm_info_1_s.phys_add_size,
-		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1,
-		     vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id,
+		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
+		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
+		     vm_info_1.pal_vm_info_1_s.key_size,
+		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
 		     vm_info_2.pal_vm_info_2_s.rid_size);
+	}
-	if (ia64_pal_mem_attrib(&attrib) != 0)
-		return 0;
+	if (ia64_pal_mem_attrib(&attrib) == 0) {
 	p += sprintf(p, "Supported memory attributes    : ");
 	sep = "";
 	for (i = 0; i < 8; i++) {
@@ -335,11 +334,11 @@ vm_info(char *page)
 		}
 	}
 	p += sprintf(p, "\n");
+	}
 	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
 		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
-		return 0;
-	}
+	} else {
 	p += sprintf(p,
 		     "\nTLB walker                     : %simplemented\n"
@@ -356,19 +355,18 @@ vm_info(char *page)
 	p += sprintf(p, "\nTLB purgeable page sizes       : ");
 	p = bitvector_process(p, vw_pages);
+	}
 	if ((status=ia64_get_ptce(&ptce)) != 0) {
 		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
-		return 0;
-	}
+	} else {
 	p += sprintf(p,
 		     "\nPurge base address             : 0x%016lx\n"
 		     "Purge outer loop count         : %d\n"
 		     "Purge inner loop count         : %d\n"
 		     "Purge outer loop stride        : %d\n"
 		     "Purge inner loop stride        : %d\n",
-		     ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]);
+		     ptce.base, ptce.count[0], ptce.count[1],
+		     ptce.stride[0], ptce.stride[1]);
 	p += sprintf(p,
 		     "TC Levels                      : %d\n"
@@ -392,19 +390,26 @@ vm_info(char *page)
 			     "\tAssociativity                 : %d\n"
 			     "\tNumber of entries             : %d\n"
 			     "\tFlags                         : ",
-			     cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets,
-			     tc_info.tc_associativity, tc_info.tc_num_entries);
+			     cache_types[j+tc_info.tc_unified], i+1,
+			     tc_info.tc_num_sets,
+			     tc_info.tc_associativity,
+			     tc_info.tc_num_entries);
-			if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized ");
-			if (tc_info.tc_unified) p += sprintf(p, "Unified ");
-			if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction");
+			if (tc_info.tc_pf)
+				p += sprintf(p, "PreferredPageSizeOptimized ");
+			if (tc_info.tc_unified)
+				p += sprintf(p, "Unified ");
+			if (tc_info.tc_reduce_tr)
+				p += sprintf(p, "TCReduction");
 			p += sprintf(p, "\n\tSupported page sizes: ");
 			p = bitvector_process(p, tc_pages);
 			/* when unified date (j=2) is enough */
-			if (tc_info.tc_unified) break;
+			if (tc_info.tc_unified)
+				break;
+		}
 		}
 	}
 	p += sprintf(p, "\n");
@@ -440,14 +445,14 @@ register_info(char *page)
 		p += sprintf(p, "\n");
 	}
-	if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0;
+	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {
 	p += sprintf(p,
 		     "RSE stacked physical registers   : %ld\n"
		     "RSE load/store hints             : %ld (%s)\n",
		     phys_stacked, hints.ph_data,
		     hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
+	}
 	if (ia64_pal_debug_info(&iregs, &dregs))
 		return 0;
......
@@ -22,6 +22,11 @@
  *
  * Dec  5 2004	kaos@sgi.com
  *   Standardize which records are cleared automatically.
+ *
+ * Aug 18 2005	kaos@sgi.com
+ *   mca.c may not pass a buffer, a NULL buffer just indicates that a new
+ *   record is available in SAL.
+ *   Replace some NR_CPUS by cpus_online, for hotplug cpu.
  */
 #include <linux/types.h>
@@ -193,7 +198,7 @@ shift1_data_saved (struct salinfo_data *data, int shift)
  * The buffer passed from mca.c points to the output from ia64_log_get. This is
  * a persistent buffer but its contents can change between the interrupt and
  * when user space processes the record.  Save the record id to identify
- * changes.
+ * changes.  If the buffer is NULL then just update the bitmap.
  */
 void
 salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
@@ -206,6 +211,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 	BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
+	if (buffer) {
 		if (irqsafe)
 			spin_lock_irqsave(&data_saved_lock, flags);
 		for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
@@ -227,6 +233,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 		}
 		if (irqsafe)
 			spin_unlock_irqrestore(&data_saved_lock, flags);
+	}
 	if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) {
 		if (irqsafe)
@@ -244,7 +251,7 @@ salinfo_timeout_check(struct salinfo_data *data)
 	int i;
 	if (!data->open)
 		return;
-	for (i = 0; i < NR_CPUS; ++i) {
+	for_each_online_cpu(i) {
 		if (test_bit(i, &data->cpu_event)) {
 			/* double up() is not a problem, user space will see no
 			 * records for the additional "events".
@@ -291,7 +298,7 @@ retry:
 	n = data->cpu_check;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (test_bit(n, &data->cpu_event)) {
+		if (test_bit(n, &data->cpu_event) && cpu_online(n)) {
 			cpu = n;
 			break;
 		}
@@ -585,8 +592,7 @@ salinfo_init(void)
 		/* we missed any events before now */
 		online = 0;
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j)) {
+		for_each_online_cpu(j) {
 				set_bit(j, &data->cpu_event);
 				++online;
 			}
......
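All of the salinfo hotplug changes follow one pattern: never walk CPU numbers 0..NR_CPUS-1 when only online CPUs matter. A hedged before/after sketch of that pattern (the helper name is illustrative, not the salinfo code itself):

	/* Before: visits every possible CPU number, online or not. */
	for (i = 0; i < NR_CPUS; ++i)
		if (test_bit(i, &data->cpu_event))
			process_cpu_event(i);		/* illustrative helper */

	/* After: visits only CPUs in the online map; an event bit for an
	 * offline CPU is simply left pending until that CPU returns. */
	for_each_online_cpu(i)
		if (test_bit(i, &data->cpu_event))
			process_cpu_event(i);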
@@ -2019,28 +2019,6 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t,
 	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
 }
-void
-unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
-			    struct pt_regs *pt, struct switch_stack *sw)
-{
-	unsigned long sof;
-
-	init_frame_info(info, t, sw, pt->r12);
-	info->cfm_loc = &pt->cr_ifs;
-	info->unat_loc = &pt->ar_unat;
-	info->pfs_loc = &pt->ar_pfs;
-	sof = *info->cfm_loc & 0x7f;
-	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
-	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
-	info->pt = (unsigned long) pt;
-	UNW_DPRINT(3, "unwind.%s:\n"
-		   "  bsp    0x%lx\n"
-		   "  sof    0x%lx\n"
-		   "  ip     0x%lx\n",
-		   __FUNCTION__, info->bsp, sof, info->ip);
-	find_save_locs(info);
-}
-
 void
 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
 {
......
@@ -382,13 +382,22 @@ ia64_mmu_init (void *my_cpu_data)
 	if (impl_va_bits < 51 || impl_va_bits > 61)
 		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
+	/*
+	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
+	 * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
+	 * the test makes sure that our mapped space doesn't overlap the
+	 * unimplemented hole in the middle of the region.
+	 */
+	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
+	    (mapped_space_bits > impl_va_bits - 1))
+		panic("Cannot build a big enough virtual-linear page table"
+		      " to cover mapped address space.\n"
+		      " Try using a smaller page size.\n");
+
 	/* place the VMLPT at the end of each page-table mapped region: */
 	pta = POW2(61) - POW2(vmlpt_bits);
-	if (POW2(mapped_space_bits) >= pta)
-		panic("mm/init: overlap between virtually mapped linear page table and "
-		      "mapped kernel space!");
 	/*
 	 * Set the (virtually mapped linear) page table address.  Bit
 	 * 8 selects between the short and long format, bits 2-7 the
......
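A worked instance of the new check, assuming the derivations used earlier in this file (pte_bits = 3, mapped_space_bits = 3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT, vmlpt_bits = impl_va_bits - PAGE_SHIFT + pte_bits) and a 16KB-page configuration:

	#include <stdio.h>

	int main(void)
	{
		const int PAGE_SHIFT = 14;	/* 16KB pages */
		const int pte_bits = 3;		/* 8-byte PTEs */
		const int impl_va_bits = 61;	/* impl_va_msb = 60 */
		const int mapped_space_bits = 3 * (PAGE_SHIFT - pte_bits) + PAGE_SHIFT;
		const int vmlpt_bits = impl_va_bits - PAGE_SHIFT + pte_bits;

		/* 2^(47-14) PTEs must fit in 2^(50-3) VMLPT slots: 33 <= 47, ok */
		printf("ptes need %d bits, vmlpt offers %d bits\n",
		       mapped_space_bits - PAGE_SHIFT, vmlpt_bits - pte_bits);
		/* the 2^47 mapped space must avoid the region hole: 47 <= 60, ok */
		printf("mapped_space_bits=%d, impl_va_bits-1=%d\n",
		       mapped_space_bits, impl_va_bits - 1);
		return 0;
	}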
@@ -49,6 +49,7 @@
 #include <asm/sn/clksupport.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/geo.h>
+#include <asm/sn/sn_feature_sets.h>
 #include "xtalk/xwidgetdev.h"
 #include "xtalk/hubdev.h"
 #include <asm/sn/klconfig.h>
@@ -97,6 +98,7 @@ EXPORT_SYMBOL(sn_region_size);
 int sn_prom_type;	/* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
 short physical_node_map[MAX_PHYSNODE_ID];
+static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];
 EXPORT_SYMBOL(physical_node_map);
@@ -271,7 +273,10 @@ void __init sn_setup(char **cmdline_p)
 	u32 version = sn_sal_rev();
 	extern void sn_cpu_init(void);
-	ia64_sn_plat_set_error_handling_features();
+	ia64_sn_plat_set_error_handling_features();	// obsolete
+	ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
+	ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
 #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
 	/*
@@ -314,16 +319,6 @@ void __init sn_setup(char **cmdline_p)
 	printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
-	/*
-	 * Confirm the SAL we're running on is recent enough...
-	 */
-	if (version < SN_SAL_MIN_VERSION) {
-		printk(KERN_ERR "This kernel needs SGI SAL version >= "
-		       "%x.%02x\n", SN_SAL_MIN_VERSION >> 8,
-		       SN_SAL_MIN_VERSION & 0x00FF);
-		panic("PROM version too old\n");
-	}
-
 	master_nasid = boot_get_nasid();
 	status =
@@ -480,6 +475,10 @@ void __init sn_cpu_init(void)
 	if (nodepdaindr[0] == NULL)
 		return;
+	for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
+		if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
+			break;
+
 	cpuid = smp_processor_id();
 	cpuphyid = get_sapicid();
@@ -651,3 +650,12 @@ nasid_slice_to_cpuid(int nasid, int slice)
 	return -1;
 }
+
+int sn_prom_feature_available(int id)
+{
+	if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS)
+		return 0;
+	return test_bit(id, sn_prom_features);
+}
+EXPORT_SYMBOL(sn_prom_feature_available);
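With this in place, platform code can gate optional behavior on PROM capability. A hypothetical caller, following the usage pattern documented in sn_feature_sets.h below (PRF_FEATURE_XXX is that header's example bit; the helpers are illustrative):

	if (sn_prom_feature_available(PRF_FEATURE_XXX))
		use_new_prom_interface();	/* illustrative */
	else
		use_legacy_prom_interface();	/* illustrative */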
@@ -11,8 +11,6 @@
 #ifndef _ASM_IA64_MCA_H
 #define _ASM_IA64_MCA_H
-#define IA64_MCA_STACK_SIZE	8192
-
 #if !defined(__ASSEMBLY__)
 #include <linux/interrupt.h>
@@ -48,7 +46,8 @@ typedef union cmcv_reg_u {
 enum {
 	IA64_MCA_RENDEZ_CHECKIN_NOTDONE	=	0x0,
-	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1
+	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1,
+	IA64_MCA_RENDEZ_CHECKIN_INIT	=	0x2,
 };
 /* Information maintained by the MC infrastructure */
@@ -63,18 +62,42 @@ typedef struct ia64_mc_info_s {
 } ia64_mc_info_t;
-typedef struct ia64_mca_sal_to_os_state_s {
-	u64		imsto_os_gp;		/* GP of the os registered with the SAL */
-	u64		imsto_pal_proc;		/* PAL_PROC entry point - physical addr */
-	u64		imsto_sal_proc;		/* SAL_PROC entry point - physical addr */
-	u64		imsto_sal_gp;		/* GP of the SAL - physical */
-	u64		imsto_rendez_state;	/* Rendez state information */
-	u64		imsto_sal_check_ra;	/* Return address in SAL_CHECK while going
-						 * back to SAL from OS after MCA handling.
-						 */
-	u64		pal_min_state;		/* from PAL in r17 */
-	u64		proc_state_param;	/* from PAL in r18. See SDV 2:268 11.3.2.1 */
-} ia64_mca_sal_to_os_state_t;
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Besides the handover state, it also contains some saved registers from the
+ * time of the event.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+	/* SAL to OS, must be at offset 0 */
+	u64			os_gp;			/* GP of the os registered with the SAL, physical */
+	u64			pal_proc;		/* PAL_PROC entry point, physical */
+	u64			sal_proc;		/* SAL_PROC entry point, physical */
+	u64			rv_rc;			/* MCA - Rendezvous state, INIT - reason code */
+	u64			proc_state_param;	/* from R18 */
+	u64			monarch;		/* 1 for a monarch event, 0 for a slave */
+	/* common, must follow SAL to OS */
+	u64			sal_ra;			/* Return address in SAL, physical */
+	u64			sal_gp;			/* GP of the SAL - physical */
+	pal_min_state_area_t	*pal_min_state;		/* from R17.  physical in asm, virtual in C */
+	u64			prev_IA64_KR_CURRENT;	/* previous value of IA64_KR(CURRENT) */
+	struct task_struct	*prev_task;		/* previous task, NULL if it is not useful */
+	/* Some interrupt registers are not saved in minstate, pt_regs or
+	 * switch_stack.  Because MCA/INIT can occur when interrupts are
+	 * disabled, we need to save the additional interrupt registers over
+	 * MCA/INIT and resume.
+	 */
+	u64			isr;
+	u64			ifa;
+	u64			itir;
+	u64			iipa;
+	u64			iim;
+	u64			iha;
+	/* OS to SAL, must follow common */
+	u64			os_status;		/* OS status to SAL, enum below */
+	u64			context;		/* 0 if return to same context
+							   1 if return to new context */
+};
 enum {
 	IA64_MCA_CORRECTED	=	0x0,	/* Error has been corrected by OS_MCA */
@@ -83,36 +106,22 @@ enum {
 	IA64_MCA_HALT		=	-3	/* System to be halted by SAL */
 };
+enum {
+	IA64_INIT_RESUME	=	0x0,	/* Resume after return from INIT */
+	IA64_INIT_WARM_BOOT	=	-1,	/* Warm boot of the system need from SAL */
+};
+
 enum {
 	IA64_MCA_SAME_CONTEXT	=	0x0,	/* SAL to return to same context */
 	IA64_MCA_NEW_CONTEXT	=	-1	/* SAL to return to new context */
 };
-typedef struct ia64_mca_os_to_sal_state_s {
-	u64		imots_os_status;	/*   OS status to SAL as to what happened
-						 *   with the MCA handling.
-						 */
-	u64		imots_sal_gp;		/* GP of the SAL - physical */
-	u64		imots_context;		/* 0 if return to same context
-						   1 if return to new context */
-	u64		*imots_new_min_state;	/* Pointer to structure containing
-						 * new values of registers in the min state
-						 * save area.
-						 */
-	u64		imots_sal_check_ra;	/* Return address in SAL_CHECK while going
-						 * back to SAL from OS after MCA handling.
-						 */
-} ia64_mca_os_to_sal_state_t;
-
 /* Per-CPU MCA state that is too big for normal per-CPU variables.  */
 struct ia64_mca_cpu {
-	u64 stack[IA64_MCA_STACK_SIZE/8];	/* MCA memory-stack */
-	u64 proc_state_dump[512];
-	u64 stackframe[32];
-	u64 rbstore[IA64_MCA_STACK_SIZE/8];	/* MCA reg.-backing store */
+	u64 mca_stack[KERNEL_STACK_SIZE/8];
 	u64 init_stack[KERNEL_STACK_SIZE/8];
-} __attribute__ ((aligned(16)));
+};
 /* Array of physical addresses of each CPU's MCA area.  */
 extern unsigned long __per_cpu_mca[NR_CPUS];
@@ -121,12 +130,29 @@ extern void ia64_mca_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
-extern void ia64_mca_ucmc_handler(void);
+extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+			      struct switch_stack *,
+			      struct ia64_sal_os_state *);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
 extern void ia64_mca_cmc_vector_setup(void);
-extern int  ia64_reg_MCA_extension(void*);
+extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
+extern u64 ia64_get_rnat(u64 *);
+
+#else	/* __ASSEMBLY__ */
+
+#define IA64_MCA_CORRECTED	0x0	/* Error has been corrected by OS_MCA */
+#define IA64_MCA_WARM_BOOT	-1	/* Warm boot of the system need from SAL */
+#define IA64_MCA_COLD_BOOT	-2	/* Cold boot of the system need from SAL */
+#define IA64_MCA_HALT		-3	/* System to be halted by SAL */
+
+#define IA64_INIT_RESUME	0x0	/* Resume after return from INIT */
+#define IA64_INIT_WARM_BOOT	-1	/* Warm boot of the system need from SAL */
+
+#define IA64_MCA_SAME_CONTEXT	0x0	/* SAL to return to same context */
+#define IA64_MCA_NEW_CONTEXT	-1	/* SAL to return to new context */
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_MCA_H */
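With the new prototype, an OS-vendor MCA extension receives the per-event SAL/OS state directly instead of reading file-scope globals (the point of the mca_drv.c changes above). A hedged sketch of a registration under the new signature; the handler body and names are illustrative:

	#include <asm/mca.h>

	/* Illustrative recovery hook: 'record' is the SAL error record,
	 * 'sos' the handover state for this specific MCA event. */
	static int my_mca_recovery(void *record, struct ia64_sal_os_state *sos)
	{
		if (sos->rv_rc == -1)	/* SAL rendezvous unsuccessful */
			return 0;	/* treat as unrecoverable */
		/* ... inspect the record, adjust sos->pal_min_state to resume ... */
		return 1;		/* claim the event as recovered (assumed convention) */
	}

	static int __init my_mca_init(void)
	{
		return ia64_reg_MCA_extension(my_mca_recovery);
	}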
@@ -8,6 +8,8 @@
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 2002 Intel Corp.
  * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  */
 #ifndef _ASM_IA64_MCA_ASM_H
 #define _ASM_IA64_MCA_ASM_H
@@ -207,106 +209,33 @@
 	;;
 /*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
+ * are 16 byte aligned.
  *
- *	+-----------------------+
- *	|NDIRTY [BSP - BSPSTORE]|
- *	+-----------------------+
- *	|	RNAT		|
- *	+-----------------------+
- *	|	BSPSTORE	|
- *	+-----------------------+
- *	|	IFS		|
- *	+-----------------------+
- *	|	PFS		|
- *	+-----------------------+
- *	|	RSC		|
- *	+-----------------------+ <-------- Bottom of new stack frame
+ *	+---------------------------+
+ *	|          pt_regs          |
+ *	+---------------------------+
+ *	|        switch_stack       |
+ *	+---------------------------+
+ *	|        SAL/OS state       |
+ *	+---------------------------+
+ *	|    16 byte scratch area   |
+ *	+---------------------------+ <-------- SP at start of C MCA handler
+ *	|           .....           |
+ *	+---------------------------+
+ *	|  RBS for MCA/INIT handler |
+ *	+---------------------------+
+ *	| struct task for MCA/INIT  |
+ *	+---------------------------+ <-------- Bottom of MCA/INIT stack
  */
-#define  rse_rsc_offset		0
-#define  rse_pfs_offset		(rse_rsc_offset+0x08)
-#define  rse_ifs_offset		(rse_pfs_offset+0x08)
-#define  rse_bspstore_offset	(rse_ifs_offset+0x08)
-#define  rse_rnat_offset	(rse_bspstore_offset+0x08)
-#define  rse_ndirty_offset	(rse_rnat_offset+0x08)
-
-/*
- * rse_switch_context
- *
- *	1. Save old RSC onto the new stack frame
- *	2. Save PFS onto new stack frame
- *	3. Cover the old frame and start a new frame.
- *	4. Save IFS onto new stack frame
- *	5. Save the old BSPSTORE on the new stack frame
- *	6. Save the old RNAT on the new stack frame
- *	7. Write BSPSTORE with the new backing store pointer
- *	8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore) \
-	;; \
-	mov     temp=ar.rsc;; \
-	st8     [p_stackframe]=temp,8;; \
-	mov     temp=ar.pfs;; \
-	st8     [p_stackframe]=temp,8; \
-	cover ;; \
-	mov     temp=cr.ifs;; \
-	st8     [p_stackframe]=temp,8;; \
-	mov     temp=ar.bspstore;; \
-	st8     [p_stackframe]=temp,8;; \
-	mov     temp=ar.rnat;; \
-	st8     [p_stackframe]=temp,8; \
-	mov     ar.bspstore=p_bspstore;; \
-	mov     temp=ar.bsp;; \
-	sub     temp=temp,p_bspstore;; \
-	st8     [p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- *	1. Allocate a zero-sized frame
- *	2. Store the number of dirty registers RSC.loadrs field
- *	3. Issue a loadrs to insure that any registers from the interrupted
- *	   context which were saved on the new stack frame have been loaded
- *	   back into the stacked registers
- *	4. Restore BSPSTORE
- *	5. Restore RNAT
- *	6. Restore PFS
- *	7. Restore IFS
- *	8. Restore RSC
- *	9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe) \
-	;; \
-	alloc   temp=ar.pfs,0,0,0,0; \
-	add     p_stackframe=rse_ndirty_offset,p_stackframe;; \
-	ld8     temp=[p_stackframe];; \
-	shl     temp=temp,16;; \
-	mov     ar.rsc=temp;; \
-	loadrs;; \
-	add     p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;; \
-	ld8     temp=[p_stackframe];; \
-	mov     ar.bspstore=temp;; \
-	add     p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;; \
-	ld8     temp=[p_stackframe];; \
-	mov     ar.rnat=temp;; \
-	add     p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \
-	ld8     temp=[p_stackframe];; \
-	mov     ar.pfs=temp;; \
-	add     p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \
-	ld8     temp=[p_stackframe];; \
-	mov     cr.ifs=temp;; \
-	add     p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \
-	ld8     temp=[p_stackframe];; \
-	mov     ar.rsc=temp ; \
-	mov     temp=psr;; \
-	or      temp=temp,psr_mask_reg;; \
-	mov     cr.ipsr=temp;; \
-	mov     temp=ip;; \
-	add     temp=0x30,temp;; \
-	mov     cr.iip=temp;; \
-	srlz.i;; \
-	rfi;;
+#define ALIGN16(x)			((x)&~15)
+#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)
 #endif /* _ASM_IA64_MCA_ASM_H */
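To make the ALIGN16 chain concrete, here is a small computation with stand-in sizes; the IA64_*_SIZE constants really come from asm-offsets.h at build time, so all four numbers below are assumptions for illustration only:

	#include <stdio.h>

	#define ALIGN16(x) ((x) & ~15UL)

	int main(void)
	{
		const unsigned long KERNEL_STACK_SIZE      = 32768;	/* assumed */
		const unsigned long IA64_PT_REGS_SIZE      = 400;	/* assumed */
		const unsigned long IA64_SWITCH_STACK_SIZE = 560;	/* assumed */
		const unsigned long IA64_SAL_OS_STATE_SIZE = 136;	/* assumed */

		/* Each area is carved downward from the stack top, with every
		 * start address rounded down to a 16-byte boundary. */
		unsigned long pt  = ALIGN16(KERNEL_STACK_SIZE - IA64_PT_REGS_SIZE);
		unsigned long sw  = ALIGN16(pt - IA64_SWITCH_STACK_SIZE);
		unsigned long sos = ALIGN16(sw - IA64_SAL_OS_STATE_SIZE);
		unsigned long sp  = ALIGN16(sos - 16);

		printf("pt_regs@%#lx switch_stack@%#lx sos@%#lx sp@%#lx\n",
		       pt, sw, sos, sp);
		return 0;
	}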
@@ -119,7 +119,7 @@ struct pt_regs {
 	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
 	unsigned long ar_pfs;		/* prev function state */
 	unsigned long ar_rsc;		/* RSE configuration */
-	/* The following two are valid only if cr_ipsr.cpl > 0: */
+	/* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */
 	unsigned long ar_rnat;		/* RSE NaT */
 	unsigned long ar_bspstore;	/* RSE bspstore */
......
#ifndef _ASM_IA64_SN_FEATURE_SETS_H
#define _ASM_IA64_SN_FEATURE_SETS_H
/*
* SN PROM Features
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/types.h>
#include <asm/bitops.h>
/* --------------------- PROM Features -----------------------------*/
extern int sn_prom_feature_available(int id);
#define MAX_PROM_FEATURE_SETS 2
/*
* The following defines features that may or may not be supported by the
* current PROM. The OS uses sn_prom_feature_available(feature) to test for
* the presence of a PROM feature. Down rev (old) PROMs will always test
* "false" for new features.
*
* Use:
* if (sn_prom_feature_available(PRF_FEATURE_XXX))
* ...
*/
/*
* Example: feature XXX
*/
#define PRF_FEATURE_XXX 0
/* --------------------- OS Features -------------------------------*/
/*
* The following defines OS features that are optionally present in
* the operating system.
* During boot, PROM is notified of these features via a series of calls:
*
* ia64_sn_set_os_feature(feature1);
*
* Once enabled, a feature cannot be disabled.
*
* By default, features are disabled unless explicitly enabled.
*/
#define OSF_MCA_SLV_TO_OS_INIT_SLV 0
#define OSF_FEAT_LOG_SBES 1
#endif /* _ASM_IA64_SN_FEATURE_SETS_H */
@@ -80,6 +80,9 @@
 #define  SN_SAL_RESERVED_DO_NOT_USE		   0x02000062
 #define  SN_SAL_IOIF_GET_PCI_TOPOLOGY		   0x02000064
+#define  SN_SAL_GET_PROM_FEATURE_SET		   0x02000065
+#define  SN_SAL_SET_OS_FEATURE_SET		   0x02000066
+
 /*
  * Service-specific constants
  */
@@ -118,8 +121,8 @@
 /*
  * Error Handling Features
 */
-#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV	0x1
-#define SAL_ERR_FEAT_LOG_SBES			0x2
+#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV	0x1	// obsolete
+#define SAL_ERR_FEAT_LOG_SBES			0x2	// obsolete
 #define SAL_ERR_FEAT_MFR_OVERRIDE		0x4
 #define SAL_ERR_FEAT_SBE_THRESHOLD		0xffff0000
@@ -151,12 +154,6 @@ sn_sal_rev(void)
 	return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor);
 }
-/*
- * Specify the minimum PROM revsion required for this kernel.
- * Note that they're stored in hex format...
- */
-#define SN_SAL_MIN_VERSION	0x0404
-
 /*
  * Returns the master console nasid, if the call fails, return an illegal
  * value.
@@ -336,7 +333,7 @@ ia64_sn_plat_cpei_handler(void)
 }
 /*
- * Set Error Handling Features
+ * Set Error Handling Features	(Obsolete)
 */
 static inline u64
 ia64_sn_plat_set_error_handling_features(void)
@@ -1052,4 +1049,25 @@ ia64_sn_is_fake_prom(void)
 	return (rv.status == 0);
 }
+static inline int
+ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set)
+{
+	struct ia64_sal_retval rv;
+
+	SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0);
+	if (rv.status != 0)
+		return rv.status;
+	*feature_set = rv.v0;
+	return 0;
+}
+
+static inline int
+ia64_sn_set_os_feature(int feature)
+{
+	struct ia64_sal_retval rv;
+
+	SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0);
+	return rv.status;
+}
+
 #endif /* _ASM_IA64_SN_SN_SAL_H */
@@ -76,6 +76,7 @@ struct thread_info {
 #define TIF_SIGDELAYED		5	/* signal delayed from MCA/INIT/NMI/PMI context */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
+#define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
@@ -85,6 +86,7 @@ struct thread_info {
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SIGDELAYED		(1 << TIF_SIGDELAYED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+#define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
 /* "work to do on user-return" bits */
 #define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
......
@@ -114,13 +114,6 @@ extern void unw_remove_unwind_table (void *handle);
  */
 extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
-/*
- * Prepare to unwind from interruption.  The pt-regs and switch-stack structures must have
- * be "adjacent" (no state modifications between pt-regs and switch-stack).
- */
-extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
-					struct pt_regs *pt, struct switch_stack *sw);
-
 extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
 				 struct switch_stack *sw);
......
@@ -904,6 +904,8 @@ extern int task_curr(const task_t *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
 extern task_t *idle_task(int cpu);
+extern task_t *curr_task(int cpu);
+extern void set_curr_task(int cpu, task_t *p);
 void yield(void);
......
@@ -3576,6 +3576,32 @@ task_t *idle_task(int cpu)
 	return cpu_rq(cpu)->idle;
 }
+/**
+ * curr_task - return the current task for a given cpu.
+ * @cpu: the processor in question.
+ */
+task_t *curr_task(int cpu)
+{
+	return cpu_curr(cpu);
+}
+
+/**
+ * set_curr_task - set the current task for a given cpu.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+ * are serviced on a separate stack.  It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner.  This function
+ * must be called with interrupts disabled, the caller must save the original
+ * value of the current task (see curr_task() above) and restore that value
+ * before reenabling interrupts.
+ */
+void set_curr_task(int cpu, task_t *p)
+{
+	cpu_curr(cpu) = p;
+}
+
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
......
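A hedged sketch of the save/switch/restore discipline the set_curr_task() comment prescribes, as an NMI-level handler (such as the ia64 MCA/INIT code this was added for) might use it; the surrounding function and helper names are illustrative:

	/* Interrupts are already disabled here; switch "current" to the
	 * dedicated event task, do the work, then restore the original
	 * task before interrupts can be re-enabled. */
	static void run_on_event_task(int cpu, task_t *event_task)
	{
		task_t *orig = curr_task(cpu);		/* save original task */

		set_curr_task(cpu, event_task);		/* non-blocking switch */
		handle_nmi_level_event();		/* illustrative */
		set_curr_task(cpu, orig);		/* restore before return */
	}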