linux / linux-davinci-2.6.23 · Commits

Commit 59af7038, authored Jan 14, 2006 by Linus Torvalds
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

Parents: e7de3690, d50f5c5c
Showing 22 changed files with 544 additions and 235 deletions (+544 −235)
arch/ia64/hp/sim/simserial.c              +2   −8
arch/ia64/kernel/fsys.S                   +1   −0
arch/ia64/kernel/jprobes.S                +27  −0
arch/ia64/kernel/kprobes.c                +57  −0
arch/ia64/kernel/mca_asm.S                +1   −1
arch/ia64/kernel/salinfo.c                +119 −51
arch/ia64/kernel/traps.c                  +19  −7
arch/ia64/mm/tlb.c                        +1   −1
arch/ia64/sn/include/xtalk/hubdev.h       +12  −4
arch/ia64/sn/kernel/bte_error.c           +48  −10
arch/ia64/sn/kernel/huberror.c            +5   −4
arch/ia64/sn/kernel/io_init.c             +53  −41
arch/ia64/sn/kernel/xpc_channel.c         +14  −10
arch/ia64/sn/kernel/xpc_main.c            +129 −60
arch/ia64/sn/kernel/xpc_partition.c       +7   −3
arch/ia64/sn/pci/pcibr/pcibr_dma.c        +18  −16
arch/ia64/sn/pci/pcibr/pcibr_provider.c   +10  −10
include/asm-ia64/kprobes.h                +6   −0
include/asm-ia64/sn/sn_sal.h              +3   −2
include/asm-ia64/sn/xp.h                  +3   −1
include/asm-ia64/sn/xpc.h                 +6   −5
include/asm-ia64/thread_info.h            +3   −1
arch/ia64/hp/sim/simserial.c

@@ -167,15 +167,9 @@ static void receive_chars(struct tty_struct *tty, struct pt_regs *regs)
 			}
 		}
 		seen_esc = 0;
-		if (tty->flip.count >= TTY_FLIPBUF_SIZE) break;
-
-		*tty->flip.char_buf_ptr = ch;
-		*tty->flip.flag_buf_ptr = 0;
-		tty->flip.flag_buf_ptr++;
-		tty->flip.char_buf_ptr++;
-		tty->flip.count++;
+
+		if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+			break;
 	}
 	tty_flip_buffer_push(tty);
 }
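The hunk above drops the open-coded bookkeeping on tty->flip in favour of the flip-buffer helpers. As orientation, here is a minimal, hypothetical driver fragment (not taken from this commit) using the same pre-2.6.16 API the new code relies on: tty_insert_flip_char() does the counting and pointer updates the old code did by hand, and tty_flip_buffer_push() hands the accumulated characters to the line discipline.

    #include <linux/tty.h>
    #include <linux/tty_flip.h>

    /* hypothetical helper: push n received bytes into the tty flip buffer */
    static void push_chars(struct tty_struct *tty, const unsigned char *buf, int n)
    {
    	int i;

    	for (i = 0; i < n; i++)
    		if (tty_insert_flip_char(tty, buf[i], TTY_NORMAL) == 0)
    			break;		/* flip buffer full, drop the rest */

    	tty_flip_buffer_push(tty);	/* hand the data to the line discipline */
    }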
arch/ia64/kernel/fsys.S

@@ -903,5 +903,6 @@ fsyscall_table:
 	data8 0
 	data8 0
 	data8 0
 	data8 0			// 1280

+	.org fsyscall_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
arch/ia64/kernel/jprobes.S

@@ -60,3 +60,30 @@ END(jprobe_break)
 GLOBAL_ENTRY(jprobe_inst_return)
 	br.call.sptk.many b0=jprobe_break
 END(jprobe_inst_return)
+
+GLOBAL_ENTRY(invalidate_stacked_regs)
+	movl r16=invalidate_restore_cfm
+	;;
+	mov b6=r16
+	;;
+	br.ret.sptk.many b6
+	;;
+invalidate_restore_cfm:
+	mov r16=ar.rsc
+	;;
+	mov ar.rsc=r0
+	;;
+	loadrs
+	;;
+	mov ar.rsc=r16
+	;;
+	br.cond.sptk.many rp
+END(invalidate_stacked_regs)
+
+GLOBAL_ENTRY(flush_register_stack)
+	// flush dirty regs to backing store (must be first in insn group)
+	flushrs
+	;;
+	br.ret.sptk.many rp
+END(flush_register_stack)
arch/ia64/kernel/kprobes.c

@@ -766,11 +766,56 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	return ret;
 }

+struct param_bsp_cfm {
+	unsigned long ip;
+	unsigned long *bsp;
+	unsigned long cfm;
+};
+
+static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
+{
+	unsigned long ip;
+	struct param_bsp_cfm *lp = arg;
+
+	do {
+		unw_get_ip(info, &ip);
+		if (ip == 0)
+			break;
+		if (ip == lp->ip) {
+			unw_get_bsp(info, (unsigned long *)&lp->bsp);
+			unw_get_cfm(info, (unsigned long *)&lp->cfm);
+			return;
+		}
+	} while (unw_unwind(info) >= 0);
+	lp->bsp = 0;
+	lp->cfm = 0;
+	return;
+}
+
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct param_bsp_cfm pa;
+	int bytes;
+
+	/*
+	 * Callee owns the argument space and could overwrite it, eg
+	 * tail call optimization. So to be absolutely safe
+	 * we save the argument space before transfering the control
+	 * to instrumented jprobe function which runs in
+	 * the process context
+	 */
+	pa.ip = regs->cr_iip;
+	unw_init_running(ia64_get_bsp_cfm, &pa);
+	bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
+				- (char *)pa.bsp;
+	memcpy(kcb->jprobes_saved_stacked_regs, pa.bsp, bytes);
+	kcb->bsp = pa.bsp;
+	kcb->cfm = pa.cfm;

 	/* save architectural state */
 	kcb->jprobe_saved_regs = *regs;

@@ -792,8 +837,20 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	int bytes;

 	/* restoring architectural state */
 	*regs = kcb->jprobe_saved_regs;
+
+	/* restoring the original argument space */
+	flush_register_stack();
+	bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
+				- (char *)kcb->bsp;
+	memcpy(kcb->bsp, kcb->jprobes_saved_stacked_regs, bytes);
+	invalidate_stacked_regs();
+
 	preempt_enable_no_resched();
 	return 1;
 }
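The setjmp_pre_handler() additions above snapshot the caller's stacked argument registers before control is handed to the jprobe handler, since the callee owns that argument space and may clobber it (for example via tail-call optimization). As an illustration of the size calculation used there (a sketch with a hypothetical helper name, not code from the patch): the low bits of the saved CFM give the number of stacked registers in the frame, and ia64_rse_skip_regs() advances a backing-store pointer by that many register slots, skipping the interleaved RNaT-collection slots, so the pointer difference is the byte count to copy.

    #include <asm/rse.h>		/* ia64_rse_skip_regs() */
    #include <linux/string.h>	/* memcpy() */

    /* hypothetical helper, mirroring the computation in the hunk above */
    static void save_stacked_args(unsigned long *bsp, unsigned long cfm,
    			      unsigned long *save_area)
    {
    	/* bytes the frame occupies on the register backing store */
    	int bytes = (char *)ia64_rse_skip_regs(bsp, cfm & 0x3f) - (char *)bsp;

    	memcpy(save_area, bsp, bytes);
    }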
arch/ia64/kernel/mca_asm.S

@@ -847,7 +847,7 @@ ia64_state_restore:
 	;;
 	mov cr.iim=temp3
 	mov cr.iha=temp4
-	dep r22=0,r22,62,2	// pal_min_state, physical, uncached
+	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
 	mov IA64_KR(CURRENT)=r21
 	ld8 r8=[temp1]		// os_status
 	ld8 r10=[temp2]		// context
arch/ia64/kernel/salinfo.c

@@ -3,7 +3,7 @@
 *
 * Creates entries in /proc/sal for various system features.
 *
- * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2003, 2006 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2003 Hewlett-Packard Co
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *

@@ -27,9 +27,17 @@
 *   mca.c may not pass a buffer, a NULL buffer just indicates that a new
 *   record is available in SAL.
 *   Replace some NR_CPUS by cpus_online, for hotplug cpu.
+ *
+ * Jan  5 2006	kaos@sgi.com
+ *   Handle hotplug cpus coming online.
+ *   Handle hotplug cpus going offline while they still have outstanding records.
+ *   Use the cpu_* macros consistently.
+ *   Replace the counting semaphore with a mutex and a test if the cpumask is non-empty.
+ *   Modify the locking to make the test for "work to do" an atomic operation.
 */

 #include <linux/capability.h>
+#include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/proc_fs.h>
 #include <linux/module.h>

@@ -132,8 +140,8 @@ enum salinfo_state {
 };

 struct salinfo_data {
-	volatile cpumask_t	cpu_event;	/* which cpus have outstanding events */
-	struct semaphore	sem;		/* count of cpus with outstanding events (bits set in cpu_event) */
+	cpumask_t		cpu_event;	/* which cpus have outstanding events */
+	struct semaphore	mutex;
 	u8			*log_buffer;
 	u64			log_size;
 	u8			*oemdata;	/* decoded oem data */

@@ -174,6 +182,21 @@ struct salinfo_platform_oemdata_parms {
 	int ret;
 };

+/* Kick the mutex that tells user space that there is work to do.  Instead of
+ * trying to track the state of the mutex across multiple cpus, in user
+ * context, interrupt context, non-maskable interrupt context and hotplug cpu,
+ * it is far easier just to grab the mutex if it is free then release it.
+ *
+ * This routine must be called with data_saved_lock held, to make the down/up
+ * operation atomic.
+ */
+static void
+salinfo_work_to_do(struct salinfo_data *data)
+{
+	down_trylock(&data->mutex);
+	up(&data->mutex);
+}
+
 static void
 salinfo_platform_oemdata_cpu(void *context)
 {

@@ -212,9 +235,9 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 	BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));

+	if (irqsafe)
+		spin_lock_irqsave(&data_saved_lock, flags);
 	if (buffer) {
-		if (irqsafe)
-			spin_lock_irqsave(&data_saved_lock, flags);
 		for (i = 0, data_saved = data->data_saved; i < saved_size;
 		     ++i, ++data_saved) {
 			if (!data_saved->buffer)
 				break;

@@ -232,13 +255,11 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 			data_saved->size = size;
 			data_saved->buffer = buffer;
 		}
-		if (irqsafe)
-			spin_unlock_irqrestore(&data_saved_lock, flags);
 	}

-	if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) {
-		if (irqsafe)
-			up(&data->sem);
+	cpu_set(smp_processor_id(), data->cpu_event);
+	if (irqsafe) {
+		salinfo_work_to_do(data);
+		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
 }

@@ -249,20 +270,17 @@ static struct timer_list salinfo_timer;
 static void
 salinfo_timeout_check(struct salinfo_data *data)
 {
-	int i;
+	unsigned long flags;
 	if (!data->open)
 		return;
-	for_each_online_cpu(i) {
-		if (test_bit(i, &data->cpu_event)) {
-			/* double up() is not a problem, user space will see no
-			 * records for the additional "events".
-			 */
-			up(&data->sem);
-		}
+	if (!cpus_empty(data->cpu_event)) {
+		spin_lock_irqsave(&data_saved_lock, flags);
+		salinfo_work_to_do(data);
+		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
 }

 static void
 salinfo_timeout(unsigned long arg)
 {
 	salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);

@@ -290,16 +308,20 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
 	int i, n, cpu = -1;

 retry:
-	if (down_trylock(&data->sem)) {
+	if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
-		if (down_interruptible(&data->sem))
+		if (down_interruptible(&data->mutex))
 			return -EINTR;
 	}

 	n = data->cpu_check;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (test_bit(n, &data->cpu_event) && cpu_online(n)) {
+		if (cpu_isset(n, data->cpu_event)) {
+			if (!cpu_online(n)) {
+				cpu_clear(n, data->cpu_event);
+				continue;
+			}
 			cpu = n;
 			break;
 		}

@@ -310,9 +332,6 @@ retry:
 	if (cpu == -1)
 		goto retry;

-	/* events are sticky until the user says "clear" */
-	up(&data->sem);
-
 	/* for next read, start checking at next CPU */
 	data->cpu_check = cpu;
 	if (++data->cpu_check == NR_CPUS)

@@ -381,10 +400,8 @@ salinfo_log_release(struct inode *inode, struct file *file)
 static void
 call_on_cpu(int cpu, void (*fn)(void *), void *arg)
 {
-	cpumask_t save_cpus_allowed, new_cpus_allowed;
-	memcpy(&save_cpus_allowed, &current->cpus_allowed, sizeof(save_cpus_allowed));
-	memset(&new_cpus_allowed, 0, sizeof(new_cpus_allowed));
-	set_bit(cpu, &new_cpus_allowed);
+	cpumask_t save_cpus_allowed = current->cpus_allowed;
+	cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
 	set_cpus_allowed(current, new_cpus_allowed);
 	(*fn)(arg);
 	set_cpus_allowed(current, save_cpus_allowed);

@@ -433,10 +450,10 @@ retry:
 		if (!data->saved_num)
 			call_on_cpu(cpu, salinfo_log_read_cpu, data);
 		if (!data->log_size) {
 			data->state = STATE_NO_DATA;
-			clear_bit(cpu, &data->cpu_event);
+			cpu_clear(cpu, data->cpu_event);
 		} else {
 			data->state = STATE_LOG_RECORD;
 		}
 	}

@@ -473,27 +490,31 @@ static int
 salinfo_log_clear(struct salinfo_data *data, int cpu)
 {
 	sal_log_record_header_t *rh;
+	unsigned long flags;
+	spin_lock_irqsave(&data_saved_lock, flags);
 	data->state = STATE_NO_DATA;
-	if (!test_bit(cpu, &data->cpu_event))
+	if (!cpu_isset(cpu, data->cpu_event)) {
+		spin_unlock_irqrestore(&data_saved_lock, flags);
 		return 0;
-	down(&data->sem);
-	clear_bit(cpu, &data->cpu_event);
+	}
+	cpu_clear(cpu, data->cpu_event);
 	if (data->saved_num) {
-		unsigned long flags;
-		spin_lock_irqsave(&data_saved_lock, flags);
 		shift1_data_saved(data, data->saved_num - 1);
 		data->saved_num = 0;
-		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
+	spin_unlock_irqrestore(&data_saved_lock, flags);
 	rh = (sal_log_record_header_t *)(data->log_buffer);
 	/* Corrected errors have already been cleared from SAL */
 	if (rh->severity != sal_log_severity_corrected)
 		call_on_cpu(cpu, salinfo_log_clear_cpu, data);
 	/* clearing a record may make a new record visible */
 	salinfo_log_new_read(cpu, data);
-	if (data->state == STATE_LOG_RECORD &&
-	    !test_and_set_bit(cpu, &data->cpu_event))
-		up(&data->sem);
+	if (data->state == STATE_LOG_RECORD) {
+		spin_lock_irqsave(&data_saved_lock, flags);
+		cpu_set(cpu, data->cpu_event);
+		salinfo_work_to_do(data);
+		spin_unlock_irqrestore(&data_saved_lock, flags);
+	}
 	return 0;
 }

@@ -550,6 +571,53 @@ static struct file_operations salinfo_data_fops = {
 	.write   = salinfo_log_write,
 };

+#ifdef CONFIG_HOTPLUG_CPU
+static int __devinit
+salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+{
+	unsigned int i, cpu = (unsigned long)hcpu;
+	unsigned long flags;
+	struct salinfo_data *data;
+	switch (action) {
+	case CPU_ONLINE:
+		spin_lock_irqsave(&data_saved_lock, flags);
+		for (i = 0, data = salinfo_data;
+		     i < ARRAY_SIZE(salinfo_data);
+		     ++i, ++data) {
+			cpu_set(cpu, data->cpu_event);
+			salinfo_work_to_do(data);
+		}
+		spin_unlock_irqrestore(&data_saved_lock, flags);
+		break;
+	case CPU_DEAD:
+		spin_lock_irqsave(&data_saved_lock, flags);
+		for (i = 0, data = salinfo_data;
+		     i < ARRAY_SIZE(salinfo_data);
+		     ++i, ++data) {
+			struct salinfo_data_saved *data_saved;
+			int j;
+			for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j;
+			     j >= 0;
+			     --j, --data_saved) {
+				if (data_saved->buffer && data_saved->cpu == cpu) {
+					shift1_data_saved(data, j);
+				}
+			}
+			cpu_clear(cpu, data->cpu_event);
+		}
+		spin_unlock_irqrestore(&data_saved_lock, flags);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block salinfo_cpu_notifier =
+{
+	.notifier_call = salinfo_cpu_callback,
+	.priority = 0,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static int __init
 salinfo_init(void)
 {

@@ -557,7 +625,7 @@ salinfo_init(void)
 	struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
 	struct proc_dir_entry *dir, *entry;
 	struct salinfo_data *data;
-	int i, j, online;
+	int i, j;

 	salinfo_dir = proc_mkdir("sal", NULL);
 	if (!salinfo_dir)

@@ -572,7 +640,7 @@ salinfo_init(void)
 	for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
 		data = salinfo_data + i;
 		data->type = i;
-		sema_init(&data->sem, 0);
+		init_MUTEX(&data->mutex);
 		dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
 		if (!dir)
 			continue;

@@ -592,12 +660,8 @@ salinfo_init(void)
 		*sdir++ = entry;

 		/* we missed any events before now */
-		online = 0;
-		for_each_online_cpu(j) {
-			set_bit(j, &data->cpu_event);
-			++online;
-		}
-		sema_init(&data->sem, online);
+		for_each_online_cpu(j)
+			cpu_set(j, data->cpu_event);

 		*sdir++ = dir;
 	}

@@ -609,6 +673,10 @@ salinfo_init(void)
 	salinfo_timer.function = &salinfo_timeout;
 	add_timer(&salinfo_timer);

+#ifdef CONFIG_HOTPLUG_CPU
+	register_cpu_notifier(&salinfo_cpu_notifier);
+#endif
+
 	return 0;
 }
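The new salinfo_work_to_do() turns the per-record-type semaphore into a simple "work available" flag rather than a counter of outstanding events: producers grab the semaphore if it happens to be free and then release it, so its count never exceeds one, while readers block in down_interruptible() and re-check the cpumask once woken. A minimal, self-contained sketch of that pattern, with hypothetical names and the same era's semaphore API:

    #include <asm/semaphore.h>

    /* Signal "there may be work" without letting the count grow: grab the
     * semaphore if it is free, then release it exactly once.  Callers hold
     * whatever lock makes the down/up pair atomic (data_saved_lock above). */
    static void kick_waiters(struct semaphore *sem)
    {
    	down_trylock(sem);
    	up(sem);
    }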
arch/ia64/kernel/traps.c

@@ -530,12 +530,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 	if (fsys_mode(current, &regs)) {
 		extern char __kernel_syscall_via_break[];
 		/*
-		 * Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
-		 * need special handling; Debug trap is not supposed to happen.
+		 * Got a trap in fsys-mode: Taken Branch Trap
+		 * and Single Step trap need special handling;
+		 * Debug trap is ignored (we disable it here
+		 * and re-enable it in the lower-privilege trap).
 		 */
 		if (unlikely(vector == 29)) {
-			die("Got debug trap in fsys-mode---not supposed to happen!",
-			    &regs, 0);
+			set_thread_flag(TIF_DB_DISABLED);
+			ia64_psr(&regs)->db = 0;
+			ia64_psr(&regs)->lp = 1;
 			return;
 		}
 		/* re-do the system call via break 0x100000: */

@@ -589,10 +592,19 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 	      case 34:
 		if (isr & 0x2) {
 			/* Lower-Privilege Transfer Trap */
+
+			/* If we disabled debug traps during an fsyscall,
+			 * re-enable them here.
+			 */
+			if (test_thread_flag(TIF_DB_DISABLED)) {
+				clear_thread_flag(TIF_DB_DISABLED);
+				ia64_psr(&regs)->db = 1;
+			}
+
 			/*
-			 * Just clear PSR.lp and then return immediately: all the
-			 * interesting work (e.g., signal delivery is done in the kernel
-			 * exit path).
+			 * Just clear PSR.lp and then return immediately:
+			 * all the interesting work (e.g., signal delivery)
+			 * is done in the kernel exit path.
 			 */
 			ia64_psr(&regs)->lp = 0;
 			return;
arch/ia64/mm/tlb.c

@@ -90,7 +90,7 @@ ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
 {
 	static DEFINE_SPINLOCK(ptcg_lock);

-	if (mm != current->active_mm) {
+	if (mm != current->active_mm || !current->mm) {
 		flush_tlb_all();
 		return;
 	}
arch/ia64/sn/include/xtalk/hubdev.h

@@ -26,11 +26,14 @@
 #define IIO_NUM_ITTES	7
 #define HUB_NUM_BIG_WINDOW	(IIO_NUM_ITTES - 1)

-struct sn_flush_device_list {
+/* This struct is shared between the PROM and the kernel.
+ * Changes to this struct will require corresponding changes to the kernel.
+ */
+struct sn_flush_device_common {
 	int sfdl_bus;
 	int sfdl_slot;
 	int sfdl_pin;
-	struct bar_list {
+	struct common_bar_list {
 		unsigned long start;
 		unsigned long end;
 	} sfdl_bar_list[6];

@@ -40,14 +43,19 @@ struct sn_flush_device_list {
 	uint32_t sfdl_persistent_busnum;
 	uint32_t sfdl_persistent_segment;
 	struct pcibus_info *sfdl_pcibus_info;
+};
+
+/* This struct is kernel only and is not used by the PROM */
+struct sn_flush_device_kernel {
 	spinlock_t sfdl_flush_lock;
+	struct sn_flush_device_common *common;
 };

 /*
- * **widget_p - Used as an array[wid_num][device] of sn_flush_device_list.
+ * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
 */
 struct sn_flush_nasid_entry  {
-	struct sn_flush_device_list **widget_p; /* Used as a array of wid_num */
+	struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
 	uint64_t iio_itte[8];
 };
arch/ia64/sn/kernel/bte_error.c

@@ -33,7 +33,7 @@ void bte_error_handler(unsigned long);
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
-void shub1_bte_error_handler(unsigned long _nodepda)
+int shub1_bte_error_handler(unsigned long _nodepda)
 {
 	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
 	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;

@@ -53,7 +53,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
 	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
 		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
 			    smp_processor_id()));
-		return;
+		return 1;
 	}

 	/* Determine information about our hub */

@@ -81,7 +81,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
 		mod_timer(recovery_timer, HZ * 5);
 		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
 			    smp_processor_id()));
-		return;
+		return 1;
 	}
 	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {

@@ -99,7 +99,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
 			BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
 				    err_nodepda, smp_processor_id(), i));
-			return;
+			return 1;
 		}
 	}
 }

@@ -124,6 +124,42 @@ void shub1_bte_error_handler(unsigned long _nodepda)
 	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);

 	del_timer(recovery_timer);
+	return 0;
+}
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+int shub2_bte_error_handler(unsigned long _nodepda)
+{
+	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
+	struct bteinfo_s *bte;
+	nasid_t nasid;
+	u64 status;
+	int i;
+
+	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+	/*
+	 * Verify that all the BTEs are complete
+	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		bte = &err_nodepda->bte_if[i];
+		status = BTE_LNSTAT_LOAD(bte);
+		if ((status & IBLS_ERROR) || !(status & IBLS_BUSY))
+			continue;
+		mod_timer(recovery_timer, HZ * 5);
+		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+			    smp_processor_id()));
+		return 1;
+	}
+	if (ia64_sn_bte_recovery(nasid))
+		panic("bte_error_handler(): Fatal BTE Error");
+
+	del_timer(recovery_timer);
+	return 0;
 }

 /*

@@ -135,7 +171,6 @@ void bte_error_handler(unsigned long _nodepda)
 	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
 	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
 	int i;
-	nasid_t nasid;
 	unsigned long irq_flags;
 	volatile u64 *notify;
 	bte_result_t bh_error;

@@ -160,12 +195,15 @@ void bte_error_handler(unsigned long _nodepda)
 	}

 	if (is_shub1()) {
-		shub1_bte_error_handler(_nodepda);
+		if (shub1_bte_error_handler(_nodepda)) {
+			spin_unlock_irqrestore(recovery_lock, irq_flags);
+			return;
+		}
 	} else {
-		nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
-
-		if (ia64_sn_bte_recovery(nasid))
-			panic("bte_error_handler(): Fatal BTE Error");
+		if (shub2_bte_error_handler(_nodepda)) {
+			spin_unlock_irqrestore(recovery_lock, irq_flags);
+			return;
+		}
 	}

 	for (i = 0; i < BTES_PER_NODE; i++) {
arch/ia64/sn/kernel/huberror.c

@@ -32,13 +32,14 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
 	ret_stuff.v0 = 0;
 	hubdev_info = (struct hubdev_info *)arg;
 	nasid = hubdev_info->hdi_nasid;
-	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+
+	if (is_shub1()) {
+		SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
 			(u64) nasid, 0, 0, 0, 0, 0, 0);

-	if ((int)ret_stuff.v0)
-		panic("hubii_eint_handler(): Fatal TIO Error");
+		if ((int)ret_stuff.v0)
+			panic("hubii_eint_handler(): Fatal TIO Error");

-	if (is_shub1()) {
 		if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
 			(void)hubiio_crb_error_handler(hubdev_info);
 	} else
arch/ia64/sn/kernel/io_init.c

@@ -76,11 +76,12 @@ static struct sn_pcibus_provider sn_pci_default_provider = {
 };

 /*
- * Retrieve the DMA Flush List given nasid.  This list is needed
- * to implement the WAR - Flush DMA data on PIO Reads.
+ * Retrieve the DMA Flush List given nasid, widget, and device.
+ * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
 */
-static inline uint64_t
-sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
+static inline u64
+sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
+			     u64 address)
 {
 	struct ia64_sal_retval ret_stuff;

@@ -88,17 +89,17 @@ sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
 	ret_stuff.v0 = 0;

 	SAL_CALL_NOLOCK(ret_stuff,
-			(u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
-			(u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0,
-			0);
-	return ret_stuff.v0;
+			(u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
+			(u64) nasid, (u64) widget_num,
+			(u64) device_num, (u64) address,
+			0, 0, 0);
+	return ret_stuff.status;

 }

 /*
 * Retrieve the hub device info structure for the given nasid.
 */
-static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
+static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
 {
 	struct ia64_sal_retval ret_stuff;

@@ -114,7 +115,7 @@ static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
 /*
 * Retrieve the pci bus information given the bus number.
 */
-static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
+static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
 {
 	struct ia64_sal_retval ret_stuff;

@@ -130,7 +131,7 @@ static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
 /*
 * Retrieve the pci device information given the bus and device|function number.
 */
-static inline uint64_t
+static inline u64
 sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
 		    u64 sn_irq_info)
 {

@@ -170,12 +171,12 @@ sn_pcidev_info_get(struct pci_dev *dev)
 */
 static void sn_fixup_ionodes(void)
 {
-	struct sn_flush_device_list *sn_flush_device_list;
+	struct sn_flush_device_kernel *sn_flush_device_kernel;
+	struct sn_flush_device_kernel *dev_entry;
 	struct hubdev_info *hubdev;
-	uint64_t status;
-	uint64_t nasid;
-	int i, widget;
+	u64 status;
+	u64 nasid;
+	int i, widget, device;

 	/*
 	 * Get SGI Specific HUB chipset information.

@@ -186,7 +187,7 @@ static void sn_fixup_ionodes(void)
 		nasid = cnodeid_to_nasid(i);
 		hubdev->max_segment_number = 0xffffffff;
 		hubdev->max_pcibus_number = 0xff;
-		status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
+		status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
 		if (status)
 			continue;

@@ -213,38 +214,49 @@ static void sn_fixup_ionodes(void)
 		hubdev->hdi_flush_nasid_list.widget_p =
 		    kmalloc((HUB_WIDGET_ID_MAX + 1) *
-			    sizeof(struct sn_flush_device_list *), GFP_KERNEL);
+			    sizeof(struct sn_flush_device_kernel *),
+			    GFP_KERNEL);
 		memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
 		       (HUB_WIDGET_ID_MAX + 1) *
-		       sizeof(struct sn_flush_device_list *));
+		       sizeof(struct sn_flush_device_kernel *));

 		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
-			sn_flush_device_list = kmalloc(DEV_PER_WIDGET *
-						       sizeof(struct
-							      sn_flush_device_list),
-						       GFP_KERNEL);
-			memset(sn_flush_device_list, 0x0,
-			       DEV_PER_WIDGET *
-			       sizeof(struct sn_flush_device_list));
-
-			status =
-			    sal_get_widget_dmaflush_list(nasid, widget,
-							 (uint64_t)
-							 __pa
-							 (sn_flush_device_list));
-			if (status) {
-				kfree(sn_flush_device_list);
-				continue;
+			sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET *
+							 sizeof(struct
+							 sn_flush_device_kernel),
+							 GFP_KERNEL);
+			if (!sn_flush_device_kernel)
+				BUG();
+			memset(sn_flush_device_kernel, 0x0,
+			       DEV_PER_WIDGET *
+			       sizeof(struct sn_flush_device_kernel));
+
+			dev_entry = sn_flush_device_kernel;
+			for (device = 0; device < DEV_PER_WIDGET;
+			     device++, dev_entry++) {
+				dev_entry->common = kmalloc(sizeof(struct
+							    sn_flush_device_common),
+							    GFP_KERNEL);
+				if (!dev_entry->common)
+					BUG();
+				memset(dev_entry->common, 0x0,
+				       sizeof(struct sn_flush_device_common));
+
+				status = sal_get_device_dmaflush_list(nasid,
+								      widget,
+								      device,
+						      (u64)(dev_entry->common));
+				if (status)
+					BUG();
+
+				spin_lock_init(&dev_entry->sfdl_flush_lock);
 			}

-			spin_lock_init(&sn_flush_device_list->sfdl_flush_lock);
-			hubdev->hdi_flush_nasid_list.widget_p[widget] =
-			    sn_flush_device_list;
+			if (sn_flush_device_kernel)
+				hubdev->hdi_flush_nasid_list.widget_p[widget] =
+						       sn_flush_device_kernel;
 		}
 	}
 }

 /*
arch/ia64/sn/kernel/xpc_channel.c

@@ -3,7 +3,7 @@
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
 */

@@ -24,7 +24,7 @@
 #include <linux/slab.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>

 /*

@@ -779,6 +779,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	/* both sides are disconnected now */

+	if (ch->flags & XPC_C_CONNECTCALLOUT) {
+		spin_unlock_irqrestore(&ch->lock, *irq_flags);
+		xpc_disconnect_callout(ch, xpcDisconnected);
+		spin_lock_irqsave(&ch->lock, *irq_flags);
+	}
+
 	/* it's now safe to free the channel's message queues */
 	xpc_free_msgqueues(ch);

@@ -1645,7 +1651,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 void
-xpc_disconnecting_callout(struct xpc_channel *ch)
+xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
 {
 	/*
 	 * Let the channel's registerer know that the channel is being

@@ -1654,15 +1660,13 @@ xpc_disconnecting_callout(struct xpc_channel *ch)
 	 */

 	if (ch->func != NULL) {
-		dev_dbg(xpc_chan, "ch->func() called, reason=xpcDisconnecting,"
-			" partid=%d, channel=%d\n", ch->partid, ch->number);
+		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
+			"channel=%d\n", reason, ch->partid, ch->number);

-		ch->func(xpcDisconnecting, ch->partid, ch->number, NULL,
-								ch->key);
+		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

-		dev_dbg(xpc_chan, "ch->func() returned, reason="
-			"xpcDisconnecting, partid=%d, channel=%d\n",
-			ch->partid, ch->number);
+		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
+			"channel=%d\n", reason, ch->partid, ch->number);
 	}
 }
arch/ia64/sn/kernel/xpc_main.c

@@ -3,7 +3,7 @@
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
 */

@@ -59,7 +59,7 @@
 #include <asm/sn/sn_sal.h>
 #include <asm/kdebug.h>
 #include <asm/uaccess.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>

 /* define two XPC debug device structures to be used with dev_dbg() et al */

@@ -82,6 +82,9 @@ struct device *xpc_part = &xpc_part_dbg_subname;
 struct device *xpc_chan = &xpc_chan_dbg_subname;

+static int xpc_kdebug_ignore;
+
+
 /* systune related variables for /proc/sys directories */

 static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;

@@ -162,6 +165,8 @@ static ctl_table xpc_sys_dir[] = {
 };
 static struct ctl_table_header *xpc_sysctl;

+/* non-zero if any remote partition disengage request was timed out */
+int xpc_disengage_request_timedout;
+
 /* #of IRQs received */
 static atomic_t xpc_act_IRQ_rcvd;

@@ -773,7 +778,7 @@ xpc_daemonize_kthread(void *args)
 			ch->flags |= XPC_C_DISCONNECTCALLOUT;
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			xpc_disconnecting_callout(ch);
+			xpc_disconnect_callout(ch, xpcDisconnecting);
 		} else {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 		}

@@ -921,9 +926,9 @@ static void
 xpc_do_exit(enum xpc_retval reason)
 {
 	partid_t partid;
-	int active_part_count;
+	int active_part_count, printed_waiting_msg = 0;
 	struct xpc_partition *part;
-	unsigned long printmsg_time;
+	unsigned long printmsg_time, disengage_request_timeout = 0;

 	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */

@@ -953,7 +958,8 @@ xpc_do_exit(enum xpc_retval reason)
 	/* wait for all partitions to become inactive */

-	printmsg_time = jiffies;
+	printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+	xpc_disengage_request_timedout = 0;

 	do {
 		active_part_count = 0;

@@ -969,20 +975,39 @@ xpc_do_exit(enum xpc_retval reason)
 				active_part_count++;

 			XPC_DEACTIVATE_PARTITION(part, reason);
-		}

-		if (active_part_count == 0) {
-			break;
+			if (part->disengage_request_timeout >
+						disengage_request_timeout) {
+				disengage_request_timeout =
+						part->disengage_request_timeout;
+			}
 		}

-		if (jiffies >= printmsg_time) {
-			dev_info(xpc_part, "waiting for partitions to "
-				"deactivate/disengage, active count=%d, remote "
-				"engaged=0x%lx\n", active_part_count,
-				xpc_partition_engaged(1UL << partid));
-
-			printmsg_time = jiffies +
+		if (xpc_partition_engaged(-1UL)) {
+			if (time_after(jiffies, printmsg_time)) {
+				dev_info(xpc_part, "waiting for remote "
+					"partitions to disengage, timeout in "
+					"%ld seconds\n",
+					(disengage_request_timeout - jiffies)
+									/ HZ);
+				printmsg_time = jiffies +
 					(XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+				printed_waiting_msg = 1;
+			}
+
+		} else if (active_part_count > 0) {
+			if (printed_waiting_msg) {
+				dev_info(xpc_part, "waiting for local partition"
+					" to disengage\n");
+				printed_waiting_msg = 0;
+			}
+
+		} else {
+			if (!xpc_disengage_request_timedout) {
+				dev_info(xpc_part, "all partitions have "
+					"disengaged\n");
+			}
+			break;
 		}

 		/* sleep for a 1/3 of a second or so */

@@ -1000,11 +1025,13 @@ xpc_do_exit(enum xpc_retval reason)
 	del_timer_sync(&xpc_hb_timer);
 	DBUG_ON(xpc_vars->heartbeating_to_mask != 0);

-	/* take ourselves off of the reboot_notifier_list */
-	(void) unregister_reboot_notifier(&xpc_reboot_notifier);
-
-	/* take ourselves off of the die_notifier list */
-	(void) unregister_die_notifier(&xpc_die_notifier);
+	if (reason == xpcUnloading) {
+		/* take ourselves off of the reboot_notifier_list */
+		(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+
+		/* take ourselves off of the die_notifier list */
+		(void) unregister_die_notifier(&xpc_die_notifier);
+	}

 	/* close down protections for IPI operations */
 	xpc_restrict_IPI_ops();

@@ -1020,7 +1047,35 @@ xpc_do_exit(enum xpc_retval reason)
 /*
- * Called when the system is about to be either restarted or halted.
+ * This function is called when the system is being rebooted.
 */
+static int
+xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
+{
+	enum xpc_retval reason;
+
+
+	switch (event) {
+	case SYS_RESTART:
+		reason = xpcSystemReboot;
+		break;
+	case SYS_HALT:
+		reason = xpcSystemHalt;
+		break;
+	case SYS_POWER_OFF:
+		reason = xpcSystemPoweroff;
+		break;
+	default:
+		reason = xpcSystemGoingDown;
+	}
+
+	xpc_do_exit(reason);
+	return NOTIFY_DONE;
+}
+
+
+/*
+ * Notify other partitions to disengage from all references to our memory.
+ */
 static void
 xpc_die_disengage(void)

@@ -1028,7 +1083,7 @@ xpc_die_disengage(void)
 	struct xpc_partition *part;
 	partid_t partid;
 	unsigned long engaged;
-	long time, print_time, disengage_request_timeout;
+	long time, printmsg_time, disengage_request_timeout;

 	/* keep xpc_hb_checker thread from doing anything (just in case) */

@@ -1055,57 +1110,53 @@ xpc_die_disengage(void)
 		}
 	}

-	print_time = rtc_time();
-	disengage_request_timeout = print_time +
+	time = rtc_time();
+	printmsg_time = time +
+		(XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
+	disengage_request_timeout = time +
 		(xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);

 	/* wait for all other partitions to disengage from us */

-	while ((engaged = xpc_partition_engaged(-1UL)) &&
-			(time = rtc_time()) < disengage_request_timeout) {
+	while (1) {

-		if (time >= print_time) {
+		engaged = xpc_partition_engaged(-1UL);
+		if (!engaged) {
+			dev_info(xpc_part, "all partitions have disengaged\n");
+			break;
+		}

+		time = rtc_time();
+		if (time >= disengage_request_timeout) {
+			for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+				if (engaged & (1UL << partid)) {
+					dev_info(xpc_part, "disengage from "
+						"remote partition %d timed "
+						"out\n", partid);
+				}
+			}
+			break;
+		}
+
+		if (time >= printmsg_time) {
 			dev_info(xpc_part, "waiting for remote partitions to "
-				"disengage, engaged=0x%lx\n", engaged);
-
-			print_time = time + (XPC_DISENGAGE_PRINTMSG_INTERVAL *
+				"disengage, timeout in %ld seconds\n",
+				(disengage_request_timeout - time) /
+						sn_rtc_cycles_per_second);
+			printmsg_time = time +
+					(XPC_DISENGAGE_PRINTMSG_INTERVAL *
 						sn_rtc_cycles_per_second);
 		}
 	}
-
-	dev_info(xpc_part, "finished waiting for remote partitions to "
-		"disengage, engaged=0x%lx\n", engaged);
-}
-
-
-/*
- * This function is called when the system is being rebooted.
- */
-static int
-xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
-{
-	enum xpc_retval reason;
-
-
-	switch (event) {
-	case SYS_RESTART:
-		reason = xpcSystemReboot;
-		break;
-	case SYS_HALT:
-		reason = xpcSystemHalt;
-		break;
-	case SYS_POWER_OFF:
-		reason = xpcSystemPoweroff;
-		break;
-	default:
-		reason = xpcSystemGoingDown;
-	}
-
-	xpc_do_exit(reason);
-	return NOTIFY_DONE;
 }


 /*
- * This function is called when the system is being rebooted.
+ * This function is called when the system is being restarted or halted due
+ * to some sort of system failure. If this is the case we need to notify the
+ * other partitions to disengage from all references to our memory.
+ * This function can also be called when our heartbeater could be offlined
+ * for a time. In this case we need to notify other partitions to not worry
+ * about the lack of a heartbeat.
 */
 static int
 xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)

@@ -1115,11 +1166,25 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 	case DIE_MACHINE_HALT:
 		xpc_die_disengage();
 		break;
+
+	case DIE_KDEBUG_ENTER:
+		/* Should lack of heartbeat be ignored by other partitions? */
+		if (!xpc_kdebug_ignore) {
+			break;
+		}
+		/* fall through */
 	case DIE_MCA_MONARCH_ENTER:
 	case DIE_INIT_MONARCH_ENTER:
 		xpc_vars->heartbeat++;
 		xpc_vars->heartbeat_offline = 1;
 		break;
+
+	case DIE_KDEBUG_LEAVE:
+		/* Is lack of heartbeat being ignored by other partitions? */
+		if (!xpc_kdebug_ignore) {
+			break;
+		}
+		/* fall through */
 	case DIE_MCA_MONARCH_LEAVE:
 	case DIE_INIT_MONARCH_LEAVE:
 		xpc_vars->heartbeat++;

@@ -1344,3 +1409,7 @@ module_param(xpc_disengage_request_timelimit, int, 0);
 MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
 		"for disengage request to complete.");

+module_param(xpc_kdebug_ignore, int, 0);
+MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
+		"other partitions when dropping into kdebug.");
+
arch/ia64/sn/kernel/xpc_partition.c

@@ -3,7 +3,7 @@
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
 */

@@ -28,7 +28,7 @@
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/nodepda.h>
 #include <asm/sn/addrs.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>

 /* XPC is exiting flag */

@@ -771,7 +771,8 @@ xpc_identify_act_IRQ_req(int nasid)
 		}
 	}

-	if (!xpc_partition_disengaged(part)) {
+	if (part->disengage_request_timeout > 0 &&
+					!xpc_partition_disengaged(part)) {
 		/* still waiting on other side to disengage from us */
 		return;
 	}

@@ -873,6 +874,9 @@ xpc_partition_disengaged(struct xpc_partition *part)
 			 * request in a timely fashion, so assume it's dead.
 			 */

+			dev_info(xpc_part, "disengage from remote partition %d "
+				"timed out\n", partid);
+			xpc_disengage_request_timedout = 1;
 			xpc_clear_partition_engaged(1UL << partid);
 			disengaged = 1;
 		}
arch/ia64/sn/pci/pcibr/pcibr_dma.c

@@ -218,7 +218,9 @@ void sn_dma_flush(uint64_t addr)
 	uint64_t flags;
 	uint64_t itte;
 	struct hubdev_info *hubinfo;
-	volatile struct sn_flush_device_list *p;
+	volatile struct sn_flush_device_kernel *p;
+	volatile struct sn_flush_device_common *common;
 	struct sn_flush_nasid_entry *flush_nasid_list;

 	if (!sn_ioif_inited)

@@ -268,17 +270,17 @@ void sn_dma_flush(uint64_t addr)
 	p = &flush_nasid_list->widget_p[wid_num][0];

 	/* find a matching BAR */
-	for (i = 0; i < DEV_PER_WIDGET; i++) {
+	for (i = 0; i < DEV_PER_WIDGET; i++, p++) {
+		common = p->common;
 		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
-			if (p->sfdl_bar_list[j].start == 0)
+			if (common->sfdl_bar_list[j].start == 0)
 				break;
-			if (addr >= p->sfdl_bar_list[j].start
-			    && addr <= p->sfdl_bar_list[j].end)
+			if (addr >= common->sfdl_bar_list[j].start
+			    && addr <= common->sfdl_bar_list[j].end)
 				break;
 		}
-		if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
+		if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
 			break;
-		p++;
 	}

 	/* if no matching BAR, return without doing anything. */

@@ -304,24 +306,24 @@ void sn_dma_flush(uint64_t addr)
 		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
 			return;
 		} else {
-			pcireg_wrb_flush_get(p->sfdl_pcibus_info,
-					     (p->sfdl_slot - 1));
+			pcireg_wrb_flush_get(common->sfdl_pcibus_info,
+					     (common->sfdl_slot - 1));
 		}
 	} else {
-		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
-				  sfdl_flush_lock, flags);
-
-		*p->sfdl_flush_addr = 0;
+		spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock,
+				  flags);
+		*common->sfdl_flush_addr = 0;

 		/* force an interrupt. */
-		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
+		*(volatile uint32_t *)(common->sfdl_force_int_addr) = 1;

 		/* wait for the interrupt to come back. */
-		while (*(p->sfdl_flush_addr) != 0x10f)
+		while (*(common->sfdl_flush_addr) != 0x10f)
 			cpu_relax();

 		/* okay, everything is synched up. */
-		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
+		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock,
+				       flags);
 	}
 	return;
 }
arch/ia64/sn/pci/pcibr/pcibr_provider.c

@@ -92,7 +92,8 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 	cnodeid_t near_cnode;
 	struct hubdev_info *hubdev_info;
 	struct pcibus_info *soft;
-	struct sn_flush_device_list *sn_flush_device_list;
+	struct sn_flush_device_kernel *sn_flush_device_kernel;
+	struct sn_flush_device_common *common;

 	if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
 		return NULL;

@@ -137,20 +138,19 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

 	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
-		sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
+		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
 		    widget_p[(int)soft->pbi_buscommon.bs_xid];
-		if (sn_flush_device_list) {
+		if (sn_flush_device_kernel) {
 			for (j = 0; j < DEV_PER_WIDGET;
-			     j++, sn_flush_device_list++) {
-				if (sn_flush_device_list->sfdl_slot == -1)
+			     j++, sn_flush_device_kernel++) {
+				common = sn_flush_device_kernel->common;
+				if (common->sfdl_slot == -1)
 					continue;
-				if ((sn_flush_device_list->
-				     sfdl_persistent_segment ==
+				if ((common->sfdl_persistent_segment ==
 				     soft->pbi_buscommon.bs_persist_segment) &&
-				     (sn_flush_device_list->
-				     sfdl_persistent_busnum ==
+				     (common->sfdl_persistent_busnum ==
 				     soft->pbi_buscommon.bs_persist_busnum))
-					sn_flush_device_list->sfdl_pcibus_info =
+					common->sfdl_pcibus_info =
 					    soft;
 			}
 		}
include/asm-ia64/kprobes.h

@@ -68,10 +68,14 @@ struct prev_kprobe {
 	unsigned long status;
 };

+#define	MAX_PARAM_RSE_SIZE	(0x60+0x60/0x3f)
 /* per-cpu kprobe control block */
 struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	struct pt_regs jprobe_saved_regs;
+	unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
+	unsigned long *bsp;
+	unsigned long cfm;
 	struct prev_kprobe prev_kprobe;
 };

@@ -118,5 +122,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 static inline void jprobe_return(void)
 {
 }
+extern void invalidate_stacked_regs(void);
+extern void flush_register_stack(void);

 #endif				/* _ASM_KPROBES_H */
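For context on the new MAX_PARAM_RSE_SIZE save area: an ia64 register frame holds at most 96 (0x60) stacked registers, and the register backing store interleaves one RNaT-collection slot per 63 (0x3f) register slots, so 0x60 + 0x60/0x3f slots covers a full frame. A tiny stand-alone arithmetic check (illustration only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long max_regs = 0x60;			/* largest stacked frame */
    	unsigned long rnat_slots = max_regs / 0x3f;	/* interleaved RNaT collections */

    	printf("MAX_PARAM_RSE_SIZE = %lu slots (%lu bytes)\n",
    	       max_regs + rnat_slots,
    	       (max_regs + rnat_slots) * sizeof(unsigned long));
    	return 0;
    }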
include/asm-ia64/sn/sn_sal.h

@@ -75,7 +75,8 @@
 #define  SN_SAL_IOIF_GET_HUBDEV_INFO		   0x02000055
 #define  SN_SAL_IOIF_GET_PCIBUS_INFO		   0x02000056
 #define  SN_SAL_IOIF_GET_PCIDEV_INFO		   0x02000057
-#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST	   0x02000058
+#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST	   0x02000058	// deprecated
+#define  SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST	   0x0200005a

 #define SN_SAL_HUB_ERROR_INTERRUPT		   0x02000060
 #define SN_SAL_BTE_RECOVER			   0x02000061

@@ -1100,7 +1101,7 @@ ia64_sn_bte_recovery(nasid_t nasid)
 	struct ia64_sal_retval rv;

 	rv.status = 0;
-	SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, 0, 0, 0, 0, 0, 0, 0);
+	SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0);
 	if (rv.status == SALRET_NOT_IMPLEMENTED)
 		return 0;
 	return (int) rv.status;
include/asm-ia64/sn/xp.h

@@ -227,7 +227,9 @@ enum xpc_retval {
 	xpcOpenCloseError,	/* 50: channel open/close protocol error */

-	xpcUnknownReason	/* 51: unknown reason -- must be last in list */
+	xpcDisconnected,	/* 51: channel disconnected (closed) */
+
+	xpcUnknownReason	/* 52: unknown reason -- must be last in list */
 };
arch/ia64/sn/kernel/xpc.h → include/asm-ia64/sn/xpc.h

@@ -3,7 +3,7 @@
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
 */

@@ -11,8 +11,8 @@
 * Cross Partition Communication (XPC) structures and macros.
 */

-#ifndef _IA64_SN_KERNEL_XPC_H
-#define _IA64_SN_KERNEL_XPC_H
+#ifndef _ASM_IA64_SN_XPC_H
+#define _ASM_IA64_SN_XPC_H

 #include <linux/config.h>

@@ -663,6 +663,7 @@ extern struct xpc_registration xpc_registrations[];
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
+extern int xpc_disengage_request_timedout;
 extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
 extern void xpc_dropped_IPI_check(struct xpc_partition *);
 extern void xpc_activate_partition(struct xpc_partition *);

@@ -707,7 +708,7 @@ extern void xpc_connected_callout(struct xpc_channel *);
 extern void xpc_deliver_msg(struct xpc_channel *);
 extern void xpc_disconnect_channel(const int, struct xpc_channel *,
 					enum xpc_retval, unsigned long *);
-extern void xpc_disconnecting_callout(struct xpc_channel *);
+extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
 extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
 extern void xpc_teardown_infrastructure(struct xpc_partition *);

@@ -1269,5 +1270,5 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
 }

-#endif /* _IA64_SN_KERNEL_XPC_H */
+#endif /* _ASM_IA64_SN_XPC_H */
include/asm-ia64/thread_info.h

@@ -93,6 +93,7 @@ struct thread_info {
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
+#define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */

 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)

@@ -100,9 +101,10 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
-#define _TIF_SIGDELAYED	(1 << TIF_SIGDELAYED)
+#define _TIF_SIGDELAYED		(1 << TIF_SIGDELAYED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
+#define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)

 /* "work to do on user-return" bits */
 #define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)