linux-davinci-2.6.23
Commit f4fbfb0d
authored Jul 19, 2007 by Tony Luck
Pull vector-domain into release branch
Parents: ffc72040, bf903d0a

Showing 9 changed files with 634 additions and 400 deletions
Documentation/kernel-parameters.txt   +3    -0
arch/ia64/kernel/iosapic.c            +298  -354
arch/ia64/kernel/irq.c                +1    -1
arch/ia64/kernel/irq_ia64.c           +282  -35
arch/ia64/kernel/msi_ia64.c           +19   -4
arch/ia64/kernel/smpboot.c            +4    -0
include/asm-ia64/hw_irq.h             +16   -2
include/asm-ia64/iosapic.h            +4    -2
include/asm-ia64/irq.h                +7    -2
Documentation/kernel-parameters.txt

@@ -1885,6 +1885,9 @@ and is between 256 and 4096 characters. It is defined in the file
 			vdso=1: enable VDSO (default)
 			vdso=0: disable VDSO mapping
 
+	vector=		[IA-64,SMP]
+			vector=percpu: enable percpu vector domain
+
 	video=		[FB] Frame buffer configuration
 			See Documentation/fb/modedb.txt.
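For context (this note is not part of the commit): the new option is passed on the kernel boot command line like any other parameter, appended to the bootloader's kernel arguments, e.g.

	vector=percpu

With it, each CPU gets its own interrupt-vector allocation domain instead of all CPUs sharing one global vector space; as the parse_vector_domain() hunk in arch/ia64/kernel/irq_ia64.c below shows, selecting "percpu" also sets no_int_routing.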
arch/ia64/kernel/iosapic.c

@@ -118,15 +118,25 @@ static DEFINE_SPINLOCK(iosapic_lock);
  * vector.
  */
-struct iosapic_rte_info {
-	struct list_head rte_list;	/* node in list of RTEs sharing the
-					 * same vector */
-	char __iomem	*addr;		/* base address of IOSAPIC */
-	unsigned int	gsi_base;	/* first GSI assigned to this
-					 * IOSAPIC */
+#define NO_REF_RTE	0
+
+static struct iosapic {
+	char __iomem	*addr;		/* base address of IOSAPIC */
+	unsigned int	gsi_base;	/* GSI base */
+	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
+	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
+#ifdef CONFIG_NUMA
+	unsigned short	node;		/* numa node association via pxm */
+#endif
+	spinlock_t	lock;		/* lock for indirect reg access */
+} iosapic_lists[NR_IOSAPICS];
+
+struct iosapic_rte_info {
+	struct list_head rte_list;	/* RTEs sharing the same vector */
 	char		rte_index;	/* IOSAPIC RTE index */
 	int		refcnt;		/* reference counter */
 	unsigned int	flags;		/* flags */
+	struct iosapic	*iosapic;
 } ____cacheline_aligned;
 
 static struct iosapic_intr_info {

@@ -140,24 +150,23 @@ static struct iosapic_intr_info {
 	unsigned char	polarity: 1;	/* interrupt polarity
 					 * (see iosapic.h) */
 	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
-} iosapic_intr_info[IA64_NUM_VECTORS];
-
-static struct iosapic {
-	char __iomem	*addr;		/* base address of IOSAPIC */
-	unsigned int	gsi_base;	/* first GSI assigned to this
-					 * IOSAPIC */
-	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
-	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
-#ifdef CONFIG_NUMA
-	unsigned short	node;		/* numa node association via pxm */
-#endif
-} iosapic_lists[NR_IOSAPICS];
+} iosapic_intr_info[NR_IRQS];
 
 static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */
 
 static int iosapic_kmalloc_ok;
 static LIST_HEAD(free_rte_list);
 
+static inline void
+iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&iosapic->lock, flags);
+	__iosapic_write(iosapic->addr, reg, val);
+	spin_unlock_irqrestore(&iosapic->lock, flags);
+}
+
 /*
  * Find an IOSAPIC associated with a GSI
  */

@@ -175,17 +184,18 @@ find_iosapic (unsigned int gsi)
 	return -1;
 }
 
-static inline int
-_gsi_to_vector (unsigned int gsi)
+static inline int __gsi_to_irq(unsigned int gsi)
 {
+	int irq;
 	struct iosapic_intr_info *info;
 	struct iosapic_rte_info *rte;
 
-	for (info = iosapic_intr_info; info <
-		     iosapic_intr_info + IA64_NUM_VECTORS; ++info)
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		info = &iosapic_intr_info[irq];
 		list_for_each_entry(rte, &info->rtes, rte_list)
-			if (rte->gsi_base + rte->rte_index == gsi)
-				return info - iosapic_intr_info;
+			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
+				return irq;
+	}
 	return -1;
 }

@@ -196,7 +206,10 @@ _gsi_to_vector (unsigned int gsi)
 inline int
 gsi_to_vector (unsigned int gsi)
 {
-	return _gsi_to_vector(gsi);
+	int irq = __gsi_to_irq(gsi);
+	if (check_irq_used(irq) < 0)
+		return -1;
+	return irq_to_vector(irq);
 }
 
 int

@@ -204,66 +217,48 @@ gsi_to_irq (unsigned int gsi)
 {
 	unsigned long flags;
 	int irq;
 
-	/*
-	 * XXX fix me: this assumes an identity mapping between IA-64 vector
-	 * and Linux irq numbers...
-	 */
 	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		irq = _gsi_to_vector(gsi);
-	}
+	irq = __gsi_to_irq(gsi);
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 	return irq;
 }
 
-static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi,
-						  unsigned int vec)
+static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
 {
 	struct iosapic_rte_info *rte;
 
-	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
-		if (rte->gsi_base + rte->rte_index == gsi)
+	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
+		if (rte->iosapic->gsi_base + rte->rte_index == gsi)
 			return rte;
 	return NULL;
 }
 
 static void
-set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
+set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
 {
 	unsigned long pol, trigger, dmode;
 	u32 low32, high32;
-	char __iomem *addr;
 	int rte_index;
 	char redir;
 	struct iosapic_rte_info *rte;
+	ia64_vector vector = irq_to_vector(irq);
 
 	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
 
-	rte = gsi_vector_to_rte(gsi, vector);
+	rte = find_rte(irq, gsi);
 	if (!rte)
 		return;		/* not an IOSAPIC interrupt */
 
 	rte_index = rte->rte_index;
-	addr = rte->addr;
-	pol     = iosapic_intr_info[vector].polarity;
-	trigger = iosapic_intr_info[vector].trigger;
-	dmode   = iosapic_intr_info[vector].dmode;
+	pol     = iosapic_intr_info[irq].polarity;
+	trigger = iosapic_intr_info[irq].trigger;
+	dmode   = iosapic_intr_info[irq].dmode;
 
 	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
 
 #ifdef CONFIG_SMP
-	{
-		unsigned int irq;
-
-		for (irq = 0; irq < NR_IRQS; ++irq)
-			if (irq_to_vector(irq) == vector) {
-				set_irq_affinity_info(irq,
-						      (int)(dest & 0xffff),
-						      redir);
-				break;
-			}
-	}
+	set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
 #endif
 
 	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |

@@ -275,10 +270,10 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
 	/* dest contains both id and eid */
 	high32 = (dest << IOSAPIC_DEST_SHIFT);
 
-	iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
-	iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
-	iosapic_intr_info[vector].low32 = low32;
-	iosapic_intr_info[vector].dest = dest;
+	iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
+	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
+	iosapic_intr_info[irq].low32 = low32;
+	iosapic_intr_info[irq].dest = dest;
 }
 
 static void

@@ -294,15 +289,18 @@ kexec_disable_iosapic(void)
 {
 	struct iosapic_intr_info *info;
 	struct iosapic_rte_info *rte;
-	u8 vec = 0;
-	for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS;
-			++info, ++vec) {
+	ia64_vector vec;
+	int irq;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		info = &iosapic_intr_info[irq];
+		vec = irq_to_vector(irq);
 		list_for_each_entry(rte, &info->rtes, rte_list) {
-			iosapic_write(rte->addr,
+			iosapic_write(rte->iosapic,
 					IOSAPIC_RTE_LOW(rte->rte_index),
 					IOSAPIC_MASK|vec);
-			iosapic_eoi(rte->addr, vec);
+			iosapic_eoi(rte->iosapic->addr, vec);
 		}
 	}
 }

@@ -311,54 +309,36 @@ kexec_disable_iosapic(void)
 static void
 mask_irq (unsigned int irq)
 {
 	unsigned long flags;
-	char __iomem *addr;
 	u32 low32;
 	int rte_index;
-	ia64_vector vec = irq_to_vector(irq);
 	struct iosapic_rte_info *rte;
 
-	if (list_empty(&iosapic_intr_info[vec].rtes))
+	if (list_empty(&iosapic_intr_info[irq].rtes))
 		return;			/* not an IOSAPIC interrupt! */
 
 	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		/* set only the mask bit */
-		low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
-				    rte_list) {
-			addr = rte->addr;
-			rte_index = rte->rte_index;
-			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
-		}
+	/* set only the mask bit */
+	low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
+	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
+		rte_index = rte->rte_index;
+		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 }
 
 static void
 unmask_irq (unsigned int irq)
 {
 	unsigned long flags;
-	char __iomem *addr;
 	u32 low32;
 	int rte_index;
-	ia64_vector vec = irq_to_vector(irq);
 	struct iosapic_rte_info *rte;
 
-	if (list_empty(&iosapic_intr_info[vec].rtes))
+	if (list_empty(&iosapic_intr_info[irq].rtes))
 		return;			/* not an IOSAPIC interrupt! */
 
 	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
-				    rte_list) {
-			addr = rte->addr;
-			rte_index = rte->rte_index;
-			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
-		}
+	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
+	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
+		rte_index = rte->rte_index;
+		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 }
@@ -366,23 +346,24 @@ static void
 iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 {
 #ifdef CONFIG_SMP
 	unsigned long flags;
 	u32 high32, low32;
 	int dest, rte_index;
-	char __iomem *addr;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
-	ia64_vector vec;
 	struct iosapic_rte_info *rte;
+	struct iosapic *iosapic;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
-	vec = irq_to_vector(irq);
 
 	cpus_and(mask, mask, cpu_online_map);
 	if (cpus_empty(mask))
 		return;
 
+	if (reassign_irq_vector(irq, first_cpu(mask)))
+		return;
+
 	dest = cpu_physical_id(first_cpu(mask));
 
-	if (list_empty(&iosapic_intr_info[vec].rtes))
+	if (list_empty(&iosapic_intr_info[irq].rtes))
 		return;			/* not an IOSAPIC interrupt */
 
 	set_irq_affinity_info(irq, dest, redir);

@@ -390,31 +371,24 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	/* dest contains both id and eid */
 	high32 = dest << IOSAPIC_DEST_SHIFT;
 
 	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		low32 = iosapic_intr_info[vec].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
-
-		if (redir)
-			/* change delivery mode to lowest priority */
-			low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
-		else
-			/* change delivery mode to fixed */
-			low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
-
-		iosapic_intr_info[vec].low32 = low32;
-		iosapic_intr_info[vec].dest = dest;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
-			addr = rte->addr;
-			rte_index = rte->rte_index;
-			iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
-			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
-		}
+	low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
+	if (redir)
+		/* change delivery mode to lowest priority */
+		low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
+	else
+		/* change delivery mode to fixed */
+		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
+	low32 &= IOSAPIC_VECTOR_MASK;
+	low32 |= irq_to_vector(irq);
+
+	iosapic_intr_info[irq].low32 = low32;
+	iosapic_intr_info[irq].dest = dest;
+	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
+		iosapic = rte->iosapic;
+		rte_index = rte->rte_index;
+		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
+		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 #endif
 }

@@ -434,10 +408,20 @@ iosapic_end_level_irq (unsigned int irq)
 {
 	ia64_vector vec = irq_to_vector(irq);
 	struct iosapic_rte_info *rte;
+	int do_unmask_irq = 0;
 
-	move_native_irq(irq);
-	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
-		iosapic_eoi(rte->addr, vec);
+	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+		do_unmask_irq = 1;
+		mask_irq(irq);
+	}
+
+	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
+		iosapic_eoi(rte->iosapic->addr, vec);
+
+	if (unlikely(do_unmask_irq)) {
+		move_masked_irq(irq);
+		unmask_irq(irq);
+	}
 }
 
 #define iosapic_shutdown_level_irq	mask_irq

@@ -519,13 +503,12 @@ iosapic_version (char __iomem *addr)
  *	unsigned int reserved2 : 8;
  * }
  */
-	return iosapic_read(addr, IOSAPIC_VERSION);
+	return __iosapic_read(addr, IOSAPIC_VERSION);
 }
 
-static int iosapic_find_sharable_vector (unsigned long trigger,
-					  unsigned long pol)
+static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
 {
-	int i, vector = -1, min_count = -1;
+	int i, irq = -ENOSPC, min_count = -1;
 	struct iosapic_intr_info *info;
 
 	/*

@@ -533,21 +516,21 @@ static int iosapic_find_sharable_vector (unsigned long trigger,
 	 * supported yet
 	 */
 	if (trigger == IOSAPIC_EDGE)
-		return -1;
+		return -EINVAL;
 
-	for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
+	for (i = 0; i <= NR_IRQS; i++) {
 		info = &iosapic_intr_info[i];
 		if (info->trigger == trigger && info->polarity == pol &&
-		    (info->dmode == IOSAPIC_FIXED ||
-		     info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
+		    (info->dmode == IOSAPIC_FIXED ||
+		     info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
+		    can_request_irq(i, IRQF_SHARED)) {
 			if (min_count == -1 || info->count < min_count) {
-				vector = i;
+				irq = i;
 				min_count = info->count;
 			}
 		}
 	}
-
-	return vector;
+	return irq;
 }
 
 /*

@@ -555,25 +538,25 @@ static int iosapic_find_sharable_vector (unsigned long trigger,
  * assign a new vector for the other and make the vector available
  */
 static void __init
-iosapic_reassign_vector (int vector)
+iosapic_reassign_vector (int irq)
 {
-	int new_vector;
+	int new_irq;
 
-	if (!list_empty(&iosapic_intr_info[vector].rtes)) {
-		new_vector = assign_irq_vector(AUTO_ASSIGN);
-		if (new_vector < 0)
+	if (!list_empty(&iosapic_intr_info[irq].rtes)) {
+		new_irq = create_irq();
+		if (new_irq < 0)
 			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
 		printk(KERN_INFO "Reassigning vector %d to %d\n",
-		       vector, new_vector);
-		memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
+		       irq_to_vector(irq), irq_to_vector(new_irq));
+		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
 		       sizeof(struct iosapic_intr_info));
-		INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
-		list_move(iosapic_intr_info[vector].rtes.next,
-			  &iosapic_intr_info[new_vector].rtes);
-		memset(&iosapic_intr_info[vector], 0,
+		INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
+		list_move(iosapic_intr_info[irq].rtes.next,
+			  &iosapic_intr_info[new_irq].rtes);
+		memset(&iosapic_intr_info[irq], 0,
 		       sizeof(struct iosapic_intr_info));
-		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
-		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
+		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
+		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
 	}
 }

@@ -610,29 +593,18 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
 	return rte;
 }
 
-static void iosapic_free_rte (struct iosapic_rte_info *rte)
+static inline int irq_is_shared (int irq)
 {
-	if (rte->flags & RTE_PREALLOCATED)
-		list_add_tail(&rte->rte_list, &free_rte_list);
-	else
-		kfree(rte);
-}
-
-static inline int vector_is_shared (int vector)
-{
-	return (iosapic_intr_info[vector].count > 1);
+	return (iosapic_intr_info[irq].count > 1);
 }
 
 static int
-register_intr (unsigned int gsi, int vector, unsigned char delivery,
+register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
 {
 	irq_desc_t *idesc;
 	struct hw_interrupt_type *irq_type;
-	int rte_index;
 	int index;
-	unsigned long gsi_base;
-	void __iomem *iosapic_address;
 	struct iosapic_rte_info *rte;
 
 	index = find_iosapic(gsi);

@@ -642,10 +614,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 		return -ENODEV;
 	}
 
-	iosapic_address = iosapic_lists[index].addr;
-	gsi_base = iosapic_lists[index].gsi_base;
-
-	rte = gsi_vector_to_rte(gsi, vector);
+	rte = find_rte(irq, gsi);
 	if (!rte) {
 		rte = iosapic_alloc_rte();
 		if (!rte) {

@@ -654,40 +623,42 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 			return -ENOMEM;
 		}
 
-		rte_index = gsi - gsi_base;
-		rte->rte_index	= rte_index;
-		rte->addr	= iosapic_address;
-		rte->gsi_base	= gsi_base;
+		rte->iosapic	= &iosapic_lists[index];
+		rte->rte_index	= gsi - rte->iosapic->gsi_base;
 		rte->refcnt++;
-		list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes);
-		iosapic_intr_info[vector].count++;
+		list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
+		iosapic_intr_info[irq].count++;
 		iosapic_lists[index].rtes_inuse++;
 	}
-	else if (vector_is_shared(vector)) {
-		struct iosapic_intr_info *info = &iosapic_intr_info[vector];
-		if (info->trigger != trigger || info->polarity != polarity) {
+	else if (rte->refcnt == NO_REF_RTE) {
+		struct iosapic_intr_info *info = &iosapic_intr_info[irq];
+		if (info->count > 0 &&
+		    (info->trigger != trigger || info->polarity != polarity)){
 			printk(KERN_WARNING
 			       "%s: cannot override the interrupt\n",
 			       __FUNCTION__);
 			return -EINVAL;
 		}
+		rte->refcnt++;
+		iosapic_intr_info[irq].count++;
+		iosapic_lists[index].rtes_inuse++;
 	}
 
-	iosapic_intr_info[vector].polarity = polarity;
-	iosapic_intr_info[vector].dmode    = delivery;
-	iosapic_intr_info[vector].trigger  = trigger;
+	iosapic_intr_info[irq].polarity = polarity;
+	iosapic_intr_info[irq].dmode    = delivery;
+	iosapic_intr_info[irq].trigger  = trigger;
 
 	if (trigger == IOSAPIC_EDGE)
 		irq_type = &irq_type_iosapic_edge;
 	else
 		irq_type = &irq_type_iosapic_level;
 
-	idesc = irq_desc + vector;
+	idesc = irq_desc + irq;
 	if (idesc->chip != irq_type) {
 		if (idesc->chip != &no_irq_type)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
-			       __FUNCTION__, vector,
+			       __FUNCTION__, irq_to_vector(irq),
 			       idesc->chip->name, irq_type->name);
 		idesc->chip = irq_type;
 	}
@@ -695,18 +666,19 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 }
 
 static unsigned int
-get_target_cpu (unsigned int gsi, int vector)
+get_target_cpu (unsigned int gsi, int irq)
 {
 #ifdef CONFIG_SMP
 	static int cpu = -1;
 	extern int cpe_vector;
+	cpumask_t domain = irq_to_domain(irq);
 
 	/*
 	 * In case of vector shared by multiple RTEs, all RTEs that
 	 * share the vector need to use the same destination CPU.
 	 */
-	if (!list_empty(&iosapic_intr_info[vector].rtes))
-		return iosapic_intr_info[vector].dest;
+	if (!list_empty(&iosapic_intr_info[irq].rtes))
+		return iosapic_intr_info[irq].dest;
 
 	/*
 	 * If the platform supports redirection via XTP, let it

@@ -723,7 +695,7 @@ get_target_cpu (unsigned int gsi, int vector)
 		return cpu_physical_id(smp_processor_id());
 
 #ifdef CONFIG_ACPI
-	if (cpe_vector > 0 && vector == IA64_CPEP_VECTOR)
+	if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
 		return get_cpei_target_cpu();
 #endif

@@ -738,7 +710,7 @@ get_target_cpu (unsigned int gsi, int vector)
 			goto skip_numa_setup;
 
 		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-
+		cpus_and(cpu_mask, cpu_mask, domain);
 		for_each_cpu_mask(numa_cpu, cpu_mask) {
 			if (!cpu_online(numa_cpu))
 				cpu_clear(numa_cpu, cpu_mask);

@@ -749,8 +721,8 @@ get_target_cpu (unsigned int gsi, int vector)
 		if (!num_cpus)
 			goto skip_numa_setup;
 
-		/* Use vector assignment to distribute across cpus in node */
-		cpu_index = vector % num_cpus;
+		/* Use irq assignment to distribute across cpus in node */
+		cpu_index = irq % num_cpus;
 
 		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
 			numa_cpu = next_cpu(numa_cpu, cpu_mask);

@@ -768,7 +740,7 @@ skip_numa_setup:
 	do {
 		if (++cpu >= NR_CPUS)
 			cpu = 0;
-	} while (!cpu_online(cpu));
+	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 
 	return cpu_physical_id(cpu);
 #else  /* CONFIG_SMP */

@@ -785,84 +757,72 @@ int
 iosapic_register_intr (unsigned int gsi,
 		       unsigned long polarity, unsigned long trigger)
 {
-	int vector, mask = 1, err;
+	int irq, mask = 1, err;
 	unsigned int dest;
 	unsigned long flags;
 	struct iosapic_rte_info *rte;
 	u32 low32;
-again:
+
 	/*
 	 * If this GSI has already been registered (i.e., it's a
 	 * shared interrupt, or we lost a race to register it),
 	 * don't touch the RTE.
 	 */
 	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		vector = gsi_to_vector(gsi);
-		if (vector > 0) {
-			rte = gsi_vector_to_rte(gsi, vector);
+	irq = __gsi_to_irq(gsi);
+	if (irq > 0) {
+		rte = find_rte(irq, gsi);
+		if (iosapic_intr_info[irq].count == 0) {
+			assign_irq_vector(irq);
+			dynamic_irq_init(irq);
+		} else if (rte->refcnt != NO_REF_RTE) {
 			rte->refcnt++;
-			spin_unlock_irqrestore(&iosapic_lock, flags);
-			return vector;
+			goto unlock_iosapic_lock;
 		}
-	}
-	spin_unlock_irqrestore(&iosapic_lock, flags);
+	} else
+		irq = create_irq();
 
 	/* If vector is running out, we try to find a sharable vector */
-	vector = assign_irq_vector(AUTO_ASSIGN);
-	if (vector < 0) {
-		vector = iosapic_find_sharable_vector(trigger, polarity);
-		if (vector < 0)
-			return -ENOSPC;
+	if (irq < 0) {
+		irq = iosapic_find_sharable_irq(trigger, polarity);
+		if (irq < 0)
+			goto unlock_iosapic_lock;
 	}
 
-	spin_lock_irqsave(&irq_desc[vector].lock, flags);
-	spin_lock(&iosapic_lock);
-	{
-		if (gsi_to_vector(gsi) > 0) {
-			if (list_empty(&iosapic_intr_info[vector].rtes))
-				free_irq_vector(vector);
-			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&irq_desc[vector].lock, flags);
-			goto again;
-		}
-
-		dest = get_target_cpu(gsi, vector);
-		err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
-				    polarity, trigger);
-		if (err < 0) {
-			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&irq_desc[vector].lock, flags);
-			return err;
-		}
-
-		/*
-		 * If the vector is shared and already unmasked for
-		 * other interrupt sources, don't mask it.
-		 */
-		low32 = iosapic_intr_info[vector].low32;
-		if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
-			mask = 0;
-		set_rte(gsi, vector, dest, mask);
+	spin_lock(&irq_desc[irq].lock);
+	dest = get_target_cpu(gsi, irq);
+	err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
+			    polarity, trigger);
+	if (err < 0) {
+		irq = err;
+		goto unlock_all;
 	}
-	spin_unlock(&iosapic_lock);
-	spin_unlock_irqrestore(&irq_desc[vector].lock, flags);
+
+	/*
+	 * If the vector is shared and already unmasked for other
+	 * interrupt sources, don't mask it.
+	 */
+	low32 = iosapic_intr_info[irq].low32;
+	if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
+		mask = 0;
+	set_rte(gsi, irq, dest, mask);
 
 	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
-	       cpu_logical_id(dest), dest, vector);
-
-	return vector;
+	       cpu_logical_id(dest), dest, irq_to_vector(irq));
+ unlock_all:
+	spin_unlock(&irq_desc[irq].lock);
+ unlock_iosapic_lock:
+	spin_unlock_irqrestore(&iosapic_lock, flags);
+	return irq;
 }
 
 void
 iosapic_unregister_intr (unsigned int gsi)
 {
 	unsigned long flags;
-	int irq, vector, index;
+	int irq, index;
 	irq_desc_t *idesc;
 	u32 low32;
 	unsigned long trigger, polarity;

@@ -881,78 +841,56 @@ iosapic_unregister_intr (unsigned int gsi)
 		WARN_ON(1);
 		return;
 	}
-	vector = irq_to_vector(irq);
 
-	idesc = irq_desc + irq;
-	spin_lock_irqsave(&idesc->lock, flags);
-	spin_lock(&iosapic_lock);
-	{
-		if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
-			printk(KERN_ERR
-			       "iosapic_unregister_intr(%u) unbalanced\n",
-			       gsi);
-			WARN_ON(1);
-			goto out;
-		}
+	spin_lock_irqsave(&iosapic_lock, flags);
+	if ((rte = find_rte(irq, gsi)) == NULL) {
+		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
+		       gsi);
+		WARN_ON(1);
+		goto out;
+	}
 
-		if (--rte->refcnt > 0)
-			goto out;
+	if (--rte->refcnt > 0)
+		goto out;
 
-		/* Mask the interrupt */
-		low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
-		iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
+	idesc = irq_desc + irq;
+	rte->refcnt = NO_REF_RTE;
 
-		/* Remove the rte entry from the list */
-		list_del(&rte->rte_list);
-		iosapic_intr_info[vector].count--;
-		iosapic_free_rte(rte);
-		index = find_iosapic(gsi);
-		iosapic_lists[index].rtes_inuse--;
-		WARN_ON(iosapic_lists[index].rtes_inuse < 0);
+	/* Mask the interrupt */
+	low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
+	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
 
-		trigger  = iosapic_intr_info[vector].trigger;
-		polarity = iosapic_intr_info[vector].polarity;
-		dest = iosapic_intr_info[vector].dest;
-		printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x)"
-		       " vector %d unregistered\n",
-		       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
-		       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
-		       cpu_logical_id(dest), dest, vector);
+	iosapic_intr_info[irq].count--;
+	index = find_iosapic(gsi);
+	iosapic_lists[index].rtes_inuse--;
+	WARN_ON(iosapic_lists[index].rtes_inuse < 0);
 
-		if (list_empty(&iosapic_intr_info[vector].rtes)) {
-			/* Sanity check */
-			BUG_ON(iosapic_intr_info[vector].count);
+	trigger  = iosapic_intr_info[irq].trigger;
+	polarity = iosapic_intr_info[irq].polarity;
+	dest = iosapic_intr_info[irq].dest;
+	printk(KERN_INFO
+	       "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
+	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
+	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
+	       cpu_logical_id(dest), dest, irq_to_vector(irq));
 
-			/* Clear the interrupt controller descriptor */
-			idesc->chip = &no_irq_type;
-
+	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
-			/* Clear affinity */
-			cpus_setall(idesc->affinity);
+		/* Clear affinity */
+		cpus_setall(idesc->affinity);
 #endif
-
-			/* Clear the interrupt information */
-			memset(&iosapic_intr_info[vector], 0,
-			       sizeof(struct iosapic_intr_info));
-			iosapic_intr_info[vector].low32 |= IOSAPIC_MASK;
-			INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
-
-			if (idesc->action) {
-				printk(KERN_ERR "interrupt handlers still exist on"
-				       "IRQ %u\n", irq);
-				WARN_ON(1);
-			}
-
-			/* Free the interrupt vector */
-			free_irq_vector(vector);
-		}
+		/* Clear the interrupt information */
+		iosapic_intr_info[irq].dest = 0;
+		iosapic_intr_info[irq].dmode = 0;
+		iosapic_intr_info[irq].polarity = 0;
+		iosapic_intr_info[irq].trigger = 0;
+		iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
+
+		/* Destroy and reserve IRQ */
+		destroy_and_reserve_irq(irq);
 	}
  out:
-	spin_unlock(&iosapic_lock);
-	spin_unlock_irqrestore(&idesc->lock, flags);
+	spin_unlock_irqrestore(&iosapic_lock, flags);
 }
 
 /*
@@ -965,27 +903,30 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 {
 	static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
 	unsigned char delivery;
-	int vector, mask = 0;
+	int irq, vector, mask = 0;
 	unsigned int dest = ((id << 8) | eid) & 0xffff;
 
 	switch (int_type) {
 	      case ACPI_INTERRUPT_PMI:
-		vector = iosapic_vector;
+		irq = vector = iosapic_vector;
+		bind_irq_vector(irq, vector, CPU_MASK_ALL);
 		/*
 		 * since PMI vector is alloc'd by FW(ACPI) not by kernel,
 		 * we need to make sure the vector is available
 		 */
-		iosapic_reassign_vector(vector);
+		iosapic_reassign_vector(irq);
 		delivery = IOSAPIC_PMI;
 		break;
 	      case ACPI_INTERRUPT_INIT:
-		vector = assign_irq_vector(AUTO_ASSIGN);
-		if (vector < 0)
+		irq = create_irq();
+		if (irq < 0)
 			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+		vector = irq_to_vector(irq);
 		delivery = IOSAPIC_INIT;
 		break;
 	      case ACPI_INTERRUPT_CPEI:
-		vector = IA64_CPE_VECTOR;
+		irq = vector = IA64_CPE_VECTOR;
+		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
 		delivery = IOSAPIC_LOWEST_PRIORITY;
 		mask = 1;
 		break;

@@ -995,7 +936,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 		return -1;
 	}
 
-	register_intr(gsi, vector, delivery, polarity, trigger);
+	register_intr(gsi, irq, delivery, polarity, trigger);
 
 	printk(KERN_INFO
 	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"

@@ -1005,7 +946,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, vector);
 
-	set_rte(gsi, vector, dest, mask);
+	set_rte(gsi, irq, dest, mask);
 	return vector;
 }

@@ -1017,30 +958,32 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 			  unsigned long polarity,
 			  unsigned long trigger)
 {
-	int vector;
+	int vector, irq;
 	unsigned int dest = cpu_physical_id(smp_processor_id());
 
-	vector = isa_irq_to_vector(isa_irq);
-
-	register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
+	irq = vector = isa_irq_to_vector(isa_irq);
+	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
+	register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
 
 	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
 	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
 	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
 	    cpu_logical_id(dest), dest, vector);
 
-	set_rte(gsi, vector, dest, 1);
+	set_rte(gsi, irq, dest, 1);
 }
 
 void __init
 iosapic_system_init (int system_pcat_compat)
 {
-	int vector;
+	int irq;
 
-	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) {
-		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
+	for (irq = 0; irq < NR_IRQS; ++irq) {
+		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
 		/* mark as unused */
-		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
+		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
+
+		iosapic_intr_info[irq].count = 0;
 	}
 
 	pcat_compat = system_pcat_compat;

@@ -1108,31 +1051,35 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
 	unsigned long flags;
 
 	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		addr = ioremap(phys_addr, 0);
-		ver = iosapic_version(addr);
+	index = find_iosapic(gsi_base);
+	if (index >= 0) {
+		spin_unlock_irqrestore(&iosapic_lock, flags);
+		return -EBUSY;
+	}
 
-		if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
-			iounmap(addr);
-			spin_unlock_irqrestore(&iosapic_lock, flags);
-			return err;
-		}
+	addr = ioremap(phys_addr, 0);
+	ver = iosapic_version(addr);
+	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
+		iounmap(addr);
+		spin_unlock_irqrestore(&iosapic_lock, flags);
+		return err;
+	}
 
-		/*
-		 * The MAX_REDIR register holds the highest input pin
-		 * number (starting from 0).
-		 * We add 1 so that we can use it for number of pins (= RTEs)
-		 */
-		num_rte = ((ver >> 16) & 0xff) + 1;
+	/*
+	 * The MAX_REDIR register holds the highest input pin number
+	 * (starting from 0). We add 1 so that we can use it for
+	 * number of pins (= RTEs)
+	 */
+	num_rte = ((ver >> 16) & 0xff) + 1;
 
-		index = iosapic_alloc();
-		iosapic_lists[index].addr = addr;
-		iosapic_lists[index].gsi_base = gsi_base;
-		iosapic_lists[index].num_rte = num_rte;
+	index = iosapic_alloc();
+	iosapic_lists[index].addr = addr;
+	iosapic_lists[index].gsi_base = gsi_base;
+	iosapic_lists[index].num_rte = num_rte;
 #ifdef CONFIG_NUMA
-		iosapic_lists[index].node = MAX_NUMNODES;
+	iosapic_lists[index].node = MAX_NUMNODES;
 #endif
-	}
+	spin_lock_init(&iosapic_lists[index].lock);
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 
 	if ((gsi_base == 0) && pcat_compat) {

@@ -1157,25 +1104,22 @@ iosapic_remove (unsigned int gsi_base)
 	unsigned long flags;
 
 	spin_lock_irqsave(&iosapic_lock, flags);
-	{
-		index = find_iosapic(gsi_base);
-		if (index < 0) {
-			printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
-			       __FUNCTION__, gsi_base);
-			goto out;
-		}
-
-		if (iosapic_lists[index].rtes_inuse) {
-			err = -EBUSY;
-			printk(KERN_WARNING
-			       "%s: IOSAPIC for GSI base %u is busy\n",
-			       __FUNCTION__, gsi_base);
-			goto out;
-		}
+	index = find_iosapic(gsi_base);
+	if (index < 0) {
+		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
+		       __FUNCTION__, gsi_base);
+		goto out;
+	}
 
-		iounmap(iosapic_lists[index].addr);
-		iosapic_free(index);
+	if (iosapic_lists[index].rtes_inuse) {
+		err = -EBUSY;
+		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
+		       __FUNCTION__, gsi_base);
+		goto out;
+	}
+
+	iounmap(iosapic_lists[index].addr);
+	iosapic_free(index);
  out:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 	return err;
arch/ia64/kernel/irq.c

@@ -35,7 +35,7 @@ void ack_bad_irq(unsigned int irq)
 #ifdef CONFIG_IA64_GENERIC
 unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
 {
-	return (unsigned int) vec;
+	return __get_cpu_var(vector_irq)[vec];
 }
 #endif
arch/ia64/kernel/irq_ia64.c

@@ -46,6 +46,12 @@
 
 #define IRQ_DEBUG	0
 
+#define IRQ_VECTOR_UNASSIGNED	(0)
+
+#define IRQ_UNUSED		(0)
+#define IRQ_USED		(1)
+#define IRQ_RSVD		(2)
+
 /* These can be overridden in platform_irq_init */
 int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
 int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

@@ -54,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
 void __iomem *ipi_base_addr = ((void __iomem *)
 			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
 
+static cpumask_t vector_allocation_domain(int cpu);
+
 /*
  * Legacy IRQ to IA-64 vector translation table.
  */

@@ -64,46 +72,269 @@ __u8 isa_irq_to_vector_map[16] = {
 };
 EXPORT_SYMBOL(isa_irq_to_vector_map);
 
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
+DEFINE_SPINLOCK(vector_lock);
+
+struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
+	[0 ... NR_IRQS - 1] = {
+		.vector = IRQ_VECTOR_UNASSIGNED,
+		.domain = CPU_MASK_NONE
+	}
+};
+
+DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
+	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
+};
+
+static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
+	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+};
+
+static int irq_status[NR_IRQS] = {
+	[0 ... NR_IRQS - 1] = IRQ_UNUSED
+};
+
+int check_irq_used(int irq)
+{
+	if (irq_status[irq] == IRQ_USED)
+		return 1;
+
+	return -1;
+}
+
+static void reserve_irq(unsigned int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	irq_status[irq] = IRQ_RSVD;
+	spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+static inline int find_unassigned_irq(void)
+{
+	int irq;
+
+	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
+		if (irq_status[irq] == IRQ_UNUSED)
+			return irq;
+	return -ENOSPC;
+}
+
+static inline int find_unassigned_vector(cpumask_t domain)
+{
+	cpumask_t mask;
+	int pos;
+
+	cpus_and(mask, domain, cpu_online_map);
+	if (cpus_empty(mask))
+		return -EINVAL;
+
+	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
+		cpus_and(mask, domain, vector_table[pos]);
+		if (!cpus_empty(mask))
+			continue;
+		return IA64_FIRST_DEVICE_VECTOR + pos;
+	}
+	return -ENOSPC;
+}
+
+static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
+{
+	cpumask_t mask;
+	int cpu, pos;
+	struct irq_cfg *cfg = &irq_cfg[irq];
+
+	cpus_and(mask, domain, cpu_online_map);
+	if (cpus_empty(mask))
+		return -EINVAL;
+	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
+		return 0;
+	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
+		return -EBUSY;
+	for_each_cpu_mask(cpu, mask)
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	cfg->vector = vector;
+	cfg->domain = domain;
+	irq_status[irq] = IRQ_USED;
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	cpus_or(vector_table[pos], vector_table[pos], domain);
+	return 0;
+}
+
+int bind_irq_vector(int irq, int vector, cpumask_t domain)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __bind_irq_vector(irq, vector, domain);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}
+
+static void __clear_irq_vector(int irq)
+{
+	int vector, cpu, pos;
+	cpumask_t mask;
+	cpumask_t domain;
+	struct irq_cfg *cfg = &irq_cfg[irq];
+
+	BUG_ON((unsigned)irq >= NR_IRQS);
+	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
+	vector = cfg->vector;
+	domain = cfg->domain;
+	cpus_and(mask, cfg->domain, cpu_online_map);
+	for_each_cpu_mask(cpu, mask)
+		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+	cfg->vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->domain = CPU_MASK_NONE;
+	irq_status[irq] = IRQ_UNUSED;
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	cpus_andnot(vector_table[pos], vector_table[pos], domain);
+}
+
+static void clear_irq_vector(int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	__clear_irq_vector(irq);
+	spin_unlock_irqrestore(&vector_lock, flags);
+}
 
 int
 assign_irq_vector (int irq)
 {
-	int pos, vector;
- again:
-	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
-	vector = IA64_FIRST_DEVICE_VECTOR + pos;
-	if (vector > IA64_LAST_DEVICE_VECTOR)
-		return -ENOSPC;
-	if (test_and_set_bit(pos, ia64_vector_mask))
-		goto again;
+	unsigned long flags;
+	int vector, cpu;
+	cpumask_t domain;
+
+	vector = -ENOSPC;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	if (irq < 0) {
+		goto out;
+	}
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
+	if (vector < 0)
+		goto out;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+ out:
+	spin_unlock_irqrestore(&vector_lock, flags);
 	return vector;
 }
 
 void
 free_irq_vector (int vector)
 {
-	int pos;
-
-	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
+	if (vector < IA64_FIRST_DEVICE_VECTOR ||
+	    vector > IA64_LAST_DEVICE_VECTOR)
 		return;
-
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	if (!test_and_clear_bit(pos, ia64_vector_mask))
-		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
+	clear_irq_vector(vector);
 }
 
 int
 reserve_irq_vector (int vector)
 {
-	int pos;
-
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
 		return -EINVAL;
-
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	return test_and_set_bit(pos, ia64_vector_mask);
+	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
+}
+
+/*
+ * Initialize vector_irq on a new cpu. This function must be called
+ * with vector_lock held.
+ */
+void __setup_vector_irq(int cpu)
+{
+	int irq, vector;
+
+	/* Clear vector_irq */
+	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
+		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+	/* Mark the inuse vectors */
+	for (irq = 0; irq < NR_IRQS; ++irq) {
+		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+			continue;
+		vector = irq_to_vector(irq);
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	}
+}
+
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+static enum vector_domain_type {
+	VECTOR_DOMAIN_NONE,
+	VECTOR_DOMAIN_PERCPU
+} vector_domain_type = VECTOR_DOMAIN_NONE;
+
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
+		return cpumask_of_cpu(cpu);
+	return CPU_MASK_ALL;
+}
+
+static int __init parse_vector_domain(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+	if (!strcmp(arg, "percpu")) {
+		vector_domain_type = VECTOR_DOMAIN_PERCPU;
+		no_int_routing = 1;
+	}
+	return 1;
+}
+early_param("vector", parse_vector_domain);
+#else
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	return CPU_MASK_ALL;
+}
+#endif
+
+
+void destroy_and_reserve_irq(unsigned int irq)
+{
+	dynamic_irq_cleanup(irq);
+
+	clear_irq_vector(irq);
+	reserve_irq(irq);
+}
+
+static int __reassign_irq_vector(int irq, int cpu)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	int vector;
+	cpumask_t domain;
+
+	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+		return -EINVAL;
+	if (cpu_isset(cpu, cfg->domain))
+		return 0;
+	domain = vector_allocation_domain(cpu);
+	vector = find_unassigned_vector(domain);
+	if (vector < 0)
+		return -ENOSPC;
+	__clear_irq_vector(irq);
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+	return 0;
+}
+
+int reassign_irq_vector(int irq, int cpu)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __reassign_irq_vector(irq, cpu);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
 }
 
 /*
@@ -111,18 +342,35 @@ reserve_irq_vector (int vector)
  */
 int create_irq(void)
 {
-	int vector = assign_irq_vector(AUTO_ASSIGN);
-
-	if (vector >= 0)
-		dynamic_irq_init(vector);
-
-	return vector;
+	unsigned long flags;
+	int irq, vector, cpu;
+	cpumask_t domain;
+
+	irq = vector = -ENOSPC;
+	spin_lock_irqsave(&vector_lock, flags);
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
+	if (vector < 0)
+		goto out;
+	irq = find_unassigned_irq();
+	if (irq < 0)
+		goto out;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+ out:
+	spin_unlock_irqrestore(&vector_lock, flags);
+	if (irq >= 0)
+		dynamic_irq_init(irq);
+	return irq;
 }
 
 void destroy_irq(unsigned int irq)
 {
 	dynamic_irq_cleanup(irq);
-	free_irq_vector(irq);
+	clear_irq_vector(irq);
 }
 
 #ifdef CONFIG_SMP

@@ -301,14 +549,13 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	irq_desc_t *desc;
 	unsigned int irq;
 
-	for (irq = 0; irq < NR_IRQS; ++irq)
-		if (irq_to_vector(irq) == vec) {
-			desc = irq_desc + irq;
-			desc->status |= IRQ_PER_CPU;
-			desc->chip = &irq_type_ia64_lsapic;
-			if (action)
-				setup_irq(irq, action);
-		}
+	irq = vec;
+	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
+	desc = irq_desc + irq;
+	desc->status |= IRQ_PER_CPU;
+	desc->chip = &irq_type_ia64_lsapic;
+	if (action)
+		setup_irq(irq, action);
 }
 
 void __init
arch/ia64/kernel/msi_ia64.c

@@ -13,6 +13,7 @@
 #define MSI_DATA_VECTOR_SHIFT		0
 #define	 MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK		0xffffff00
 
 #define MSI_DATA_DELIVERY_SHIFT		8
 #define	 MSI_DATA_DELIVERY_FIXED	(0 << MSI_DATA_DELIVERY_SHIFT)

@@ -50,17 +51,29 @@ static struct irq_chip ia64_msi_chip;
 static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 {
 	struct msi_msg msg;
-	u32 addr;
+	u32 addr, data;
+	int cpu = first_cpu(cpu_mask);
+
+	if (!cpu_online(cpu))
+		return;
+
+	if (reassign_irq_vector(irq, cpu))
+		return;
 
 	read_msi_msg(irq, &msg);
 
 	addr = msg.address_lo;
 	addr &= MSI_ADDR_DESTID_MASK;
-	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
+	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 	msg.address_lo = addr;
 
+	data = msg.data;
+	data &= MSI_DATA_VECTOR_MASK;
+	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
+	msg.data = data;
+
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpu_mask;
+	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 }
 #endif /* CONFIG_SMP */

@@ -69,13 +82,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	struct msi_msg	msg;
 	unsigned long	dest_phys_id;
 	int	irq, vector;
+	cpumask_t mask;
 
 	irq = create_irq();
 	if (irq < 0)
 		return irq;
 
 	set_irq_msi(irq, desc);
-	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
+	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+	dest_phys_id = cpu_physical_id(first_cpu(mask));
 	vector = irq_to_vector(irq);
 
 	msg.address_hi = 0;
arch/ia64/kernel/smpboot.c

@@ -395,9 +395,13 @@ smp_callin (void)
 	fix_b0_for_bsp();
 
 	lock_ipi_calllock();
+	spin_lock(&vector_lock);
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	unlock_ipi_calllock();
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
+	spin_unlock(&vector_lock);
 
 	smp_setup_percpu_timer();
include/asm-ia64/hw_irq.h

@@ -90,13 +90,27 @@ enum {
 extern __u8 isa_irq_to_vector_map[16];
 #define isa_irq_to_vector(x)	isa_irq_to_vector_map[(x)]
 
+struct irq_cfg {
+	ia64_vector vector;
+	cpumask_t domain;
+};
+extern spinlock_t vector_lock;
+extern struct irq_cfg irq_cfg[NR_IRQS];
+#define irq_to_domain(x)	irq_cfg[(x)].domain
+DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
+
 extern struct hw_interrupt_type irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
 
+extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
 extern int assign_irq_vector (int irq);	/* allocate a free vector */
 extern void free_irq_vector (int vector);
 extern int reserve_irq_vector (int vector);
+extern void __setup_vector_irq(int cpu);
+extern int reassign_irq_vector(int irq, int cpu);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
+extern int check_irq_used (int irq);
+extern void destroy_and_reserve_irq (unsigned int irq);
 
 static inline void ia64_resend_irq(unsigned int vector)
 {

@@ -113,7 +127,7 @@ extern irq_desc_t irq_desc[NR_IRQS];
 static inline unsigned int
 __ia64_local_vector_to_irq (ia64_vector vec)
 {
-	return (unsigned int) vec;
+	return __get_cpu_var(vector_irq)[vec];
 }
 #endif

@@ -131,7 +145,7 @@ __ia64_local_vector_to_irq (ia64_vector vec)
 static inline ia64_vector
 irq_to_vector (int irq)
 {
-	return (ia64_vector) irq;
+	return irq_cfg[irq].vector;
 }
 
 /*
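To make the new two-level mapping concrete, here is a small user-space sketch (my illustration, not kernel code; only the names irq_cfg, vector_irq, irq_to_vector and the reverse lookup mirror the declarations above, everything else is simplified and the NR_CPUS/irq values are made up). irq_cfg[] maps an irq to its vector and CPU domain, while the per-CPU vector_irq[] table provides the reverse vector-to-irq lookup that __ia64_local_vector_to_irq() now performs instead of assuming an identity mapping.

#include <stdio.h>

#define NR_VECTORS 256
#define NR_CPUS    4
#define NR_IRQS    (NR_VECTORS + 32 * NR_CPUS)

/* simplified stand-ins for the kernel's irq_cfg[] and per-CPU vector_irq[] */
struct irq_cfg { int vector; unsigned long domain; };

static struct irq_cfg irq_cfg[NR_IRQS];
static int vector_irq[NR_CPUS][NR_VECTORS];

static int irq_to_vector(int irq)               { return irq_cfg[irq].vector; }
static int local_vector_to_irq(int cpu, int v)  { return vector_irq[cpu][v]; }

int main(void)
{
	int cpu, vec, irq = 300;	/* an irq above the old 1:1 range of 256 */

	/* -1 plays the role of IA64_SPURIOUS_INT_VECTOR here */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (vec = 0; vec < NR_VECTORS; vec++)
			vector_irq[cpu][vec] = -1;

	/* bind irq 300 to vector 0x40 in a domain containing only cpu 1 */
	irq_cfg[irq].vector = 0x40;
	irq_cfg[irq].domain = 1UL << 1;
	vector_irq[1][0x40] = irq;

	printf("irq %d -> vector 0x%x\n", irq, irq_to_vector(irq));
	printf("cpu 1, vector 0x40 -> irq %d\n", local_vector_to_irq(1, 0x40));
	return 0;
}

Because the domain is per-irq, the same vector number (0x40 here) remains free for a different irq on the other CPUs, which is the point of the per-CPU vector domain.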
include/asm-ia64/iosapic.h

@@ -47,19 +47,21 @@
 #define	IOSAPIC_MASK_SHIFT		16
 #define	IOSAPIC_MASK			(1<<IOSAPIC_MASK_SHIFT)
 
+#define IOSAPIC_VECTOR_MASK		0xffffff00
+
 #ifndef __ASSEMBLY__
 
 #ifdef	CONFIG_IOSAPIC
 
 #define NR_IOSAPICS			256
 
-static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
+static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg)
 {
 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
 	return readl(iosapic + IOSAPIC_WINDOW);
 }
 
-static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
 {
 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
 	writel(val, iosapic + IOSAPIC_WINDOW);
include/asm-ia64/irq.h

@@ -14,8 +14,13 @@
 #include <linux/types.h>
 #include <linux/cpumask.h>
 
-#define NR_IRQS		256
-#define NR_IRQ_VECTORS	NR_IRQS
+#define NR_VECTORS	256
+
+#if (NR_VECTORS + 32 * NR_CPUS) < 1024
+#define NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
+#else
+#define NR_IRQS 1024
+#endif
 
 static __inline__ int
 irq_canonicalize (int irq)
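A worked example of the new sizing (my arithmetic, not part of the patch): with NR_CPUS=4, NR_VECTORS + 32 * NR_CPUS = 256 + 128 = 384, which is below 1024, so NR_IRQS becomes 384; with NR_CPUS=64 the sum is 256 + 2048 = 2304, so NR_IRQS is capped at 1024. Under the old definition NR_IRQS was always 256, i.e. exactly one irq per vector, which is the identity mapping the rest of this commit removes.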