Commit eac4345b
authored Jul 31, 2008 by Ingo Molnar

Merge branch 'x86/spinlocks' into x86/xen

parents: 5fbf2465, d5de8841
Showing 7 changed files, with 226 additions and 193 deletions:

    arch/x86/kernel/Makefile                +2    -2
    arch/x86/kernel/paravirt-spinlocks.c    +31   -0
    arch/x86/kernel/paravirt.c              +0    -23
    arch/x86/xen/Makefile                   +7    -1
    arch/x86/xen/smp.c                      +0    -167
    arch/x86/xen/spinlock.c                 +183  -0
    arch/x86/xen/xen-ops.h                  +3    -0
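In sum, this merge pulls in the x86/spinlocks work: the pv_lock_ops definitions move from arch/x86/kernel/paravirt.c into the new paravirt-spinlocks.c, the Xen spinlock implementation moves from arch/x86/xen/smp.c into the new spinlock.c, the Makefiles strip -pg from the new objects so they stay out of FTRACE profiling, and xen-ops.h gains declarations for the relocated init hooks.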
arch/x86/kernel/Makefile

@@ -10,7 +10,7 @@ ifdef CONFIG_FTRACE
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
-CFLAGS_REMOVE_paravirt.o = -pg
+CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 endif

@@ -89,7 +89,7 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
 obj-$(CONFIG_VMI)               += vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST)         += kvm.o
 obj-$(CONFIG_KVM_CLOCK)         += kvmclock.o
-obj-$(CONFIG_PARAVIRT)          += paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT)          += paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)    += pvclock.o
 obj-$(CONFIG_PCSPKR_PLATFORM)   += pcspeaker.o
arch/x86/kernel/paravirt-spinlocks.c (new file, mode 100644)

/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/paravirt.h>

struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
        .spin_is_locked = __ticket_spin_is_locked,
        .spin_is_contended = __ticket_spin_is_contended,

        .spin_lock = __ticket_spin_lock,
        .spin_trylock = __ticket_spin_trylock,
        .spin_unlock = __ticket_spin_unlock,
#endif
};
EXPORT_SYMBOL_GPL(pv_lock_ops);

void __init paravirt_use_bytelocks(void)
{
#ifdef CONFIG_SMP
        pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
        pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
        pv_lock_ops.spin_lock = __byte_spin_lock;
        pv_lock_ops.spin_trylock = __byte_spin_trylock;
        pv_lock_ops.spin_unlock = __byte_spin_unlock;
#endif
}
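For orientation, a minimal sketch (not part of the commit) of how a lock operation reaches whichever implementation is installed. The wrapper name is invented here; the kernel itself dispatches through its paravirt call macros in <asm/paravirt.h> rather than a plain indirect call:

static inline void sketch_raw_spin_lock(struct raw_spinlock *lock)
{
        /*
         * Indirect call through the ops table defined above.  Boot code
         * can retarget these pointers; e.g. paravirt_use_bytelocks()
         * swaps the ticket-lock entries for the __byte_spin_* ones, and
         * the Xen code below installs its own hooks the same way.
         */
        pv_lock_ops.spin_lock(lock);
}

The upshot is that call sites never change; only the pointers in pv_lock_ops do.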
arch/x86/kernel/paravirt.c

@@ -268,17 +268,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
        return __get_cpu_var(paravirt_lazy_mode);
 }
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-       pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-       pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-       pv_lock_ops.spin_lock = __byte_spin_lock;
-       pv_lock_ops.spin_trylock = __byte_spin_trylock;
-       pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
-
 struct pv_info pv_info = {
        .name = "bare hardware",
        .paravirt_enabled = 0,

@@ -465,18 +454,6 @@ struct pv_mmu_ops pv_mmu_ops = {
        .set_fixmap = native_set_fixmap,
 };
 
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
-       .spin_is_locked = __ticket_spin_is_locked,
-       .spin_is_contended = __ticket_spin_is_contended,
-
-       .spin_lock = __ticket_spin_lock,
-       .spin_trylock = __ticket_spin_trylock,
-       .spin_unlock = __ticket_spin_unlock,
-#endif
-};
-EXPORT_SYMBOL_GPL(pv_lock_ops);
-
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL    (pv_cpu_ops);
 EXPORT_SYMBOL    (pv_mmu_ops);
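Both paravirt.c hunks are pure deletions: the paravirt_use_bytelocks() definition and the pv_lock_ops table (with its export) are removed here because they now live, unchanged, in the new paravirt-spinlocks.c above.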
arch/x86/xen/Makefile

+ifdef CONFIG_FTRACE
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_spinlock.o = -pg
+CFLAGS_REMOVE_time.o = -pg
+endif
+
 obj-y           := enlighten.o setup.o multicalls.o mmu.o \
                        time.o xen-asm_$(BITS).o grant-table.o suspend.o
 
-obj-$(CONFIG_SMP)       += smp.o
+obj-$(CONFIG_SMP)       += smp.o spinlock.o
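Context for the CFLAGS_REMOVE_*.o = -pg lines (my gloss, not part of the diff): with CONFIG_FTRACE enabled, -pg makes the compiler emit an mcount call at every function entry. The usual reason such low-level primitives are excluded is that the tracer's own machinery relies on them, so instrumenting them risks recursion. Since Kbuild's flag removal is per object file, splitting the spinlock code into its own .o is exactly what makes this exclusion possible without un-instrumenting the rest of smp.c or paravirt.c.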
arch/x86/xen/smp.c

@@ -15,7 +15,6 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>

@@ -36,8 +35,6 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static void __cpuinit xen_init_lock_cpu(int cpu);
-
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);

@@ -419,170 +416,6 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-struct xen_spinlock {
-       unsigned char lock;             /* 0 -> free; 1 -> locked */
-       unsigned short spinners;        /* count of waiting cpus */
-};
-
-static int xen_spin_is_locked(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       return xl->lock != 0;
-}
-
-static int xen_spin_is_contended(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       /* Not strictly true; this is only the count of contended
-          lock-takers entering the slow path. */
-       return xl->spinners != 0;
-}
-
-static int xen_spin_trylock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       u8 old = 1;
-
-       asm("xchgb %b0,%1"
-           : "+q" (old), "+m" (xl->lock) : : "memory");
-
-       return old == 0;
-}
-
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-
-static inline void spinning_lock(struct xen_spinlock *xl)
-{
-       __get_cpu_var(lock_spinners) = xl;
-       wmb();                  /* set lock of interest before count */
-       asm(LOCK_PREFIX " incw %0"
-           : "+m" (xl->spinners) : : "memory");
-}
-
-static inline void unspinning_lock(struct xen_spinlock *xl)
-{
-       asm(LOCK_PREFIX " decw %0"
-           : "+m" (xl->spinners) : : "memory");
-       wmb();                  /* decrement count before clearing lock */
-       __get_cpu_var(lock_spinners) = NULL;
-}
-
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       int irq = __get_cpu_var(lock_kicker_irq);
-       int ret;
-
-       /* If kicker interrupts not initialized yet, just spin */
-       if (irq == -1)
-               return 0;
-
-       /* announce we're spinning */
-       spinning_lock(xl);
-
-       /* clear pending */
-       xen_clear_irq_pending(irq);
-
-       /* check again make sure it didn't become free while
-          we weren't looking  */
-       ret = xen_spin_trylock(lock);
-       if (ret)
-               goto out;
-
-       /* block until irq becomes pending */
-       xen_poll_irq(irq);
-       kstat_this_cpu.irqs[irq]++;
-
-out:
-       unspinning_lock(xl);
-       return ret;
-}
-
-static void xen_spin_lock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       int timeout;
-       u8 oldval;
-
-       do {
-               timeout = 1 << 10;
-
-               asm("1: xchgb %1,%0\n"
-                   "   testb %1,%1\n"
-                   "   jz 3f\n"
-                   "2: rep;nop\n"
-                   "   cmpb $0,%0\n"
-                   "   je 1b\n"
-                   "   dec %2\n"
-                   "   jnz 2b\n"
-                   "3:\n"
-                   : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
-                   : "1" (1)
-                   : "memory");
-       } while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
-}
-
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
-{
-       int cpu;
-
-       for_each_online_cpu(cpu) {
-               /* XXX should mix up next cpu selection */
-               if (per_cpu(lock_spinners, cpu) == xl) {
-                       xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-                       break;
-               }
-       }
-}
-
-static void xen_spin_unlock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       smp_wmb();              /* make sure no writes get moved after unlock */
-       xl->lock = 0;           /* release lock */
-
-       /* make sure unlock happens before kick */
-       barrier();
-
-       if (unlikely(xl->spinners))
-               xen_spin_unlock_slow(xl);
-}
-
-static __cpuinit void xen_init_lock_cpu(int cpu)
-{
-       int irq;
-       const char *name;
-
-       name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
-       irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
-                                    cpu,
-                                    xen_reschedule_interrupt,
-                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
-                                    name,
-                                    NULL);
-
-       if (irq >= 0) {
-               disable_irq(irq); /* make sure it's never delivered */
-               per_cpu(lock_kicker_irq, cpu) = irq;
-       }
-
-       printk("cpu %d spinlock event irq %d\n", cpu, irq);
-}
-
-static void __init xen_init_spinlocks(void)
-{
-       pv_lock_ops.spin_is_locked = xen_spin_is_locked;
-       pv_lock_ops.spin_is_contended = xen_spin_is_contended;
-       pv_lock_ops.spin_lock = xen_spin_lock;
-       pv_lock_ops.spin_trylock = xen_spin_trylock;
-       pv_lock_ops.spin_unlock = xen_spin_unlock;
-}
-
 static const struct smp_ops xen_smp_ops __initdata = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
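The block removed above reappears nearly verbatim in the new arch/x86/xen/spinlock.c below. Two visible differences: the kicker IPI is now bound to a dedicated dummy_handler() that BUG()s if ever actually delivered, where the smp.c version reused xen_reschedule_interrupt; and xen_init_lock_cpu()/xen_init_spinlocks() lose their static qualifiers, so the existing callers in smp.c (not shown in this diff; smp.c has no added lines) now resolve them through the declarations added to xen-ops.h.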
arch/x86/xen/spinlock.c (new file, mode 100644)

/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"

struct xen_spinlock {
        unsigned char lock;             /* 0 -> free; 1 -> locked */
        unsigned short spinners;        /* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        /* Not strictly true; this is only the count of contended
           lock-takers entering the slow path. */
        return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        u8 old = 1;

        asm("xchgb %b0,%1"
            : "+q" (old), "+m" (xl->lock) : : "memory");

        return old == 0;
}

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

static inline void spinning_lock(struct xen_spinlock *xl)
{
        __get_cpu_var(lock_spinners) = xl;
        wmb();                  /* set lock of interest before count */
        asm(LOCK_PREFIX " incw %0"
            : "+m" (xl->spinners) : : "memory");
}

static inline void unspinning_lock(struct xen_spinlock *xl)
{
        asm(LOCK_PREFIX " decw %0"
            : "+m" (xl->spinners) : : "memory");
        wmb();                  /* decrement count before clearing lock */
        __get_cpu_var(lock_spinners) = NULL;
}

static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        int irq = __get_cpu_var(lock_kicker_irq);
        int ret;

        /* If kicker interrupts not initialized yet, just spin */
        if (irq == -1)
                return 0;

        /* announce we're spinning */
        spinning_lock(xl);

        /* clear pending */
        xen_clear_irq_pending(irq);

        /* check again make sure it didn't become free while
           we weren't looking  */
        ret = xen_spin_trylock(lock);
        if (ret)
                goto out;

        /* block until irq becomes pending */
        xen_poll_irq(irq);
        kstat_this_cpu.irqs[irq]++;

out:
        unspinning_lock(xl);
        return ret;
}

static void xen_spin_lock(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        int timeout;
        u8 oldval;

        do {
                timeout = 1 << 10;

                asm("1: xchgb %1,%0\n"
                    "   testb %1,%1\n"
                    "   jz 3f\n"
                    "2: rep;nop\n"
                    "   cmpb $0,%0\n"
                    "   je 1b\n"
                    "   dec %2\n"
                    "   jnz 2b\n"
                    "3:\n"
                    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
                    : "1" (1)
                    : "memory");
        } while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
}

static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
        int cpu;

        for_each_online_cpu(cpu) {
                /* XXX should mix up next cpu selection */
                if (per_cpu(lock_spinners, cpu) == xl) {
                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                        break;
                }
        }
}

static void xen_spin_unlock(struct raw_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        smp_wmb();              /* make sure no writes get moved after unlock */
        xl->lock = 0;           /* release lock */

        /* make sure unlock happens before kick */
        barrier();

        if (unlikely(xl->spinners))
                xen_spin_unlock_slow(xl);
}

static irqreturn_t dummy_handler(int irq, void *dev_id)
{
        BUG();
        return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
        int irq;
        const char *name;

        name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
        irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
                                     cpu,
                                     dummy_handler,
                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                     name,
                                     NULL);

        if (irq >= 0) {
                disable_irq(irq); /* make sure it's never delivered */
                per_cpu(lock_kicker_irq, cpu) = irq;
        }

        printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void __init xen_init_spinlocks(void)
{
        pv_lock_ops.spin_is_locked = xen_spin_is_locked;
        pv_lock_ops.spin_is_contended = xen_spin_is_contended;
        pv_lock_ops.spin_lock = xen_spin_lock;
        pv_lock_ops.spin_trylock = xen_spin_trylock;
        pv_lock_ops.spin_unlock = xen_spin_unlock;
}
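The asm fast path in xen_spin_lock() is dense, so here is a readability paraphrase (mine, not code from the commit): take the lock with an atomic byte exchange; if that fails, busy-wait politely for a bounded number of iterations, retrying the exchange whenever the lock looks free. xchg() stands in for the xchgb and cpu_relax() for "rep;nop":

static void paraphrase_spin_fastpath(struct xen_spinlock *xl, int *timeout)
{
        u8 oldval = xchg(&xl->lock, 1);         /* label 1: try to take it */

        while (oldval != 0) {
                cpu_relax();                    /* label 2: polite busy-wait */
                if (xl->lock == 0) {
                        oldval = xchg(&xl->lock, 1);    /* looks free: retry */
                        continue;
                }
                if (--(*timeout) == 0)
                        break;                  /* give up; the outer do/while
                                                   falls back to
                                                   xen_spin_lock_slow() */
        }
}

And to see how the pieces are wired at boot, an invented helper (the real call sites live in arch/x86/xen/smp.c and are not part of this diff):

static void sketch_xen_lock_bringup(int cpu)
{
        if (cpu == 0)
                xen_init_spinlocks();   /* install xen_spin_* in pv_lock_ops */
        xen_init_lock_cpu(cpu);         /* bind this CPU's kicker IPI; until
                                           this runs, xen_spin_lock_slow()
                                           sees irq == -1 and simply keeps
                                           spinning */
}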
arch/x86/xen/xen-ops.h

@@ -50,6 +50,9 @@ void __init xen_setup_vcpu_info_placement(void);
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
+void __init xen_init_spinlocks(void);
+__cpuinit void xen_init_lock_cpu(int cpu);
+
 extern cpumask_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}