Commit 95660182 authored by Ingo Molnar, committed by Thomas Gleixner

percpu: add percpu locked infrastructure

RT needs per cpu data structures protected by per cpu locks instead of
disabling preemption. Add the infrastructure for per cpu locked data.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 769610ed
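For orientation, here is a minimal usage sketch of the API this commit introduces; `hypo_count` and the function around it are hypothetical, not part of the patch. The contrast with the classic accessors is the whole point: get_cpu_var() pins the task by disabling preemption, while get_cpu_var_locked() takes a per-CPU spinlock instead (a lock that on PREEMPT_RT can itself sleep), so the critical section stays preemptible.

#include <linux/percpu.h>

/* Hypothetical per-CPU counter using the new locked infrastructure;
 * this emits both the data and its companion per-CPU spinlock. */
DEFINE_PER_CPU_LOCKED(long, hypo_count);

static void hypo_count_inc(void)
{
	int cpu;

	/*
	 * Locks the spinlock of the CPU we are currently running on,
	 * stores that CPU's id in 'cpu', and yields an lvalue for that
	 * CPU's instance of the data. Preemption stays enabled.
	 */
	get_cpu_var_locked(hypo_count, &cpu)++;
	put_cpu_var_locked(hypo_count, cpu);
}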
@@ -5,6 +5,9 @@
#include <linux/threads.h>
#include <linux/percpu-defs.h>
#define __per_cpu_var_lock(var) per_cpu__lock_##var##_locked
#define __per_cpu_var_lock_var(var) per_cpu__##var##_locked
#ifdef CONFIG_SMP
/*
@@ -56,6 +59,14 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __raw_get_cpu_var(var) \
	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))

#define per_cpu_lock(var, cpu) \
	(*SHIFT_PERCPU_PTR(&__per_cpu_var_lock(var), per_cpu_offset(cpu)))
#define per_cpu_var_locked(var, cpu) \
	(*SHIFT_PERCPU_PTR(&__per_cpu_var_lock_var(var), per_cpu_offset(cpu)))
#define __get_cpu_lock(var, cpu) \
	per_cpu_lock(var, cpu)
#define __get_cpu_var_locked(var, cpu) \
	per_cpu_var_locked(var, cpu)
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
@@ -64,9 +75,11 @@ extern void setup_per_cpu_areas(void);
#else /* ! SMP */
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
#define per_cpu_var_locked(var, cpu) (*((void)(cpu), &__per_cpu_var_lock_var(var)))
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
#define __get_cpu_lock(var, cpu) __per_cpu_var_lock(var)
#define __get_cpu_var_locked(var, cpu) __per_cpu_var_lock_var(var)
#endif /* SMP */
#ifndef PER_CPU_BASE_SECTION
@@ -37,6 +37,22 @@
#define DEFINE_PER_CPU(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * next two added for RT patch
 * (wonder if we need corresponding DECLARE_*'s?) (clrkwllms)
 */
#define DEFINE_PER_CPU_SPINLOCK(name, section) \
	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
	PER_CPU_ATTRIBUTES __DEFINE_SPINLOCK(per_cpu__lock_##name##_locked);

#define DECLARE_PER_CPU_LOCKED(type, name) \
	extern PER_CPU_ATTRIBUTES spinlock_t __per_cpu_var_lock(name); \
	extern PER_CPU_ATTRIBUTES __typeof__(type) __per_cpu_var_lock_var(name)

#define DEFINE_PER_CPU_LOCKED(type, name) \
	DEFINE_PER_CPU_SPINLOCK(name, "") \
	DEFINE_PER_CPU_SECTION(type, name##_locked, "")
/*
* Declaration/definition used for per-CPU variables that must come first in
* the set of variables.
@@ -79,7 +95,9 @@
* Intermodule exports for per-CPU variables.
*/
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_LOCKED_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var##_locked)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
#define EXPORT_PER_CPU_LOCKED_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var##_locked)
#endif /* _LINUX_PERCPU_DEFS_H */
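As a hedged sketch of how the declare/define/export trio from the hunks above would fit together across a shared header, the file that owns the variable, and a module, consider a hypothetical `hypo_stats` variable (all names here are illustrative only):

struct hypo_stats {
	unsigned long events;
};

/* In a (hypothetical) shared header, visible to modules: */
DECLARE_PER_CPU_LOCKED(struct hypo_stats, hypo_stats);

/* In the (hypothetical) core file that owns the variable: */
DEFINE_PER_CPU_LOCKED(struct hypo_stats, hypo_stats);
EXPORT_PER_CPU_LOCKED_SYMBOL(hypo_stats);

The _LOCKED export variants exist because the definition macros mangle the names: the data lives in per_cpu__hypo_stats_locked and its lock in per_cpu__lock_hypo_stats_locked, so a plain EXPORT_PER_CPU_SYMBOL(hypo_stats) would reference a symbol that was never defined.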
@@ -32,6 +32,29 @@
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
/*
 * Per-CPU data structures with an additional lock - useful for
 * PREEMPT_RT code that wants to reschedule but also wants
 * per-CPU data structures.
 *
 * 'cpu' gets updated with the CPU the task is currently executing on.
 *
 * NOTE: on normal !PREEMPT_RT kernels these per-CPU variables
 * are the same as the normal per-CPU variables, so there is no
 * runtime overhead.
 */
#define get_cpu_var_locked(var, cpuptr) \
(*({ \
	int __cpu = raw_smp_processor_id(); \
	\
	*(cpuptr) = __cpu; \
	spin_lock(&__get_cpu_lock(var, __cpu)); \
	&__get_cpu_var_locked(var, __cpu); \
}))

#define put_cpu_var_locked(var, cpu) \
	do { (void)cpu; spin_unlock(&__get_cpu_lock(var, cpu)); } while (0)
#ifdef CONFIG_SMP
#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
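To make the SMP indirection concrete, here is a hand expansion (illustrative only, reusing the hypothetical `hypo_count` from above) of what get_cpu_var_locked()/put_cpu_var_locked() boil down to once the name mangling and SHIFT_PERCPU_PTR plumbing from the first file are applied:

	int cpu;
	long *p;

	cpu = raw_smp_processor_id();	/* note: no preempt_disable() */
	spin_lock(&per_cpu(lock_hypo_count_locked, cpu));
	p = &per_cpu(hypo_count_locked, cpu);
	/* ... use *p; the task may be preempted or even migrate ... */
	spin_unlock(&per_cpu(lock_hypo_count_locked, cpu));

Even if the task migrates off 'cpu' inside the critical section, the data stays consistent: every path to that CPU's instance goes through the same per-CPU lock, which is what lets RT keep preemption enabled where mainline relies on disabling it.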