Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
L
linux-davinci
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Redmine
Redmine
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Operations
Operations
Metrics
Environments
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
linux
linux-davinci
Commits
de93d7b1
Commit
de93d7b1
authored
Jul 28, 2009
by
Thomas Gleixner
Browse files
Options
Browse Files
Download
Plain Diff
Merge branch 'rt/percpu' into rt/base
parents
fc2c504b
849ac1ba
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
77 additions
and
1 deletion
+77
-1
include/asm-generic/percpu.h
include/asm-generic/percpu.h
+14
-1
include/linux/percpu-defs.h
include/linux/percpu-defs.h
+18
-0
include/linux/percpu.h
include/linux/percpu.h
+45
-0
No files found.
include/asm-generic/percpu.h
View file @
de93d7b1
...
...
@@ -5,6 +5,9 @@
#include <linux/threads.h>
#include <linux/percpu-defs.h>
/* Mangled name of the spinlock guarding "locked" per-CPU variable `var`
 * (the lock half created by DEFINE_PER_CPU_SPINLOCK / DEFINE_PER_CPU_LOCKED
 * in <linux/percpu-defs.h>). */
#define __per_cpu_var_lock(var) per_cpu__lock_##var##_locked
/* Mangled name of the data half of "locked" per-CPU variable `var`. */
#define __per_cpu_var_lock_var(var) per_cpu__##var##_locked
#ifdef CONFIG_SMP
/*
...
...
@@ -56,6 +59,14 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __raw_get_cpu_var(var) \
(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
/* Lvalue of the spinlock guarding locked per-CPU variable `var` on `cpu`. */
#define per_cpu_lock(var, cpu) \
(*SHIFT_PERCPU_PTR(&__per_cpu_var_lock(var), per_cpu_offset(cpu)))
/* Lvalue of the data half of locked per-CPU variable `var` on `cpu`. */
#define per_cpu_var_locked(var, cpu) \
(*SHIFT_PERCPU_PTR(&__per_cpu_var_lock_var(var), per_cpu_offset(cpu)))
/* SMP forms of the accessors used by get_cpu_var_locked() /
 * put_cpu_var_locked() in <linux/percpu.h>; both take an explicit
 * `cpu` argument and address that CPU's copy. */
#define __get_cpu_lock(var, cpu) \
per_cpu_lock(var, cpu)
#define __get_cpu_var_locked(var, cpu) \
per_cpu_var_locked(var, cpu)
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/* NOTE(review): the declaration below was split across lines by the page
 * extraction; in the real header it reads
 * `extern void setup_per_cpu_areas(void);`. */
extern
void
setup_per_cpu_areas
(
void
);
...
...
@@ -64,9 +75,11 @@ extern void setup_per_cpu_areas(void);
#else
/* ! SMP */
/* UP: only one copy of each per-CPU variable exists, so `cpu` is
 * evaluated solely for its side effects and the plain symbol is used. */
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
#define per_cpu_var_locked(var, cpu) (*((void)(cpu), &__per_cpu_var_lock_var(var)))
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
/* UP forms of the locked accessors.
 * NOTE(review): unlike per_cpu()/per_cpu_var_locked() above, these drop
 * `cpu` entirely rather than evaluating `(void)(cpu)`, so a `cpu`
 * argument with side effects behaves differently on UP vs SMP —
 * confirm no caller relies on that. */
#define __get_cpu_lock(var, cpu) __per_cpu_var_lock(var)
#define __get_cpu_var_locked(var, cpu) __per_cpu_var_lock_var(var)
#endif
/* SMP */
#ifndef PER_CPU_BASE_SECTION
...
...
include/linux/percpu-defs.h
View file @
de93d7b1
...
...
@@ -37,6 +37,22 @@
#define DEFINE_PER_CPU(type, name) \
DEFINE_PER_CPU_SECTION(type, name, "")
/*
 * The "locked" per-CPU definitions below were added for the RT patch:
 * each variable gets a companion per-CPU spinlock so PREEMPT_RT code
 * can hold per-CPU data across a reschedule instead of disabling
 * preemption.  (Original author wondered whether corresponding
 * DECLARE_*'s are needed — DECLARE_PER_CPU_LOCKED follows.) (clrkwllms)
 */
/* Define the companion per-CPU spinlock `per_cpu__lock_<name>_locked`,
 * placed in PER_CPU_BASE_SECTION plus the optional `section` suffix. */
#define DEFINE_PER_CPU_SPINLOCK(name, section) \
__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
PER_CPU_ATTRIBUTES __DEFINE_SPINLOCK(per_cpu__lock_##name##_locked);
/* Declare (extern) both halves of a locked per-CPU variable:
 * the guarding spinlock and the `<name>_locked` data. */
#define DECLARE_PER_CPU_LOCKED(type, name) \
extern PER_CPU_ATTRIBUTES spinlock_t __per_cpu_var_lock(name); \
extern PER_CPU_ATTRIBUTES __typeof__(type) __per_cpu_var_lock_var(name)
/* Define both halves: the spinlock and the `<name>_locked` data copy. */
#define DEFINE_PER_CPU_LOCKED(type, name) \
DEFINE_PER_CPU_SPINLOCK(name, "") \
DEFINE_PER_CPU_SECTION(type, name##_locked, "")
/*
* Declaration/definition used for per-CPU variables that must come first in
* the set of variables.
...
...
@@ -79,7 +95,9 @@
* Intermodule exports for per-CPU variables.
*/
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
/* Export the data half (`per_cpu__<var>_locked`) of a locked per-CPU
 * variable to modules.
 * NOTE(review): the companion spinlock `per_cpu__lock_<var>_locked` is
 * not exported here; a module using get_cpu_var_locked() on an exported
 * variable would also reference the lock symbol — confirm whether a
 * separate export of the lock is needed. */
#define EXPORT_PER_CPU_LOCKED_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var##_locked)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
#define EXPORT_PER_CPU_LOCKED_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var##_locked)
#endif
/* _LINUX_PERCPU_DEFS_H */
include/linux/percpu.h
View file @
de93d7b1
...
...
@@ -32,6 +32,51 @@
&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
/*
 * Per-CPU data structures with an additional lock - useful for
 * PREEMPT_RT code that wants to reschedule but also wants
 * per-CPU data structures.
 *
 * 'cpu' gets updated with the CPU the task is currently executing on.
 *
 * NOTE: on normal !PREEMPT_RT kernels these per-CPU variables
 * are the same as the normal per-CPU variables, so there is no
 * runtime overhead.
 */
#ifdef CONFIG_PREEMPT_RT
/*
 * RT: take the variable's companion spinlock instead of disabling
 * preemption.  The task may migrate between reading the CPU id and
 * acquiring that CPU's lock, so after locking we re-check that the CPU
 * is still online and retry on another CPU otherwise.  Stores the CPU
 * actually locked through `cpuptr` and yields an lvalue of that CPU's
 * data; pair with put_cpu_var_locked().
 * NOTE(review): the `again:` label is expanded inline, so using this
 * macro twice in the same function would define a duplicate label —
 * confirm callers invoke it at most once per function.
 */
#define get_cpu_var_locked(var, cpuptr) \
(*({ \
spinlock_t *__lock; \
int __cpu; \
\
again: \
__cpu = raw_smp_processor_id(); \
__lock = &__get_cpu_lock(var, __cpu); \
spin_lock(__lock); \
if (!cpu_online(__cpu)) { \
spin_unlock(__lock); \
goto again; \
} \
*(cpuptr) = __cpu; \
&__get_cpu_var_locked(var, __cpu); \
}))
#else
/*
 * !RT: disable preemption just long enough to get a stable CPU id and
 * take that CPU's lock.  The preempt_enable() after spin_lock() is safe
 * presumably because !RT spin_lock itself keeps preemption disabled
 * while held, so the task cannot migrate off `__cpu` — NOTE(review):
 * confirm against the !RT spinlock implementation.
 */
#define get_cpu_var_locked(var, cpuptr) \
(*({ \
int __cpu; \
\
preempt_disable(); \
__cpu = smp_processor_id(); \
spin_lock(&__get_cpu_lock(var, __cpu)); \
preempt_enable(); \
*(cpuptr) = __cpu; \
&__get_cpu_var_locked(var, __cpu); \
}))
#endif
/* Release the lock taken by get_cpu_var_locked(); `cpu` must be the
 * value stored through `cpuptr` by the matching get. */
#define put_cpu_var_locked(var, cpu) \
do { (void)cpu; spin_unlock(&__get_cpu_lock(var, cpu)); } while (0)
#ifdef CONFIG_SMP
#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment