Commit 5d0859ce authored Feb 26, 2009 by Ingo Molnar
Merge branch 'sched/clock' into tracing/ftrace

Conflicts:
	kernel/sched_clock.c

Parents: 14131f2f 83ce4009
Showing 3 changed files with 39 additions and 25 deletions
arch/x86/kernel/cpu/intel.c   +7  -1
include/linux/sched.h         +10 -0
kernel/sched_clock.c          +22 -24
arch/x86/kernel/cpu/intel.c
@@ -4,6 +4,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>
@@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	/*
 	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
-	 * with P/T states and does not stop in deep C-states
+	 * with P/T states and does not stop in deep C-states.
+	 *
+	 * It is also reliable across cores and sockets. (but not across
+	 * cabinets - we turn it off in that case explicitly.)
 	 */
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+		sched_clock_stable = 1;
 	}
 }
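A note on the check above: c->x86_power caches EDX of CPUID leaf 0x80000007, and bit 8 is the "invariant TSC" flag. A minimal user-space sketch that reads the same flag (illustrative, not kernel code; uses GCC/Clang's <cpuid.h>):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 0x80000007: Advanced Power Management information */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000007 not supported");
		return 1;
	}

	/* EDX bit 8: invariant TSC - constant rate across P/T states,
	 * does not stop in deep C-states */
	printf("invariant TSC: %s\n", (edx & (1 << 8)) ? "yes" : "no");
	return 0;
}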
include/linux/sched.h
@@ -1672,6 +1672,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 extern void sched_clock_init(void);
kernel/sched_clock.c
@@ -24,12 +24,12 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/module.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -44,6 +44,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
 	/*
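The hunk above uses a pattern worth calling out: when CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is off, sched_clock_stable is a compile-time constant 1, so every `if (sched_clock_stable)` test in the file folds away at compile time without sprinkling #ifdefs over the call sites. A standalone sketch of the idiom (names are illustrative, not kernel code):

#include <stdio.h>

#ifdef HAVE_UNSTABLE_CLOCK
int clock_stable;			/* may be set at boot */
#else
static const int clock_stable = 1;	/* constant: the other branch is dead code */
#endif

static unsigned long long fast_read(void) { return 1000; }
static unsigned long long slow_fixup_read(void) { return 999; }

unsigned long long read_clock(void)
{
	/* Reads like a runtime test, but compiles down to the fast
	 * path alone whenever clock_stable is the const 1. */
	if (clock_stable)
		return fast_read();
	return slow_fixup_read();
}

int main(void)
{
	printf("%llu\n", read_clock());
	return 0;
}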
@@ -88,7 +92,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 static inline u64 wrap_min(u64 x, u64 y)
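For reference, the wrap_min/wrap_max helpers this hunk touches compare through a signed difference, so u64 values that have wrapped around 2^64 still order correctly as long as the operands are within 2^63 of each other. A self-contained sketch consistent with the kernel's approach here:

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

/* min, max except they take wrapping into account: casting the
 * difference to signed gives the right ordering whenever x and y
 * are less than 2^63 apart. */
static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}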
@@ -117,10 +121,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
-	 * 		      max(scd->tick_gtod, scd->clock),
-	 * 		      scd->tick_gtod + TICK_NSEC);
+	 *		      max(scd->tick_gtod, scd->clock),
+	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
 	clock = scd->tick_gtod + delta;
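The comment restated in this hunk is the clamping invariant: the new per-cpu clock may never fall behind max(scd->tick_gtod, scd->clock) (monotonicity) and may never run more than TICK_NSEC ahead of the gtod base. Spelled out with the wrap-aware helpers sketched above (illustrative parameter names, not the exact kernel body):

/* Clamp a freshly computed clock into the window
 * [wrap_max(tick_gtod, prev_clock), tick_gtod + tick_nsec]. */
static u64 clamp_sched_clock(u64 tick_gtod, u64 prev_clock, s64 delta,
			     u64 tick_nsec)
{
	u64 clock = tick_gtod + delta;
	u64 min_clock = wrap_max(tick_gtod, prev_clock);
	u64 max_clock = tick_gtod + tick_nsec;

	clock = wrap_max(clock, min_clock);	/* never go backwards */
	clock = wrap_min(clock, max_clock);	/* never get ahead of a tick */

	return clock;
}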
@@ -149,8 +156,11 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;
+
+	if (sched_clock_stable)
+		return sched_clock();
 
 	/*
 	 * Normally this is not called in NMI context - but if it is,
@@ -162,6 +172,7 @@ u64 sched_clock_cpu(int cpu)
 	if (unlikely(!sched_clock_running))
 		return 0ull;
 
+	scd = cpu_sdc(cpu);
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();
@@ -201,6 +212,8 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
@@ -243,22 +256,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
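The hunk's trailing context is cpu_clock(); in kernels of this era it is a thin wrapper that disables interrupts around sched_clock_cpu(), roughly (a sketch from the surrounding file, not part of this diff):

unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}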