Commit e26b33e9
authored Aug 12, 2008 by Ingo Molnar

Merge branch 'sched/clock' into sched/urgent

Parents: 279ef6bb, cf206bff

Showing 3 changed files, with 53 additions and 143 deletions:
include/linux/sched.h       +4   -27
kernel/sched_clock.c        +49  -114
kernel/time/tick-sched.c    +0   -2
include/linux/sched.h

@@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 extern unsigned long long sched_clock(void);
 
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-	return sched_clock();
-}
-
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
 {
 }
...
@@ -1572,28 +1566,11 @@ static inline void sched_clock_idle_sleep_event(void)
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
-
-#ifdef CONFIG_NO_HZ
-static inline void sched_clock_tick_stop(int cpu)
-{
-}
-
-static inline void sched_clock_tick_start(int cpu)
-{
-}
-#endif
-
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
+#else
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-
-#ifdef CONFIG_NO_HZ
-extern void sched_clock_tick_stop(int cpu);
-extern void sched_clock_tick_start(int cpu);
 #endif
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
...
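Note: with the prototypes for sched_clock_init() and sched_clock_cpu() now declared unconditionally, callers no longer need to guard on CONFIG_HAVE_UNSTABLE_SCHED_CLOCK; kernel/sched_clock.c (below) gains matching fallback definitions for the !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK case. A minimal sketch of the call-site pattern this enables (the caller here is hypothetical, not part of the commit):

#include <linux/sched.h>

/*
 * Hypothetical caller, for illustration only: this compiles unchanged
 * whether or not the architecture selects
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, since sched_clock_cpu() is always
 * declared and one of its two definitions in kernel/sched_clock.c is
 * always built.
 */
static u64 sample_cpu_time(int cpu)
{
	return sched_clock_cpu(cpu);
}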
kernel/sched_clock.c

@@ -42,12 +42,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
 
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static __read_mostly int sched_clock_running;
 
-#define MULTI_SHIFT 15
-/* Max is double, Min is 1/2 */
-#define MAX_MULTI (2LL << MULTI_SHIFT)
-#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
 	/*
...
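Note: the weak sched_clock() fallback above converts jiffies to nanoseconds at tick granularity. A self-contained sketch of the same arithmetic (userspace C; HZ=250 is an assumed example value, not mandated by the commit):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ           250		/* assumed config value, for illustration */

/* Same conversion as the weak sched_clock() fallback in the diff. */
static unsigned long long jiffies_to_ns(unsigned long long jiffies)
{
	return jiffies * (NSEC_PER_SEC / HZ);
}

int main(void)
{
	/* 250 jiffies at HZ=250 is exactly one second. */
	printf("%llu ns\n", jiffies_to_ns(250));	/* prints 1000000000 ns */
	return 0;
}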
@@ -58,14 +55,9 @@ struct sched_clock_data {
 	raw_spinlock_t		lock;
 
 	unsigned long		tick_jiffies;
-	u64			prev_raw;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
-	s64			multi;
-#ifdef CONFIG_NO_HZ
-	int			check_max;
-#endif
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
...
@@ -80,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
...
@@ -93,90 +83,39 @@ void sched_clock_init(void)
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_jiffies = now_jiffies;
-		scd->prev_raw = 0;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
-		scd->multi = 1 << MULTI_SHIFT;
-#ifdef CONFIG_NO_HZ
-		scd->check_max = 1;
-#endif
 	}
 
 	sched_clock_running = 1;
 }
 
-#ifdef CONFIG_NO_HZ
-/*
- * The dynamic ticks makes the delta jiffies inaccurate. This
- * prevents us from checking the maximum time update.
- * Disable the maximum check during stopped ticks.
- */
-void sched_clock_tick_stop(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 0;
-}
-
-void sched_clock_tick_start(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 1;
-}
-
-static int check_max(struct sched_clock_data *scd)
-{
-	return scd->check_max;
-}
-#else
-static int check_max(struct sched_clock_data *scd)
-{
-	return 1;
-}
-#endif /* CONFIG_NO_HZ */
-
 /*
  * update the percpu scd from the raw @now value
  *
  *  - filter out backward motion
  *  - use jiffies to generate a min,max window to clip the raw values
  */
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
+static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
 	unsigned long now_jiffies = jiffies;
 	long delta_jiffies = now_jiffies - scd->tick_jiffies;
 	u64 clock = scd->clock;
 	u64 min_clock, max_clock;
-	s64 delta = now - scd->prev_raw;
+	s64 delta = now - scd->tick_raw;
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	/*
-	 * At schedule tick the clock can be just under the gtod. We don't
-	 * want to push it too prematurely.
-	 */
-	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
-	if (min_clock > TICK_NSEC)
-		min_clock -= TICK_NSEC / 2;
+	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
 
 	if (unlikely(delta < 0)) {
 		clock++;
 		goto out;
 	}
 
-	/*
-	 * The clock must stay within a jiffie of the gtod.
-	 * But since we may be at the start of a jiffy or the end of one
-	 * we add another jiffy buffer.
-	 */
-	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
-
-	delta *= scd->multi;
-	delta >>= MULTI_SHIFT;
+	max_clock = min_clock + TICK_NSEC;
 
-	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
+	if (unlikely(clock + delta > max_clock)) {
 		if (clock < max_clock)
 			clock = max_clock;
 		else
...
@@ -189,12 +128,10 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim
 	if (unlikely(clock < min_clock))
 		clock = min_clock;
 
-	if (time)
-		*time = clock;
-	else {
-		scd->prev_raw = now;
-		scd->clock = clock;
-	}
+	scd->tick_jiffies = now_jiffies;
+	scd->clock = clock;
+
+	return clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
...
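Note: the rewritten __update_sched_clock() drops the multi scaling and instead clips the raw delta to a one-tick window anchored at GTOD: min_clock = tick_gtod + delta_jiffies * TICK_NSEC, and max_clock = min_clock + TICK_NSEC. A self-contained sketch of that clipping rule (userspace C; TICK_NSEC assumes HZ=250):

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 4000000ULL	/* ns per tick, assuming HZ=250 */

/*
 * Same window-clipping idea as __update_sched_clock() after this merge:
 * advance by the raw delta, but never past one tick beyond what GTOD
 * plus the elapsed jiffies allows, and never behind it.
 */
static uint64_t clip_clock(uint64_t clock, int64_t delta,
			   uint64_t tick_gtod, long delta_jiffies)
{
	uint64_t min_clock = tick_gtod + delta_jiffies * TICK_NSEC;
	uint64_t max_clock = min_clock + TICK_NSEC;

	if (delta < 0)			/* raw clock went backwards */
		clock++;
	else if (clock + delta > max_clock)
		clock = (clock < max_clock) ? max_clock : clock + 1;
	else
		clock += delta;

	if (clock < min_clock)		/* never fall behind GTOD */
		clock = min_clock;

	return clock;
}

int main(void)
{
	/* A raw delta of 10 ms gets clipped to the 4 ms window: 12000000. */
	printf("%llu\n", (unsigned long long)
	       clip_clock(8000000, 10000000, 8000000, 0));
	return 0;
}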
@@ -212,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd = cpu_sdc(cpu);
-	u64 now, clock;
+	u64 now, clock, this_clock, remote_clock;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
...
@@ -221,43 +158,44 @@ u64 sched_clock_cpu(int cpu)
 	now = sched_clock();
 
 	if (cpu != raw_smp_processor_id()) {
-		/*
-		 * in order to update a remote cpu's clock based on our
-		 * unstable raw time rebase it against:
-		 *   tick_raw		(offset between raw counters)
-		 *   tick_gotd		(tick offset between cpus)
-		 */
 		struct sched_clock_data *my_scd = this_scd();
 
 		lock_double_clock(scd, my_scd);
 
-		now -= my_scd->tick_raw;
-		now += scd->tick_raw;
+		this_clock = __update_sched_clock(my_scd, now);
+		remote_clock = scd->clock;
 
-		now += my_scd->tick_gtod;
-		now -= scd->tick_gtod;
+		/*
+		 * Use the opportunity that we have both locks
+		 * taken to couple the two clocks: we take the
+		 * larger time as the latest time for both
+		 * runqueues. (this creates monotonic movement)
+		 */
+		if (likely(remote_clock < this_clock)) {
+			clock = this_clock;
+			scd->clock = clock;
+		} else {
+			/*
+			 * Should be rare, but possible:
+			 */
+			clock = remote_clock;
+			my_scd->clock = remote_clock;
+		}
 
 		__raw_spin_unlock(&my_scd->lock);
-
-		__update_sched_clock(scd, now, &clock);
-
-		__raw_spin_unlock(&scd->lock);
-
 	} else {
 		__raw_spin_lock(&scd->lock);
-		__update_sched_clock(scd, now, NULL);
-		clock = scd->clock;
-		__raw_spin_unlock(&scd->lock);
+		clock = __update_sched_clock(scd, now);
 	}
 
+	__raw_spin_unlock(&scd->lock);
+
 	return clock;
 }
 
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
-	unsigned long now_jiffies = jiffies;
-	s64 mult, delta_gtod, delta_raw;
 	u64 now, now_gtod;
 
 	if (unlikely(!sched_clock_running))
...
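Note: for a remote CPU, the new sched_clock_cpu() first updates the local clock, then couples the pair by moving both to the larger value, so time observed across CPUs moves monotonically. A self-contained sketch of the coupling rule (userspace C; struct cpu_clock is a stand-in for the kernel's sched_clock_data, and the locking is omitted):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the per-cpu clock state, for illustration only. */
struct cpu_clock {
	uint64_t clock;
};

/*
 * Couple two clocks the way sched_clock_cpu() now does: both move to
 * the larger value, so a reader hopping between CPUs never sees time
 * go backwards.
 */
static uint64_t couple_clocks(struct cpu_clock *local, struct cpu_clock *remote)
{
	if (remote->clock < local->clock)
		remote->clock = local->clock;	/* common case */
	else
		local->clock = remote->clock;	/* rare, but possible */

	return remote->clock;
}

int main(void)
{
	struct cpu_clock a = { .clock = 1000 }, b = { .clock = 900 };

	printf("%llu\n", (unsigned long long)couple_clocks(&a, &b)); /* 1000 */
	printf("%llu\n", (unsigned long long)couple_clocks(&b, &a)); /* 1000 */
	return 0;
}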
@@ -269,29 +207,14 @@ void sched_clock_tick(void)
 	now = sched_clock();
 
 	__raw_spin_lock(&scd->lock);
-	__update_sched_clock(scd, now, NULL);
+	__update_sched_clock(scd, now);
 	/*
 	 * update tick_gtod after __update_sched_clock() because that will
 	 * already observe 1 new jiffy; adding a new tick_gtod to that would
 	 * increase the clock 2 jiffies.
 	 */
-	delta_gtod = now_gtod - scd->tick_gtod;
-	delta_raw  = now - scd->tick_raw;
-
-	if ((long)delta_raw > 0) {
-		mult = delta_gtod << MULTI_SHIFT;
-		do_div(mult, delta_raw);
-		scd->multi = mult;
-		if (scd->multi > MAX_MULTI)
-			scd->multi = MAX_MULTI;
-		else if (scd->multi < MIN_MULTI)
-			scd->multi = MIN_MULTI;
-	} else
-		scd->multi = 1 << MULTI_SHIFT;
-
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
-	scd->tick_jiffies = now_jiffies;
 	__raw_spin_unlock(&scd->lock);
 }
...
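Note: the block removed from sched_clock_tick() maintained a Q15 fixed-point multiplier (MULTI_SHIFT = 15) tracking the ratio of GTOD progress to raw-clock progress, clamped between 1/2 and 2 (MIN_MULTI, MAX_MULTI); the merge discards it in favor of the plain window clipping shown earlier. For reference, a self-contained sketch of the discarded computation (userspace C; do_div is replaced by plain 64-bit division):

#include <stdio.h>
#include <stdint.h>

#define MULTI_SHIFT 15
/* Max is double, Min is 1/2 -- the same clamp the removed code used. */
#define MAX_MULTI (2LL << MULTI_SHIFT)
#define MIN_MULTI (1LL << (MULTI_SHIFT - 1))

/* Q15 ratio of GTOD delta to raw delta, as in the old tick path. */
static int64_t compute_multi(int64_t delta_gtod, int64_t delta_raw)
{
	int64_t mult;

	if (delta_raw <= 0)
		return 1LL << MULTI_SHIFT;	/* neutral multiplier, 1.0 */

	mult = (delta_gtod << MULTI_SHIFT) / delta_raw;
	if (mult > MAX_MULTI)
		mult = MAX_MULTI;
	else if (mult < MIN_MULTI)
		mult = MIN_MULTI;
	return mult;
}

int main(void)
{
	/* Raw clock ran at half GTOD speed: multiplier is exactly 2.0. */
	int64_t m = compute_multi(4000000, 2000000);

	/* Scale a raw delta the way the removed __update_sched_clock() did. */
	int64_t delta = (1000000 * m) >> MULTI_SHIFT;

	printf("multi=%lld scaled delta=%lld\n", (long long)m, (long long)delta);
	return 0;
}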
@@ -310,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 	struct sched_clock_data *scd = this_scd();
-	u64 now = sched_clock();
 
 	/*
 	 * Override the previous timestamp and ignore all
...
@@ -319,15 +241,28 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	 * rq clock:
 	 */
 	__raw_spin_lock(&scd->lock);
-	scd->prev_raw = now;
 	scd->clock += delta_ns;
-	scd->multi = 1 << MULTI_SHIFT;
 	__raw_spin_unlock(&scd->lock);
 
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+	sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
 #endif
 
 unsigned long long cpu_clock(int cpu)
...
kernel/time/tick-sched.c

@@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 		ts->tick_stopped = 1;
 		ts->idle_jiffies = last_jiffies;
 
 		rcu_enter_nohz();
-		sched_clock_tick_stop(cpu);
 	}
 
 	/*
...
@@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void)
 	select_nohz_load_balancer(0);
 	now = ktime_get();
 	tick_do_update_jiffies64(now);
-	sched_clock_tick_start(cpu);
 	cpu_clear(cpu, nohz_cpu_mask);
 
 	/*
...