Commit b07514b6 authored by John Stultz, committed by Thomas Gleixner

time: Remove xtime_cache

With the prior logarithmic time accumulation patch, xtime will now
always be within one "tick" of the current time, instead of
possibly half a second off.

This removes the need for the xtime_cache value, which always
stored the time at the last interrupt, so this patch cleans
things up by removing the xtime_cache-related code.

This is a bit simpler, but still could use some wider testing.
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: John Kacur <jkacur@redhat.com>
Cc: Clark Williams <williams@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <1254525855.7741.95.camel@localhost.localdomain>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Cherry-picked from timers/core. Conflicts:

	kernel/time.c
	kernel/time/timekeeping.c
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent e32cce14
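For orientation, this is what the reader side looks like after the patch:
get_seconds() and current_kernel_time() return values taken straight from
xtime under xtime_lock instead of from the removed cache. The sketch below
mirrors the last two hunks of the diff; it is an excerpt, not
standalone-buildable code, and relies on the kernel-internal xtime,
xtime_lock and atomic-seqlock helpers visible in those hunks.

unsigned long get_seconds(void)
{
	/* xtime is now at most one tick behind, so read it directly. */
	return xtime.tv_sec;
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_atomic_seqbegin(&xtime_lock);
		now = xtime;		/* was: now = xtime_cache */
	} while (read_atomic_seqretry(&xtime_lock, seq));

	return now;
}
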
@@ -136,7 +136,6 @@ static inline void warp_clock(void)
 	write_atomic_seqlock_irq(&xtime_lock);
 	wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
 	xtime.tv_sec += sys_tz.tz_minuteswest * 60;
-	update_xtime_cache(0);
 	write_atomic_sequnlock_irq(&xtime_lock);
 	clock_was_set();
 }
@@ -48,16 +48,8 @@ static unsigned long total_sleep_time;		/* seconds */
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-
-void update_xtime_cache(u64 nsec)
-{
-	xtime_cache = xtime;
-	timespec_add_ns(&xtime_cache, nsec);
-}
-
 struct clocksource *clock;
 
 #ifdef CONFIG_GENERIC_TIME
 /**
  * clocksource_forward_now - update clock to the current time
@@ -233,8 +225,6 @@ int do_settimeofday(struct timespec *tv)
 	xtime = *tv;
 
-	update_xtime_cache(0);
-
 	clock->error = 0;
 	ntp_clear();
@@ -435,7 +425,6 @@ void __init timekeeping_init(void)
 	xtime.tv_nsec = 0;
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
-	update_xtime_cache(0);
 	total_sleep_time = 0;
 	write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
 }
@@ -467,7 +456,6 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
-	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	clock->cycle_last = 0;
 	clock->cycle_last = clocksource_read(clock);
@@ -608,7 +596,6 @@ static void clocksource_adjust(s64 offset)
 				 (NTP_SCALE_SHIFT - clock->shift);
 }
 
-
 /**
  * logarithmic_accumulation - shifted accumulation of cycles
  *
@@ -652,7 +639,6 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 	return offset;
 }
 
-
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
@@ -724,8 +710,6 @@ void update_wall_time(void)
 	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
 	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
 
-	update_xtime_cache(cyc2ns(clock, offset));
-
 	/* check to see if there is a new clocksource to use */
 	change_clocksource();
 	update_vsyscall(&xtime, clock);
@@ -760,11 +744,10 @@ void monotonic_to_bootbased(struct timespec *ts)
 
 unsigned long get_seconds(void)
 {
-	return xtime_cache.tv_sec;
+	return xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
-
 struct timespec current_kernel_time(void)
 {
 	struct timespec now;
@@ -772,10 +755,10 @@ struct timespec current_kernel_time(void)
 	do {
 		seq = read_atomic_seqbegin(&xtime_lock);
 
-		now = xtime_cache;
+		now = xtime;
 	} while (read_atomic_seqretry(&xtime_lock, seq));
 
 	return now;
 }
 EXPORT_SYMBOL(current_kernel_time);