Commit 5a950072 authored by Thomas Gleixner

xtime_lock: Convert to atomic_seqlock

Convert xtime_lock to atomic_seqlock and fix up all users.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 09e46c7a
......@@ -106,7 +106,7 @@ irqreturn_t timer_interrupt(int irq, void *dev)
profile_tick(CPU_PROFILING);
#endif
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
/*
* Calculate how many ticks have passed since the last update,
......@@ -136,7 +136,7 @@ irqreturn_t timer_interrupt(int irq, void *dev)
state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
}
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
while (nticks--)
......@@ -416,14 +416,14 @@ do_gettimeofday(struct timeval *tv)
unsigned long delta_cycles, delta_usec, partial_tick;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
delta_cycles = rpcc() - state.last_time;
sec = xtime.tv_sec;
usec = (xtime.tv_nsec / 1000);
partial_tick = state.partial_tick;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
} while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
#ifdef CONFIG_SMP
/* Until and unless we figure out how to get cpu cycle counters
......@@ -470,7 +470,7 @@ do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
/* The offset that is added into time in do_gettimeofday above
must be subtracted out here to keep a coherent view of the
......@@ -496,7 +496,7 @@ do_settimeofday(struct timespec *tv)
ntp_clear();
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
......
......@@ -244,11 +244,11 @@ void do_gettimeofday(struct timeval *tv)
unsigned long usec, sec;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = system_timer->offset();
sec = xtime.tv_sec;
usec += xtime.tv_nsec / 1000;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
} while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
/* usec may have gone up a lot: be safe */
while (usec >= 1000000) {
......@@ -270,7 +270,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set "xtime" correctly. However, the
* value in this location is the value at the most recent update of
......@@ -286,7 +286,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
......@@ -336,9 +336,9 @@ void timer_tick(void)
profile_tick(CPU_PROFILING);
do_leds();
do_set_rtc();
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
do_timer(1);
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
......
......@@ -128,7 +128,7 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
/* last time the cmos clock got updated */
static long last_rtc_update;
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
do_timer(1);
/*
......@@ -148,7 +148,7 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
/* Do it again in 60s. */
last_rtc_update = xtime.tv_sec - 600;
}
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
#ifdef CONFIG_IPIPE
update_root_process_times(get_irq_regs());
......@@ -192,12 +192,12 @@ void do_gettimeofday(struct timeval *tv)
unsigned long usec, sec;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = gettimeoffset();
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / NSEC_PER_USEC);
}
while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= USEC_PER_SEC) {
usec -= USEC_PER_SEC;
......@@ -217,7 +217,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set the xtime.tv_usec
* correctly. However, the value in this location is
......@@ -235,7 +235,7 @@ int do_settimeofday(struct timespec *tv)
ntp_clear();
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
......
......@@ -87,7 +87,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set "xtime" correctly. However, the
* value in this location is the value at the most recent update of
......@@ -103,7 +103,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
......
......@@ -70,7 +70,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
* the irq version of write_lock because as just said we have irq
* locally disabled. -arca
*/
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
do_timer(1);
......@@ -96,7 +96,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
__set_LEDS(n);
#endif /* CONFIG_HEARTBEAT */
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
......
......@@ -35,9 +35,9 @@ void h8300_timer_tick(void)
{
if (current->pid)
profile_tick(CPU_PROFILING);
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
do_timer(1);
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
}
......
......@@ -197,10 +197,10 @@ timer_interrupt (int irq, void *dev_id)
* another CPU. We need to avoid to SMP race by acquiring the
* xtime_lock.
*/
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
do_timer(1);
local_cpu_data->itm_next = new_itm;
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
} else
local_cpu_data->itm_next = new_itm;
......@@ -477,7 +477,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
unsigned long flags;
write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
write_atomic_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
......@@ -500,6 +500,6 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
fsyscall_gtod_data.monotonic_time.tv_sec++;
}
write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
write_atomic_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
......@@ -141,10 +141,10 @@ consider_steal_time(unsigned long new_itm)
delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
if (cpu == time_keeper_id) {
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
do_timer(stolen + blocked);
local_cpu_data->itm_next = delta_itm + new_itm;
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
} else {
local_cpu_data->itm_next = delta_itm + new_itm;
}
......
......@@ -106,7 +106,7 @@ void do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
usec = do_gettimeoffset();
......@@ -120,7 +120,7 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
while (usec >= 1000000) {
usec -= 1000000;
......@@ -141,7 +141,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set "xtime" correctly. However, the
* value in this location is the value at the most recent update of
......@@ -157,7 +157,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
......@@ -202,7 +202,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
* CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
if (ntp_synced()
&& xtime.tv_sec > last_rtc_update + 660
&& (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
......@@ -213,7 +213,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
else /* do it again in 60 s */
last_rtc_update = xtime.tv_sec - 600;
}
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
/* As we return to user mode fire off the other CPU schedulers..
this is basically because we don't yet share IRQ's around.
This message is rigged to be safe on the 386 - basically it's
......
......@@ -102,7 +102,7 @@ void do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = mach_gettimeoffset();
......@@ -116,7 +116,7 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += xtime.tv_nsec/1000;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
} while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
......@@ -138,7 +138,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
/* This is revolting. We need to set the xtime.tv_nsec
* correctly. However, the value in this location is
* is value at the last tick.
......@@ -154,7 +154,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
......
......@@ -44,11 +44,11 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
if (current->pid)
profile_tick(CPU_PROFILING);
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
do_timer(1);
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
......
......@@ -99,7 +99,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
unsigned tsc, elapse;
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
while (tsc = get_cycles(),
elapse = mn10300_last_tsc - tsc, /* time elapsed since last
......@@ -114,7 +114,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
check_rtc_time();
}
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
......
......@@ -163,9 +163,9 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
}
if (cpu == 0) {
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
do_timer(ticks_elapsed);
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
}
return IRQ_HANDLED;
......@@ -268,12 +268,12 @@ void __init time_init(void)
if (pdc_tod_read(&tod_data) == 0) {
unsigned long flags;
write_seqlock_irqsave(&xtime_lock, flags);
write_atomic_seqlock_irqsave(&xtime_lock, flags);
xtime.tv_sec = tod_data.tod_sec;
xtime.tv_nsec = tod_data.tod_usec * 1000;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
write_sequnlock_irqrestore(&xtime_lock, flags);
write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
} else {
printk(KERN_ERR "Error reading tod clock\n");
xtime.tv_sec = 0;
......
......@@ -1000,7 +1000,7 @@ void __init time_init(void)
/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
boot_tb = get_tb_or_rtc();
write_seqlock_irqsave(&xtime_lock, flags);
write_atomic_seqlock_irqsave(&xtime_lock, flags);
/* If platform provided a timezone (pmac), we correct the time */
if (timezone_offset) {
......@@ -1014,7 +1014,7 @@ void __init time_init(void)
vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
vdso_data->tb_to_xs = tb_to_xs;
write_sequnlock_irqrestore(&xtime_lock, flags);
write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
/* Register the clocksource, if we're not running on iSeries */
if (!firmware_has_feature(FW_FEATURE_ISERIES))
......
......@@ -272,14 +272,14 @@ void __init time_init(void)
* small for /proc/uptime to be accurate.
* Reset xtime and wall_to_monotonic to sane values.
*/
write_seqlock_irqsave(&xtime_lock, flags);
write_atomic_seqlock_irqsave(&xtime_lock, flags);
now = get_clock();
tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
clocksource_tod.cycle_last = now;
clocksource_tod.raw_time = xtime;
tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
write_sequnlock_irqrestore(&xtime_lock, flags);
write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
/* Enable TOD clock interrupts on the boot cpu. */
init_cpu_timer();
......
......@@ -703,10 +703,10 @@ static void pcic_clear_clock_irq(void)
static irqreturn_t pcic_timer_handler (int irq, void *h)
{
write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
write_atomic_seqlock(&xtime_lock); /* Dummy, to show that we remember */
pcic_clear_clock_irq();
do_timer(1);
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
......@@ -766,7 +766,7 @@ static void pci_do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = do_gettimeoffset();
/*
......@@ -779,7 +779,7 @@ static void pci_do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
} while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
usec -= 1000000;
......
......@@ -93,7 +93,7 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
#endif
/* Protect counter clear so that do_gettimeoffset works */
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
clear_clock_irq();
......@@ -109,7 +109,7 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
else
last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
}
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
......@@ -251,7 +251,7 @@ void do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = do_gettimeoffset();
/*
......@@ -264,7 +264,7 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
} while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
usec -= 1000000;
......@@ -281,9 +281,9 @@ int do_settimeofday(struct timespec *tv)
{
int ret;
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
ret = bus_do_settimeofday(tv);
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return ret;
}
......
......@@ -99,7 +99,7 @@ static inline struct timespec timespec_sub(struct timespec lhs,
extern struct timespec xtime;
extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock;
extern atomic_seqlock_t xtime_lock;
extern unsigned long read_persistent_clock(void);
extern int update_persistent_clock(struct timespec now);
......
......@@ -120,11 +120,11 @@ void ktime_get_ts(struct timespec *ts)
unsigned long seq;
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
getnstimeofday(ts);
tomono = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec);
......@@ -142,10 +142,10 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
unsigned long seq;
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
xts = current_kernel_time();
tom = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
xtim = timespec_to_ktime(xts);
tomono = timespec_to_ktime(tom);
......@@ -638,11 +638,11 @@ static void retrigger_next_event(void *arg)
return;
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
set_normalized_timespec(&realtime_offset,
-wall_to_monotonic.tv_sec,
-wall_to_monotonic.tv_nsec);
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
base = &__get_cpu_var(hrtimer_bases);
......
......@@ -133,11 +133,11 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
*/
static inline void warp_clock(void)
{
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
update_xtime_cache(0);
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
}
......
......@@ -188,7 +188,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
{
enum hrtimer_restart res = HRTIMER_NORESTART;
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
switch (time_state) {
case TIME_OK:
......@@ -221,7 +221,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
}
update_vsyscall(&xtime, clock);
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
return res;
}
......@@ -479,7 +479,7 @@ int do_adjtimex(struct timex *txc)
getnstimeofday(&ts);
write_seqlock_irq(&xtime_lock);
write_atomic_seqlock_irq(&xtime_lock);
if (txc->modes & ADJ_ADJTIME) {
long save_adjust = time_adjust;
......@@ -527,7 +527,7 @@ int do_adjtimex(struct timex *txc)
txc->errcnt = 0;
txc->stbcnt = 0;
write_sequnlock_irq(&xtime_lock);
write_atomic_sequnlock_irq(&xtime_lock);
txc->time.tv_sec = ts.tv_sec;
txc->time.tv_usec = ts.tv_nsec;
......
......@@ -60,13 +60,13 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
}
update_process_times(user_mode(get_irq_regs()));
......@@ -127,9 +127,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
next = tick_next_period;
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
......
......@@ -57,7 +57,7 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevalute with xtime_lock held */
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
......@@ -80,7 +80,7 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
}
/*
......@@ -90,12 +90,12 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
write_seqlock(&xtime_lock);
write_atomic_seqlock(&xtime_lock);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
write_sequnlock(&xtime_lock);
write_atomic_sequnlock(&xtime_lock);
return period;
}
......@@ -267,10 +267,10 @@ void tick_nohz_stop_sched_tick(int inidle)
ts->idle_calls++;
/* Read jiffies and the time when jiffies were updated last */
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
/* Get the next timer wheel timer */
next_jiffies = get_next_timer_interrupt(last_jiffies);
......
......@@ -24,8 +24,7 @@
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
__cacheline_aligned_in_smp DEFINE_ATOMIC_SEQLOCK(xtime_lock);
/*
* The current time
......@@ -102,7 +101,7 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
*ts = xtime;
......@@ -118,7 +117,7 @@ void getnstimeofday(struct timespec *ts)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
timespec_add_ns(ts, nsecs);
}
......@@ -155,7 +154,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irqsave(&xtime_lock, flags);
write_atomic_seqlock_irqsave(&xtime_lock, flags);
clocksource_forward_now();
......@@ -172,7 +171,7 @@ int do_settimeofday(struct timespec *tv)
update_vsyscall(&xtime, clock);
write_sequnlock_irqrestore(&xtime_lock, flags);
write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
......@@ -239,7 +238,7 @@ void getrawmonotonic(struct timespec *ts)
cycle_t cycle_now, cycle_delta;
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
/* read clocksource: */
cycle_now = clocksource_read(clock);
......@@ -252,7 +251,7 @@ void getrawmonotonic(struct timespec *ts)
*ts = clock->raw_time;
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
timespec_add_ns(ts, nsecs);
}
......@@ -268,11 +267,11 @@ int timekeeping_valid_for_hres(void)
int ret;
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
return ret;
}
......@@ -299,7 +298,7 @@ void __init timekeeping_init(void)
unsigned long flags;
unsigned long sec = read_persistent_clock();
write_seqlock_irqsave(&xtime_lock, flags);
write_atomic_seqlock_irqsave(&xtime_lock, flags);
ntp_init();
......@@ -314,7 +313,7 @@ void __init timekeeping_init(void)
-xtime.tv_sec, -xtime.tv_nsec);
update_xtime_cache(0);
total_sleep_time = 0;
write_sequnlock_irqrestore(&xtime_lock, flags);
write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
......@@ -335,7 +334,7 @@ static int timekeeping_resume(struct sys_device *dev)
clocksource_resume();
write_seqlock_irqsave(&xtime_lock, flags);
write_atomic_seqlock_irqsave(&xtime_lock, flags);
if (now && (now > timekeeping_suspend_time)) {
unsigned long sleep_length = now - timekeeping_suspend_time;
......@@ -350,7 +349,7 @@ static int timekeeping_resume(struct sys_device *dev)
clock->cycle_last = clocksource_read(clock);
clock->error = 0;
timekeeping_suspended = 0;
write_sequnlock_irqrestore(&xtime_lock, flags);
write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
touch_softlockup_watchdog();
......@@ -368,10 +367,10 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
timekeeping_suspend_time = read_persistent_clock();
write_seqlock_irqsave(&xtime_lock, flags);
write_atomic_seqlock_irqsave(&xtime_lock, flags);
clocksource_forward_now();
timekeeping_suspended = 1;
write_sequnlock_irqrestore(&xtime_lock, flags);
write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
......@@ -610,10 +609,10 @@ struct timespec current_kernel_time(void)
unsigned long seq;
do {
seq = read_seqbegin(&xtime_lock);
seq = read_atomic_seqbegin(&xtime_lock);
now = xtime_cache;
} while (read_seqretry(&xtime_lock, seq));
} while (read_atomic_seqretry(&xtime_lock, seq));
return now;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment