Commit 950b0d28 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: fix 64-bit asm NOPS for CONFIG_GENERIC_CPU
  x86: fix call to set_cyc2ns_scale() from time_cpufreq_notifier()
  revert "x86: tsc prevent time going backwards"
parents 2557a933 871de939
@@ -256,9 +256,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 						ref_freq, freq->new);
 			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
 				tsc_khz = cpu_khz;
-				preempt_disable();
-				set_cyc2ns_scale(cpu_khz, smp_processor_id());
-				preempt_enable();
+				set_cyc2ns_scale(cpu_khz, freq->cpu);
 				/*
 				 * TSC based sched_clock turns
 				 * to junk w/ cpufreq
@@ -287,27 +285,14 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
 
-static struct clocksource clocksource_tsc;
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp issue. This can be observed in
- * a very small window right after one CPU updated cycle_last under
- * xtime lock and the other CPU reads a TSC value which is smaller
- * than the cycle_last reference value due to a TSC which is slighty
- * behind. This delta is nowhere else observable, but in that case it
- * results in a forward time jump in the range of hours due to the
- * unsigned delta calculation of the time keeping core code, which is
- * necessary to support wrapping clocksources like pm timer.
- */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret;
 
 	rdtscll(ret);
 
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
+	return ret;
 }
 
 static struct clocksource clocksource_tsc = {
...
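The set_cyc2ns_scale() hunk above is the "fix call to set_cyc2ns_scale() from time_cpufreq_notifier()" change: the cpufreq notifier is told which CPU changed frequency (freq->cpu), and that need not be the CPU the notifier happens to run on, so the per-CPU scale is now refreshed for freq->cpu directly and the preempt_disable()/smp_processor_id()/preempt_enable() dance goes away; otherwise the CPU whose frequency actually changed could keep converting cycles with a stale factor. As a rough user-space sketch of the kind of fixed-point cycles-to-nanoseconds scaling such a per-CPU factor provides (the 10-bit shift and the helper names are assumptions for illustration, not the kernel's exact code):

#include <stdio.h>
#include <stdint.h>

#define SCALE_SHIFT 10	/* assumed fixed-point precision for this sketch */

/* scale factor chosen so that ns = (cycles * scale) >> SCALE_SHIFT */
static uint64_t cyc2ns_scale(uint64_t tsc_khz)
{
	return (1000000ULL << SCALE_SHIFT) / tsc_khz;	/* 1e6 ns per ms / kcycles per ms */
}

static uint64_t cycles_to_ns(uint64_t cycles, uint64_t scale)
{
	return (cycles * scale) >> SCALE_SHIFT;
}

int main(void)
{
	uint64_t scale = cyc2ns_scale(2400000);		/* 2.4 GHz == 2400000 kHz */

	/* 2.4e9 cycles at 2.4 GHz should come out close to 1e9 ns */
	printf("scale = %llu, 2.4e9 cycles = %llu ns\n",
	       (unsigned long long)scale,
	       (unsigned long long)cycles_to_ns(2400000000ULL, scale));
	return 0;
}

Built with any C compiler, the 2.4 GHz example prints roughly one second (about 998 ms, the small error coming from the coarse 10-bit factor).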
@@ -11,7 +11,6 @@
 #include <asm/hpet.h>
 #include <asm/timex.h>
 #include <asm/timer.h>
-#include <asm/vgtod.h>
 
 static int notsc __initdata = 0;
 
@@ -149,9 +148,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 			mark_tsc_unstable("cpufreq changes");
 	}
 
-	preempt_disable();
-	set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
-	preempt_enable();
+	set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
 
 	return 0;
 }
@@ -291,34 +288,18 @@ int __init notsc_setup(char *s)
 
 __setup("notsc", notsc_setup);
 
-static struct clocksource clocksource_tsc;
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp. This can be observed in a
- * very small window right after one CPU updated cycle_last under
- * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
- * is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
- * that case it results in a forward time jump in the range of hours
- * due to the unsigned delta calculation of the time keeping core
- * code, which is necessary to support wrapping clocksources like pm
- * timer.
- */
+/* clock source code: */
 
 static cycle_t read_tsc(void)
 {
 	cycle_t ret = (cycle_t)get_cycles();
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
+	return ret;
 }
 
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
 	cycle_t ret = (cycle_t)vget_cycles();
-
-	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
-		ret : __vsyscall_gtod_data.clock.cycle_last;
+	return ret;
 }
 
 static struct clocksource clocksource_tsc = {
...
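The comment blocks deleted above (once in the 32-bit and once in the 64-bit TSC code) document what the now-reverted "tsc prevent time going backwards" patch was guarding against: the timekeeping core computes the elapsed delta as an unsigned difference so that wrapping clocksources such as the PM timer keep working, which means a TSC readout that lags the cycle_last another CPU just stored by even a couple of cycles turns into an enormous positive delta rather than a tiny negative one. A stand-alone illustration of that arithmetic (made-up values, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cycle_last = 1000000;	/* reference stored by the other CPU */
	uint64_t tsc_now    = 999998;	/* this CPU's TSC, two cycles behind */

	/* unsigned subtraction wraps to a huge value */
	uint64_t raw_delta = tsc_now - cycle_last;

	/* the reverted patch clamped the readout to cycle_last instead */
	uint64_t clamped = (tsc_now >= cycle_last ? tsc_now : cycle_last)
			   - cycle_last;

	printf("raw unsigned delta: %llu cycles\n", (unsigned long long)raw_delta);
	printf("clamped delta:      %llu cycles\n", (unsigned long long)clamped);
	return 0;
}

With the revert applied, read_tsc() and vread_tsc() simply return the raw counter again, and the clamp plus the forward declaration it required are gone.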
@@ -73,16 +73,7 @@
 #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
 #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
 
-#if defined(CONFIG_MK8)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#elif defined(CONFIG_MK7)
+#if defined(CONFIG_MK7)
 #define ASM_NOP1 K7_NOP1
 #define ASM_NOP2 K7_NOP2
 #define ASM_NOP3 K7_NOP3
@@ -100,6 +91,15 @@
 #define ASM_NOP6 P6_NOP6
 #define ASM_NOP7 P6_NOP7
 #define ASM_NOP8 P6_NOP8
+#elif defined(CONFIG_X86_64)
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
 #else
 #define ASM_NOP1 GENERIC_NOP1
 #define ASM_NOP2 GENERIC_NOP2
...
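The nops.h hunks implement "fix 64-bit asm NOPS for CONFIG_GENERIC_CPU": the K8 NOP block no longer keys off CONFIG_MK8 at the front of the chain; instead a new #elif defined(CONFIG_X86_64) branch near the end selects the K8 NOPs for any 64-bit build that did not match an earlier case. Previously a 64-bit kernel built with CONFIG_GENERIC_CPU matched none of the branches and fell through to GENERIC_NOP*, which are i386 sequences; several of them write a 32-bit register, and in 64-bit mode such a write zeroes the upper half of the register, so they are not true no-ops there. A compile-time sketch of the selection order after the fix; the config symbols tested and the family strings below are simplified stand-ins, not the real header:

#include <stdio.h>

#if defined(CONFIG_MK7)
# define ASM_NOP_FAMILY "K7"
#elif defined(CONFIG_M686)	/* placeholder for the header's P6-NOP condition */
# define ASM_NOP_FAMILY "P6"
#elif defined(CONFIG_X86_64)	/* the branch added by this fix */
# define ASM_NOP_FAMILY "K8"
#else
# define ASM_NOP_FAMILY "GENERIC"	/* 32-bit fallback */
#endif

int main(void)
{
	/* e.g. "cc -DCONFIG_X86_64 nops_demo.c && ./a.out" prints K8;
	 * with the old #if ordering a generic 64-bit build would have
	 * reported GENERIC instead. */
	printf("ASM_NOPs come from the %s family\n", ASM_NOP_FAMILY);
	return 0;
}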