Commit f25f64ed authored by Juergen Beisert, committed by Linus Torvalds

x86: Replace NSC/Cyrix specific chipset access macros by inlined functions.

Due to index register access ordering problems when using macros, a line
like this fails (and silently does nothing):

	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);

With inlined functions this line will work as expected.
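
For illustration only (not part of the patch), the difference in access order can be reproduced in user space by stubbing outb()/inb() with logging. This sketch assumes the CX86_CCR2 index value from the kernel headers and reuses the 0x88 mask from the line above; like the original macro, it relies on GCC statement expressions:

	/* Stand-alone sketch: log port accesses instead of doing real I/O. */
	#include <stdio.h>

	typedef unsigned char u8;

	static void outb(u8 data, unsigned short port)
	{
		printf("outb 0x%02x -> port 0x%02x\n", data, port);
	}

	static u8 inb(unsigned short port)
	{
		printf("inb         <- port 0x%02x\n", port);
		return 0;
	}

	#define CX86_CCR2 0xc2	/* assumed index value, as defined in the kernel headers */

	/* Old macros (removed by this patch) */
	#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
	#define setCx86_old(reg, data) do { \
		outb((reg), 0x22); \
		outb((data), 0x23); \
	} while (0)

	/* New inlined functions (added by this patch) */
	static inline u8 getCx86(u8 reg)
	{
		outb(reg, 0x22);
		return inb(0x23);
	}

	static inline void setCx86(u8 reg, u8 data)
	{
		outb(reg, 0x22);
		outb(data, 0x23);
	}

	int main(void)
	{
		puts("macros:  index port 0x22 is written twice before the data port is touched");
		setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);

		puts("inlined: accesses alternate 0x22, 0x23, 0x22, 0x23 as required");
		setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
		return 0;
	}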

Note about a side effect: it seems that on Geode GX1 based systems the
"suspend on halt" power saving feature was never enabled, due to this
wrong macro expansion. With inlined functions it is enabled, but this
stops the TSC whenever the CPU executes a HLT instruction. The kernel
then reports something like this:
	Clocksource tsc unstable (delta = -472746897 ns)

This is the 3rd version of this patch.

 - Added the missing arch/i386/kernel/cpu/mtrr/state.c
	(thanks to Andres Salomon)
 - Added some big fat comments to the new header file
	(suggested by Andi Kleen)

AK: fixed x86-64 compilation
Signed-off-by: Juergen Beisert <juergen@kreuzholzen.de>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bdda1561
@@ -79,7 +79,7 @@
 #include <linux/smp.h>
 #include <linux/cpufreq.h>
 #include <linux/pci.h>
-#include <asm/processor.h>
+#include <asm/processor-cyrix.h>
 #include <asm/errno.h>
 /* PCI config registers, all at F0 */
......
@@ -4,7 +4,7 @@
 #include <linux/pci.h>
 #include <asm/dma.h>
 #include <asm/io.h>
-#include <asm/processor.h>
+#include <asm/processor-cyrix.h>
 #include <asm/timer.h>
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
......
@@ -3,6 +3,7 @@
 #include <asm/mtrr.h>
 #include <asm/msr.h>
 #include <asm/io.h>
+#include <asm/processor-cyrix.h>
 #include "mtrr.h"
 int arr3_protected;
......
@@ -3,6 +3,7 @@
 #include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
+#include <asm-i386/processor-cyrix.h>
 #include "mtrr.h"
......
/*
 * NSC/Cyrix CPU indexed register access. Must be inlined instead of
 * macros to ensure correct access ordering
 * Access order is always 0x22 (=offset), 0x23 (=value)
 *
 * When using the old macros a line like
 *   setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
 * gets expanded to:
 *  do {
 *    outb((CX86_CCR2), 0x22);
 *    outb((({
 *      outb((CX86_CCR2), 0x22);
 *      inb(0x23);
 *    }) | 0x88), 0x23);
 *  } while (0);
 *
 * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
 */

static inline u8 getCx86(u8 reg)
{
	outb(reg, 0x22);
	return inb(0x23);
}

static inline void setCx86(u8 reg, u8 data)
{
	outb(reg, 0x22);
	outb(data, 0x23);
}
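
A minimal caller sketch (not part of this patch; the function name is hypothetical, while the register index and 0x88 mask are the values quoted in the commit message) showing how code that used the old macros now includes the dedicated header instead of getting the helpers via <asm/processor.h>:

	#include <asm/processor-cyrix.h>	/* inlined getCx86()/setCx86() */

	/* Read-modify-write of CCR2; with the inlined helpers the index and
	 * data ports are accessed in the required 0x22, 0x23, 0x22, 0x23 order. */
	static void geode_enable_suspend_on_halt(void)
	{
		setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
	}
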
@@ -168,17 +168,6 @@ static inline void clear_in_cr4 (unsigned long mask)
 	write_cr4(cr4);
 }
-/*
- * NSC/Cyrix CPU indexed register access macros
- */
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-#define setCx86(reg, data) do { \
-	outb((reg), 0x22); \
-	outb((data), 0x23); \
-} while (0)
 /* Stop speculative execution */
 static inline void sync_core(void)
 {
......
@@ -389,17 +389,6 @@ static inline void prefetchw(void *x)
 #define cpu_relax() rep_nop()
-/*
- * NSC/Cyrix CPU indexed register access macros
- */
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-#define setCx86(reg, data) do { \
-	outb((reg), 0x22); \
-	outb((data), 0x23); \
-} while (0)
 static inline void serialize_cpu(void)
 {
 	__asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
......