Commit d8954222 authored by Glauber de Oliveira Costa, committed by Ingo Molnar

x86: put together equal pieces of system.h

This patch puts together the pieces of system_{32,64}.h that
look the same. It is the first step towards the integration
of this file.
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent e34907ae
arch/x86/kernel/process_64.c

@@ -99,7 +99,7 @@ void exit_idle(void)
  * We use this if we don't have any better
  * idle routine..
  */
-static void default_idle(void)
+void default_idle(void)
 {
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
...
include/asm-x86/system.h

+#ifndef _ASM_X86_SYSTEM_H_
+#define _ASM_X86_SYSTEM_H_
+
+#include <asm/asm.h>
+
 #ifdef CONFIG_X86_32
 # include "system_32.h"
 #else
 # include "system_64.h"
 #endif
+
+#ifdef __KERNEL__
+#define _set_base(addr, base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %%dl,%2\n\t" \
+	"movb %%dh,%3" \
+	:"=&d" (__pr) \
+	:"m" (*((addr)+2)), \
+	 "m" (*((addr)+4)), \
+	 "m" (*((addr)+7)), \
+	 "0" (base) \
+	); } while (0)
+
+#define _set_limit(addr, limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %2,%%dh\n\t" \
+	"andb $0xf0,%%dh\n\t" \
+	"orb %%dh,%%dl\n\t" \
+	"movb %%dl,%2" \
+	:"=&d" (__lr) \
+	:"m" (*(addr)), \
+	 "m" (*((addr)+6)), \
+	 "0" (limit) \
+	); } while (0)
+
+#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
+#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+	asm volatile("mov %%" #seg ",%0":"=rm" (value))
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	__asm__("lsll %1,%0"
+		:"=r" (__limit):"r" (segment));
+	return __limit+1;
+}
+#endif /* __KERNEL__ */
+
+static inline void clflush(void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern int es7000_plat;
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+
+#endif
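For orientation, a minimal sketch (hypothetical, not part of this commit) of how the helpers now shared through system.h are typically invoked; the function and variable names below are made up for illustration:

/* Illustrative only: exercises the consolidated helpers. */
static void demo_common_helpers(void)
{
	unsigned long sel, limit;
	char buf[64];

	savesegment(fs, sel);		/* copy the %fs selector into sel */
	limit = get_limit(sel);		/* segment limit via lsl, plus one */

	buf[0] = 1;
	clflush(buf);			/* flush the cache line holding buf */
	nop();				/* single one-byte instruction */
	(void)limit;
}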
include/asm-x86/system_32.h

@@ -34,34 +34,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		       "2" (prev), "d" (next));	\
 } while (0)
 
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %%dl,%2\n\t" \
-	"movb %%dh,%3" \
-	:"=&d" (__pr) \
-	:"m" (*((addr)+2)), \
-	 "m" (*((addr)+4)), \
-	 "m" (*((addr)+7)), \
-	 "0" (base) \
-	); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %2,%%dh\n\t" \
-	"andb $0xf0,%%dh\n\t" \
-	"orb %%dh,%%dl\n\t" \
-	"movb %%dl,%2" \
-	:"=&d" (__lr) \
-	:"m" (*(addr)), \
-	 "m" (*((addr)+6)), \
-	 "0" (limit) \
-	); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
-
 /*
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..

@@ -83,12 +55,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	".previous" \
 	: :"rm" (value))
 
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value) \
-	asm volatile("mov %%" #seg ",%0":"=rm" (value))
-
 static inline void native_clts(void)
 {

@@ -161,11 +127,6 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-static inline void clflush(void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else

@@ -190,15 +151,6 @@ static inline void clflush(void *__p)
 #endif /* __KERNEL__ */
 
-static inline unsigned long get_limit(unsigned long segment)
-{
-	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
 /*
  * Force strict CPU ordering.

@@ -305,15 +257,5 @@ static inline unsigned long get_limit(unsigned long segment)
  * disable hlt during certain critical i/o operations
  */
 #define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern int es7000_plat;
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-void default_idle(void);
-
 #endif
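As a reading aid for the descriptor asm removed above (it now lives in system.h), here is a hedged plain-C model of which bytes _set_base()/_set_limit() actually write; model_set_base(), model_set_limit() and desc are illustrative names, not kernel API. Note that set_limit() passes (limit)-1, since the hardware treats the stored limit as inclusive.

/* Plain-C model of the byte layout the asm pokes into an 8-byte
 * segment descriptor. Illustrative only, not part of the commit. */
static inline void model_set_base(unsigned char *desc, unsigned long base)
{
	desc[2] = base & 0xff;			/* base bits  0-7  -> byte 2 */
	desc[3] = (base >> 8) & 0xff;		/* base bits  8-15 -> byte 3 */
	desc[4] = (base >> 16) & 0xff;		/* base bits 16-23 -> byte 4 */
	desc[7] = (base >> 24) & 0xff;		/* base bits 24-31 -> byte 7 */
}

static inline void model_set_limit(unsigned char *desc, unsigned long limit)
{
	desc[0] = limit & 0xff;			/* limit bits  0-7  -> byte 0 */
	desc[1] = (limit >> 8) & 0xff;		/* limit bits  8-15 -> byte 1 */
	desc[6] = (desc[6] & 0xf0)		/* keep the flags nibble...  */
		| ((limit >> 16) & 0x0f);	/* ...merge limit bits 16-19 */
}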
include/asm-x86/system_64.h

@@ -141,13 +141,6 @@ static inline void write_cr8(unsigned long val)
 #endif /* __KERNEL__ */
 
-static inline void clflush(volatile void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	barrier()

@@ -177,9 +170,4 @@ static inline void clflush(volatile void *__p)
 #include <linux/irqflags.h>
 
-void cpu_idle_wait(void);
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
 #endif
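One detail worth noting: the clflush() removed from system_64.h took a volatile void *, while the consolidated copy in system.h takes a plain void *. A 64-bit caller holding a volatile pointer would need a cast; a hypothetical shim (not in this commit) would look like:

static inline void clflush_volatile(volatile void *p)
{
	clflush((void *)p);	/* cast away volatile for the unified helper */
}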