Commit 6152e4b1 authored by H. Peter Anvin, committed by Ingo Molnar

x86, xsave: keep the XSAVE feature mask as an u64

The XSAVE feature mask is a 64-bit number; keep it that way, in order
to avoid the kind of mistake made with rdmsr/wrmsr.  Use the xsetbv()
function provided in the previous patch.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b4a091a6
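
Note: the xsetbv() helper and XCR_XFEATURE_ENABLED_MASK come from the
parent patch and are not part of this diff. A minimal sketch of what
such a helper looks like (reconstructed here for illustration, not
quoted from the tree; assumes the u32/u64 types from <linux/types.h>)
keeps the u64 intact in the C interface and splits it into edx:eax
only at the instruction itself:

	#define XCR_XFEATURE_ENABLED_MASK	0x00000000	/* XCR0 */

	/* Sketch: the 64-bit value is split into the edx:eax register
	 * pair only here, at the xsetbv instruction boundary, never in
	 * the C-level interface. */
	static inline void xsetbv(u32 index, u64 value)
	{
		u32 eax = value;	/* low 32 bits */
		u32 edx = value >> 32;	/* high 32 bits */

		/* .byte encoding of xsetbv, for old assemblers */
		asm volatile(".byte 0x0f,0x01,0xd1"
			     : : "a" (eax), "d" (edx), "c" (index));
	}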
@@ -552,18 +552,17 @@ static int restore_i387_xsave(void __user *buf)
 		(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
 	struct xsave_hdr_struct *xsave_hdr =
 				&current->thread.xstate->xsave.xsave_hdr;
-	unsigned int lmask, hmask;
+	u64 mask;
 	int err;
 
 	if (check_for_xstate(fx, buf, &fx_sw_user))
 		goto fx_only;
 
-	lmask = fx_sw_user.xstate_bv;
-	hmask = fx_sw_user.xstate_bv >> 32;
+	mask = fx_sw_user.xstate_bv;
 
 	err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);
 
-	xsave_hdr->xstate_bv &= (pcntxt_lmask | (((u64) pcntxt_hmask) << 32));
+	xsave_hdr->xstate_bv &= pcntxt_mask;
 	/*
 	 * These bits must be zero.
 	 */
@@ -573,9 +572,8 @@ static int restore_i387_xsave(void __user *buf)
 	 * Init the state that is not present in the memory layout
 	 * and enabled by the OS.
 	 */
-	lmask = ~(pcntxt_lmask & ~lmask);
-	hmask = ~(pcntxt_hmask & ~hmask);
-	xsave_hdr->xstate_bv &= (lmask | (((u64) hmask) << 32));
+	mask = ~(pcntxt_mask & ~mask);
+	xsave_hdr->xstate_bv &= mask;
 
 	return err;
 fx_only:
...
@@ -9,11 +9,12 @@
 #ifdef CONFIG_IA32_EMULATION
 #include <asm/sigcontext32.h>
 #endif
+#include <asm/xcr.h>
 
 /*
  * Supported feature mask by the CPU and the kernel.
  */
-unsigned int pcntxt_hmask, pcntxt_lmask;
+u64 pcntxt_mask;
 struct _fpx_sw_bytes fx_sw_reserved;
 
 #ifdef CONFIG_IA32_EMULATION
@@ -127,30 +128,28 @@ int save_i387_xstate(void __user *buf)
 int restore_user_xstate(void __user *buf)
 {
 	struct _fpx_sw_bytes fx_sw_user;
-	unsigned int lmask, hmask;
+	u64 mask;
 	int err;
 
 	if (((unsigned long)buf % 64) ||
 	     check_for_xstate(buf, buf, &fx_sw_user))
 		goto fx_only;
 
-	lmask = fx_sw_user.xstate_bv;
-	hmask = fx_sw_user.xstate_bv >> 32;
+	mask = fx_sw_user.xstate_bv;
 
 	/*
 	 * restore the state passed by the user.
 	 */
-	err = xrestore_user(buf, lmask, hmask);
+	err = xrestore_user(buf, mask);
 	if (err)
 		return err;
 
 	/*
 	 * init the state skipped by the user.
 	 */
-	lmask = pcntxt_lmask & ~lmask;
-	hmask = pcntxt_hmask & ~hmask;
+	mask = pcntxt_mask & ~mask;
 
-	xrstor_state(init_xstate_buf, lmask, hmask);
+	xrstor_state(init_xstate_buf, mask);
 
 	return 0;
@@ -160,8 +159,7 @@ fx_only:
 	 * memory layout. Restore just the FP/SSE and init all
 	 * the other extended state.
 	 */
-	xrstor_state(init_xstate_buf, pcntxt_lmask & ~XSTATE_FPSSE,
-		     pcntxt_hmask);
+	xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
 	return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
 }
@@ -231,8 +229,7 @@ void prepare_fx_sw_frame(void)
 	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
 	fx_sw_reserved.extended_size = sig_xstate_size;
-	fx_sw_reserved.xstate_bv = pcntxt_lmask |
-				   (((u64) (pcntxt_hmask)) << 32);
+	fx_sw_reserved.xstate_bv = pcntxt_mask;
 	fx_sw_reserved.xstate_size = xstate_size;
 #ifdef CONFIG_IA32_EMULATION
 	memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved,
@@ -263,11 +260,8 @@ void __cpuinit xsave_init(void)
 	/*
 	 * Enable all the features that the HW is capable of
 	 * and the Linux kernel is aware of.
-	 *
-	 * xsetbv();
 	 */
-	asm volatile(".byte 0x0f,0x01,0xd1" : : "c" (0),
-		     "a" (pcntxt_lmask), "d" (pcntxt_hmask));
+	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
 }
 
 /*
@@ -287,36 +281,31 @@ void __init xsave_cntxt_init(void)
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
+	pcntxt_mask = eax + ((u64)edx << 32);
 
-	pcntxt_lmask = eax;
-	pcntxt_hmask = edx;
-
-	if ((pcntxt_lmask & XSTATE_FPSSE) != XSTATE_FPSSE) {
-		printk(KERN_ERR "FP/SSE not shown under xsave features %x\n",
-		       pcntxt_lmask);
+	if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
+		printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n",
+		       pcntxt_mask);
 		BUG();
 	}
 
 	/*
 	 * for now OS knows only about FP/SSE
 	 */
-	pcntxt_lmask = pcntxt_lmask & XCNTXT_LMASK;
-	pcntxt_hmask = pcntxt_hmask & XCNTXT_HMASK;
+	pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
 	xsave_init();
 
 	/*
 	 * Recompute the context size for enabled features
 	 */
	cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
 	xstate_size = ebx;
 
 	prepare_fx_sw_frame();
 
 	setup_xstate_init();
-	printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%Lx, "
+	printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, "
 	       "cntxt size 0x%x\n",
-	       (pcntxt_lmask | ((u64) pcntxt_hmask << 32)), xstate_size);
+	       pcntxt_mask, xstate_size);
 }
...
 #ifndef __ASM_X86_XSAVE_H
 #define __ASM_X86_XSAVE_H
 
+#include <linux/types.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
@@ -14,8 +15,7 @@
 /*
  * These are the features that the OS can handle currently.
  */
-#define XCNTXT_LMASK	(XSTATE_FP | XSTATE_SSE)
-#define XCNTXT_HMASK	0x0
+#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX	"0x48, "
@@ -23,7 +23,8 @@
 #define REX_PREFIX
 #endif
 
-extern unsigned int xstate_size, pcntxt_hmask, pcntxt_lmask;
+extern unsigned int xstate_size;
+extern u64 pcntxt_mask;
 extern struct xsave_struct *init_xstate_buf;
 
 extern void xsave_cntxt_init(void);
@@ -73,12 +74,12 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 	return err;
 }
 
-static inline int xrestore_user(struct xsave_struct __user *buf,
-				unsigned int lmask,
-				unsigned int hmask)
+static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 {
 	int err;
 	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
 
 	__asm__ __volatile__("1:	.byte " REX_PREFIX "0x0f,0xae,0x2f\n"
 			     "2:\n"
@@ -96,8 +97,11 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 	return err;
 }
 
-static inline void xrstor_state(struct xsave_struct *fx, int lmask, int hmask)
+static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
 {
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
 	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
 		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 		     : "memory");
...
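
For context on the rdmsr/wrmsr mistake the commit message alludes to:
once a 64-bit quantity travels through an interface as two unsigned
ints, every recombination site is a chance to drop the high half. A
hypothetical illustration, not code from the kernel:

	#include <stdint.h>

	/* Hypothetical illustration of the split-halves hazard.  Shifting
	 * a 32-bit value left by 32 is undefined behaviour in C; on x86
	 * the shift count is masked, so the high word is silently lost. */
	uint64_t combine_buggy(uint32_t lo, uint32_t hi)
	{
		return lo | (hi << 32);			/* high half dropped */
	}

	uint64_t combine_fixed(uint32_t lo, uint32_t hi)
	{
		return lo | ((uint64_t)hi << 32);	/* widen, then shift */
	}

Keeping one u64 end to end, as this patch does with pcntxt_mask,
removes those combine/split sites entirely.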