Commit 6152e4b1 authored by H. Peter Anvin, committed by Ingo Molnar

x86, xsave: keep the XSAVE feature mask as an u64

The XSAVE feature mask is a 64-bit number; keep it that way, in order
to avoid the mistake done with rdmsr/wrmsr.  Use the xsetbv() function
provided in the previous patch.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b4a091a6
......@@ -552,18 +552,17 @@ static int restore_i387_xsave(void __user *buf)
(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
struct xsave_hdr_struct *xsave_hdr =
&current->thread.xstate->xsave.xsave_hdr;
unsigned int lmask, hmask;
u64 mask;
int err;
if (check_for_xstate(fx, buf, &fx_sw_user))
goto fx_only;
lmask = fx_sw_user.xstate_bv;
hmask = fx_sw_user.xstate_bv >> 32;
mask = fx_sw_user.xstate_bv;
err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);
xsave_hdr->xstate_bv &= (pcntxt_lmask | (((u64) pcntxt_hmask) << 32));
xsave_hdr->xstate_bv &= pcntxt_mask;
/*
* These bits must be zero.
*/
......@@ -573,9 +572,8 @@ static int restore_i387_xsave(void __user *buf)
* Init the state that is not present in the memory layout
* and enabled by the OS.
*/
lmask = ~(pcntxt_lmask & ~lmask);
hmask = ~(pcntxt_hmask & ~hmask);
xsave_hdr->xstate_bv &= (lmask | (((u64) hmask) << 32));
mask = ~(pcntxt_mask & ~mask);
xsave_hdr->xstate_bv &= mask;
return err;
fx_only:
......
......@@ -9,11 +9,12 @@
#ifdef CONFIG_IA32_EMULATION
#include <asm/sigcontext32.h>
#endif
#include <asm/xcr.h>
/*
* Supported feature mask by the CPU and the kernel.
*/
unsigned int pcntxt_hmask, pcntxt_lmask;
u64 pcntxt_mask;
struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
......@@ -127,30 +128,28 @@ int save_i387_xstate(void __user *buf)
int restore_user_xstate(void __user *buf)
{
struct _fpx_sw_bytes fx_sw_user;
unsigned int lmask, hmask;
u64 mask;
int err;
if (((unsigned long)buf % 64) ||
check_for_xstate(buf, buf, &fx_sw_user))
goto fx_only;
lmask = fx_sw_user.xstate_bv;
hmask = fx_sw_user.xstate_bv >> 32;
mask = fx_sw_user.xstate_bv;
/*
* restore the state passed by the user.
*/
err = xrestore_user(buf, lmask, hmask);
err = xrestore_user(buf, mask);
if (err)
return err;
/*
* init the state skipped by the user.
*/
lmask = pcntxt_lmask & ~lmask;
hmask = pcntxt_hmask & ~hmask;
mask = pcntxt_mask & ~mask;
xrstor_state(init_xstate_buf, lmask, hmask);
xrstor_state(init_xstate_buf, mask);
return 0;
......@@ -160,8 +159,7 @@ fx_only:
* memory layout. Restore just the FP/SSE and init all
* the other extended state.
*/
xrstor_state(init_xstate_buf, pcntxt_lmask & ~XSTATE_FPSSE,
pcntxt_hmask);
xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
}
......@@ -231,8 +229,7 @@ void prepare_fx_sw_frame(void)
fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
fx_sw_reserved.extended_size = sig_xstate_size;
fx_sw_reserved.xstate_bv = pcntxt_lmask |
(((u64) (pcntxt_hmask)) << 32);
fx_sw_reserved.xstate_bv = pcntxt_mask;
fx_sw_reserved.xstate_size = xstate_size;
#ifdef CONFIG_IA32_EMULATION
memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved,
......@@ -263,11 +260,8 @@ void __cpuinit xsave_init(void)
/*
* Enable all the features that the HW is capable of
* and the Linux kernel is aware of.
*
* xsetbv();
*/
asm volatile(".byte 0x0f,0x01,0xd1" : : "c" (0),
"a" (pcntxt_lmask), "d" (pcntxt_hmask));
xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}
/*
......@@ -287,36 +281,31 @@ void __init xsave_cntxt_init(void)
unsigned int eax, ebx, ecx, edx;
cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
pcntxt_mask = eax + ((u64)edx << 32);
pcntxt_lmask = eax;
pcntxt_hmask = edx;
if ((pcntxt_lmask & XSTATE_FPSSE) != XSTATE_FPSSE) {
printk(KERN_ERR "FP/SSE not shown under xsave features %x\n",
pcntxt_lmask);
if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n",
pcntxt_mask);
BUG();
}
/*
* for now OS knows only about FP/SSE
*/
pcntxt_lmask = pcntxt_lmask & XCNTXT_LMASK;
pcntxt_hmask = pcntxt_hmask & XCNTXT_HMASK;
pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
xsave_init();
/*
* Recompute the context size for enabled features
*/
cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
xstate_size = ebx;
prepare_fx_sw_frame();
setup_xstate_init();
printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%Lx, "
printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, "
"cntxt size 0x%x\n",
(pcntxt_lmask | ((u64) pcntxt_hmask << 32)), xstate_size);
pcntxt_mask, xstate_size);
}
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/i387.h>
......@@ -14,8 +15,7 @@
/*
* These are the features that the OS can handle currently.
*/
#define XCNTXT_LMASK (XSTATE_FP | XSTATE_SSE)
#define XCNTXT_HMASK 0x0
#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE)
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
......@@ -23,7 +23,8 @@
#define REX_PREFIX
#endif
extern unsigned int xstate_size, pcntxt_hmask, pcntxt_lmask;
extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern struct xsave_struct *init_xstate_buf;
extern void xsave_cntxt_init(void);
......@@ -73,12 +74,12 @@ static inline int xsave_user(struct xsave_struct __user *buf)
return err;
}
static inline int xrestore_user(struct xsave_struct __user *buf,
unsigned int lmask,
unsigned int hmask)
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
int err;
struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
u32 lmask = mask;
u32 hmask = mask >> 32;
__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
"2:\n"
......@@ -96,8 +97,11 @@ static inline int xrestore_user(struct xsave_struct __user *buf,
return err;
}
static inline void xrstor_state(struct xsave_struct *fx, int lmask, int hmask)
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment