Commit 059bf0f6 authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86-64: Merge msr.c with i386 version

The only difference was the inline assembly, so move that into
asm/msr.h and merge with the i386 version.

This adds some missing sysfs support code to x86-64.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 55679edb
@@ -46,3 +46,4 @@ microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o
intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o
i8237-y += ../../i386/kernel/i8237.o
msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
/* ----------------------------------------------------------------------- *
*
* Copyright 2000 H. Peter Anvin - All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
* USA; either version 2 of the License, or (at your option) any later
* version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* msr.c
*
* x86 MSR access device
*
* This device is accessed by lseek() to the appropriate register number
* and then read/write in chunks of 8 bytes. A larger size means multiple
* reads or writes of the same register.
*
* This driver uses /dev/cpu/%d/msr where %d is the minor number, and on
* an SMP box will direct the access to CPU %d.
*/
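/*
 * Illustrative user-space access (not part of this driver): the MSR
 * number is selected with lseek() and each read() transfers 8 bytes,
 * low dword first.  The device path and MSR number below are example
 * values only.
 *
 *	int fd = open("/dev/cpu/0/msr", O_RDONLY);
 *	unsigned long long val;
 *	lseek(fd, 0x10, SEEK_SET);	// e.g. 0x10 = TSC on x86
 *	read(fd, &val, 8);		// low 32 bits first, then high 32 bits
 *	close(fd);
 */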
#include <linux/module.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
/* Note: "err" is handled in a funny way below. Otherwise one version
of gcc or another breaks. */
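/* If the wrmsr/rdmsr at label 1 faults (e.g. a non-existent MSR raises #GP),
   the __ex_table entry below redirects the fault handler to label 3, which
   loads -EIO into err and jumps back to label 2; otherwise err stays 0. */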
static inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
{
int err;
asm volatile ("1: wrmsr\n"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl %4,%0\n"
" jmp 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n" " .quad 1b,3b\n" ".previous":"=&bDS" (err)
:"a"(eax), "d"(edx), "c"(reg), "i"(-EIO), "0"(0));
return err;
}
static inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
{
int err;
asm volatile ("1: rdmsr\n"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl %4,%0\n"
" jmp 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 1b,3b\n"
".previous":"=&bDS" (err), "=a"(*eax), "=d"(*edx)
:"c"(reg), "i"(-EIO), "0"(0));
return err;
}
#ifdef CONFIG_SMP
struct msr_command {
int cpu;
int err;
u32 reg;
u32 data[2];
};
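/* The handlers below run on every other CPU via smp_call_function(); only
   the CPU whose id matches cmd->cpu touches the MSR and fills in cmd->err. */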
static void msr_smp_wrmsr(void *cmd_block)
{
struct msr_command *cmd = (struct msr_command *)cmd_block;
if (cmd->cpu == smp_processor_id())
cmd->err = wrmsr_eio(cmd->reg, cmd->data[0], cmd->data[1]);
}
static void msr_smp_rdmsr(void *cmd_block)
{
struct msr_command *cmd = (struct msr_command *)cmd_block;
if (cmd->cpu == smp_processor_id())
cmd->err = rdmsr_eio(cmd->reg, &cmd->data[0], &cmd->data[1]);
}
static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
{
struct msr_command cmd;
int ret;
preempt_disable();
if (cpu == smp_processor_id()) {
ret = wrmsr_eio(reg, eax, edx);
} else {
cmd.cpu = cpu;
cmd.reg = reg;
cmd.data[0] = eax;
cmd.data[1] = edx;
smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
ret = cmd.err;
}
preempt_enable();
return ret;
}
static inline int do_rdmsr(int cpu, u32 reg, u32 * eax, u32 * edx)
{
struct msr_command cmd;
int ret;
preempt_disable();
if (cpu == smp_processor_id()) {
ret = rdmsr_eio(reg, eax, edx);
} else {
cmd.cpu = cpu;
cmd.reg = reg;
smp_call_function(msr_smp_rdmsr, &cmd, 1, 1);
*eax = cmd.data[0];
*edx = cmd.data[1];
ret = cmd.err;
}
preempt_enable();
return ret;
}
#else /* ! CONFIG_SMP */
static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
{
return wrmsr_eio(reg, eax, edx);
}
static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
{
return rdmsr_eio(reg, eax, edx);
}
#endif /* ! CONFIG_SMP */
static loff_t msr_seek(struct file *file, loff_t offset, int orig)
{
loff_t ret = -EINVAL;
lock_kernel();
switch (orig) {
case 0:
file->f_pos = offset;
ret = file->f_pos;
break;
case 1:
file->f_pos += offset;
ret = file->f_pos;
}
unlock_kernel();
return ret;
}
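/* The file position set above is the MSR number used below: msr_read() and
   msr_write() transfer one or more 8-byte chunks, all against that same
   register, on the CPU selected by the device minor number. */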
static ssize_t msr_read(struct file *file, char __user * buf,
size_t count, loff_t * ppos)
{
u32 __user *tmp = (u32 __user *) buf;
u32 data[2];
size_t rv;
u32 reg = *ppos;
int cpu = iminor(file->f_dentry->d_inode);
int err;
if (count % 8)
return -EINVAL; /* Invalid chunk size */
for (rv = 0; count; count -= 8) {
err = do_rdmsr(cpu, reg, &data[0], &data[1]);
if (err)
return err;
if (copy_to_user(tmp, &data, 8))
return -EFAULT;
tmp += 2;
}
return ((char __user *)tmp) - buf;
}
static ssize_t msr_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
const u32 __user *tmp = (const u32 __user *)buf;
u32 data[2];
size_t rv;
u32 reg = *ppos;
int cpu = iminor(file->f_dentry->d_inode);
int err;
if (count % 8)
return -EINVAL; /* Invalid chunk size */
for (rv = 0; count; count -= 8) {
if (copy_from_user(&data, tmp, 8))
return -EFAULT;
err = do_wrmsr(cpu, reg, data[0], data[1]);
if (err)
return err;
tmp += 2;
}
return ((char __user *)tmp) - buf;
}
static int msr_open(struct inode *inode, struct file *file)
{
unsigned int cpu = iminor(file->f_dentry->d_inode);
struct cpuinfo_x86 *c = &(cpu_data)[cpu];
if (cpu >= NR_CPUS || !cpu_online(cpu))
return -ENXIO; /* No such CPU */
if (!cpu_has(c, X86_FEATURE_MSR))
return -EIO; /* MSR not supported */
return 0;
}
/*
* File operations we support
*/
static struct file_operations msr_fops = {
.owner = THIS_MODULE,
.llseek = msr_seek,
.read = msr_read,
.write = msr_write,
.open = msr_open,
};
static int __init msr_init(void)
{
if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
printk(KERN_ERR "msr: unable to get major %d for msr\n",
MSR_MAJOR);
return -EBUSY;
}
return 0;
}
static void __exit msr_exit(void)
{
unregister_chrdev(MSR_MAJOR, "cpu/msr");
}
module_init(msr_init);
module_exit(msr_exit);
MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
MODULE_DESCRIPTION("x86 generic MSR driver");
MODULE_LICENSE("GPL");
@@ -40,11 +40,26 @@
" .quad 2b,3b\n\t" \
".previous" \
: "=a" (ret__) \
: "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
ret__; })
#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
#define rdmsr_safe(msr,a,b) \
({ int ret__; \
asm volatile ("1: rdmsr\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl %4,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 8\n" \
" .quad 1b,3b\n" \
".previous":"=&bDS" (ret__), "=a"(a), "=d"(b)\
:"c"(msr), "i"(-EIO), "0"(0)); \
ret__; })
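/*
 * Illustrative only (not part of this patch): rdmsr_safe() evaluates to 0 on
 * success and -EIO if the rdmsr faulted, e.g.:
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(0x17, lo, hi))	// 0x17 is just an example MSR number
 *		...			// MSR not readable on this CPU
 */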
#define rdtsc(low,high) \
__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
...