Commit e9eee03e authored by Ingo Molnar, committed by H. Peter Anvin

x86, mce: clean up mce_64.c

This file has been modified many times over the years, by multiple
authors, so the general style and structure have diverged in a number
of areas, making this file hard to read.

So fix the coding style to match that of the rest of the x86 arch code.

[ Impact: cleanup ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 13503fa9
/*
 * Machine check handler.
+ *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/capability.h>
+#include <linux/miscdevice.h>
+#include <linux/ratelimit.h>
+#include <linux/kallsyms.h>
+#include <linux/rcupdate.h>
#include <linux/smp_lock.h>
+#include <linux/kobject.h>
+#include <linux/kdebug.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
#include <linux/string.h>
-#include <linux/rcupdate.h>
-#include <linux/kallsyms.h>
#include <linux/sysdev.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/capability.h>
-#include <linux/cpu.h>
-#include <linux/percpu.h>
-#include <linux/poll.h>
-#include <linux/thread_info.h>
#include <linux/ctype.h>
-#include <linux/kmod.h>
-#include <linux/kdebug.h>
-#include <linux/kobject.h>
+#include <linux/sched.h>
#include <linux/sysfs.h>
-#include <linux/ratelimit.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/poll.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
#include <asm/processor.h>
-#include <asm/msr.h>
-#include <asm/mce.h>
#include <asm/uaccess.h>
-#include <asm/smp.h>
#include <asm/idle.h>
+#include <asm/mce.h>
+#include <asm/msr.h>
+#include <asm/smp.h>
#define MISC_MCELOG_MINOR 227
atomic_t mce_entry;
static int mce_dont_init;
/*
 * Tolerant levels:
@@ -49,16 +50,16 @@ static int mce_dont_init;
 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 * 3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
static int banks;
static u64 *bank;
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = -1;
static atomic_t mce_events;
static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };
static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
@@ -89,19 +90,23 @@ static struct mce_log mcelog = {
void mce_log(struct mce *mce)
{
        unsigned next, entry;
        atomic_inc(&mce_events);
        mce->finished = 0;
        wmb();
        for (;;) {
                entry = rcu_dereference(mcelog.next);
                for (;;) {
-                       /* When the buffer fills up discard new entries. Assume
-                          that the earlier errors are the more interesting. */
+                       /*
+                        * When the buffer fills up discard new entries.
+                        * Assume that the earlier errors are the more
+                        * interesting ones:
+                        */
                        if (entry >= MCE_LOG_LEN) {
                                set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
                                return;
                        }
-                       /* Old left over entry. Skip. */
+                       /* Old left over entry. Skip: */
                        if (mcelog.entry[entry].finished) {
                                entry++;
                                continue;
@@ -264,12 +269,12 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 */
-void do_machine_check(struct pt_regs * regs, long error_code)
+void do_machine_check(struct pt_regs *regs, long error_code)
{
        struct mce m, panicm;
-       int panicm_found = 0;
        u64 mcestart = 0;
        int i;
+       int panicm_found = 0;
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE. If tolerant is cranked up, we'll try anyway.
@@ -293,6 +298,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
        mce_setup(&m);
        rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
        /* if the restart IP is not valid, we're done for */
        if (!(m.mcgstatus & MCG_STATUS_RIPV))
                no_way_out = 1;
@@ -356,23 +362,29 @@ void do_machine_check(struct pt_regs * regs, long error_code)
                mce_get_rip(&m, regs);
                mce_log(&m);
-               /* Did this bank cause the exception? */
-               /* Assume that the bank with uncorrectable errors did it,
-                  and that there is only a single one. */
-               if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
+               /*
+                * Did this bank cause the exception?
+                *
+                * Assume that the bank with uncorrectable errors did it,
+                * and that there is only a single one:
+                */
+               if ((m.status & MCI_STATUS_UC) &&
+                                       (m.status & MCI_STATUS_EN)) {
                        panicm = m;
                        panicm_found = 1;
                }
        }
-       /* If we didn't find an uncorrectable error, pick
-          the last one (shouldn't happen, just being safe). */
+       /*
+        * If we didn't find an uncorrectable error, pick
+        * the last one (shouldn't happen, just being safe).
+        */
        if (!panicm_found)
                panicm = m;
        /*
         * If we have decided that we just CAN'T continue, and the user
         * has not set tolerant to an insane level, give up and die.
         */
        if (no_way_out && tolerant < 3)
                mce_panic("Machine check", &panicm, mcestart);
@@ -451,10 +463,9 @@ void mce_log_therm_throt_event(__u64 status)
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */
static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
-static void mcheck_timer(unsigned long);
static DEFINE_PER_CPU(struct timer_list, mce_timer);
static void mcheck_timer(unsigned long data)
@@ -464,9 +475,10 @@ static void mcheck_timer(unsigned long data)
        WARN_ON(smp_processor_id() != data);
-       if (mce_available(&current_cpu_data))
+       if (mce_available(&current_cpu_data)) {
                machine_check_poll(MCP_TIMESTAMP,
                                &__get_cpu_var(mce_poll_banks));
+       }
        /*
         * Alert userspace if needed. If we logged an MCE, reduce the
@@ -501,6 +513,7 @@ int mce_notify_user(void)
        static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
        clear_thread_flag(TIF_MCE_NOTIFY);
        if (test_and_clear_bit(0, &notify_user)) {
                wake_up_interruptible(&mce_wait);
@@ -520,9 +533,10 @@ int mce_notify_user(void)
        return 0;
}
-/* see if the idle task needs to notify userspace */
+/* see if the idle task needs to notify userspace: */
static int
-mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
+mce_idle_callback(struct notifier_block *nfb, unsigned long action,
+                               void *unused)
{
        /* IDLE_END should be safe - interrupts are back on */
        if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
@@ -532,7 +546,7 @@ mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
}
static struct notifier_block mce_idle_notifier = {
        .notifier_call = mce_idle_callback,
};
static __init int periodic_mcheck_init(void)
@@ -547,8 +561,8 @@ __initcall(periodic_mcheck_init);
 */
static int mce_cap_init(void)
{
-       u64 cap;
        unsigned b;
+       u64 cap;
        rdmsrl(MSR_IA32_MCG_CAP, cap);
        b = cap & 0xff;
@@ -578,9 +592,9 @@ static int mce_cap_init(void)
static void mce_init(void *dummy)
{
-       mce_banks_t all_banks;
        u64 cap;
        int i;
+       mce_banks_t all_banks;
        /*
         * Log the machine checks left over from the previous reset.
@@ -605,14 +619,21 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c)
{
        /* This should be disabled by the BIOS, but isn't always */
        if (c->x86_vendor == X86_VENDOR_AMD) {
-               if (c->x86 == 15 && banks > 4)
-                       /* disable GART TBL walk error reporting, which trips off
-                          incorrectly with the IOMMU & 3ware & Cerberus. */
+               if (c->x86 == 15 && banks > 4) {
+                       /*
+                        * disable GART TBL walk error reporting, which
+                        * trips off incorrectly with the IOMMU & 3ware
+                        * & Cerberus:
+                        */
                        clear_bit(10, (unsigned long *)&bank[4]);
-               if(c->x86 <= 17 && mce_bootlog < 0)
-                       /* Lots of broken BIOS around that don't clear them
-                          by default and leave crap in there. Don't log. */
+               }
+               if (c->x86 <= 17 && mce_bootlog < 0) {
+                       /*
+                        * Lots of broken BIOS around that don't clear them
+                        * by default and leave crap in there. Don't log:
+                        */
                        mce_bootlog = 0;
+               }
        }
}
@@ -646,7 +667,7 @@ static void mce_init_timer(void)
/*
 * Called for each booted CPU to set up machine checks.
- * Must be called with preempt off.
+ * Must be called with preempt off:
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
@@ -669,8 +690,8 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 */
static DEFINE_SPINLOCK(mce_state_lock);
static int open_count; /* #times opened */
static int open_exclu; /* already open exclusive? */
static int mce_open(struct inode *inode, struct file *file)
{
@@ -680,6 +701,7 @@ static int mce_open(struct inode *inode, struct file *file)
        if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
                spin_unlock(&mce_state_lock);
                unlock_kernel();
                return -EBUSY;
        }
@@ -712,13 +734,14 @@ static void collect_tscs(void *data)
        rdtscll(cpu_tsc[smp_processor_id()]);
}
+static DEFINE_MUTEX(mce_read_mutex);
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
                        loff_t *off)
{
-       char __user *buf = ubuf;
        unsigned long *cpu_tsc;
-       static DEFINE_MUTEX(mce_read_mutex);
        unsigned prev, next;
+       char __user *buf = ubuf;
        int i, err;
        cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
@@ -732,6 +755,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
        if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
                mutex_unlock(&mce_read_mutex);
                kfree(cpu_tsc);
                return -EINVAL;
        }
@@ -770,6 +794,7 @@ timeout:
         * synchronize.
         */
        on_each_cpu(collect_tscs, cpu_tsc, 1);
        for (i = next; i < MCE_LOG_LEN; i++) {
                if (mcelog.entry[i].finished &&
                    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
@@ -782,6 +807,7 @@ timeout:
        }
        mutex_unlock(&mce_read_mutex);
        kfree(cpu_tsc);
        return err ? -EFAULT : buf - ubuf;
}
@@ -799,6 +825,7 @@ static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        switch (cmd) {
        case MCE_GET_RECORD_LEN:
                return put_user(sizeof(struct mce), p);
@@ -810,6 +837,7 @@ static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                do {
                        flags = mcelog.flags;
                } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
                return put_user(flags, p);
        }
        default:
@@ -818,11 +846,11 @@ static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
}
static const struct file_operations mce_chrdev_ops = {
        .open = mce_open,
        .release = mce_release,
        .read = mce_read,
        .poll = mce_poll,
        .unlocked_ioctl = mce_ioctl,
};
static struct miscdevice mce_log_device = {
@@ -891,13 +919,16 @@ static int mce_shutdown(struct sys_device *dev)
        return mce_disable();
}
-/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
-   Only one CPU is active at this time, the others get readded later using
-   CPU hotplug. */
+/*
+ * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
+ * Only one CPU is active at this time, the others get re-added later using
+ * CPU hotplug:
+ */
static int mce_resume(struct sys_device *dev)
{
        mce_init(NULL);
        mce_cpu_features(&current_cpu_data);
        return 0;
}
@@ -916,14 +947,16 @@ static void mce_restart(void)
}
static struct sysdev_class mce_sysclass = {
        .suspend = mce_suspend,
        .shutdown = mce_shutdown,
        .resume = mce_resume,
        .name = "machinecheck",
};
DEFINE_PER_CPU(struct sys_device, device_mce);
-void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;
+__cpuinitdata
+void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
@@ -937,9 +970,12 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata
                        const char *buf, size_t siz) { \
        char *end; \
        unsigned long new = simple_strtoul(buf, &end, 0); \
-       if (end == buf) return -EINVAL; \
+       \
+       if (end == buf) \
+               return -EINVAL; \
        var = new; \
        start; \
+       \
        return end-buf; \
} \
static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
@@ -950,6 +986,7 @@ static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
                        char *buf)
{
        u64 b = bank[attr - bank_attrs];
        return sprintf(buf, "%llx\n", b);
}
@@ -958,15 +995,18 @@ static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
{
        char *end;
        u64 new = simple_strtoull(buf, &end, 0);
        if (end == buf)
                return -EINVAL;
        bank[attr - bank_attrs] = new;
        mce_restart();
        return end-buf;
}
-static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
-                       char *buf)
+static ssize_t
+show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
        strcpy(buf, trigger);
        strcat(buf, "\n");
@@ -974,21 +1014,27 @@ static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
}
static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
-                       const char *buf,size_t siz)
+                       const char *buf, size_t siz)
{
        char *p;
        int len;
        strncpy(trigger, buf, sizeof(trigger));
        trigger[sizeof(trigger)-1] = 0;
        len = strlen(trigger);
        p = strchr(trigger, '\n');
-       if (*p) *p = 0;
+       if (*p)
+               *p = 0;
        return len;
}
static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
-ACCESSOR(check_interval,check_interval,mce_restart())
+ACCESSOR(check_interval, check_interval, mce_restart())
static struct sysdev_attribute *mce_attributes[] = {
        &attr_tolerant.attr, &attr_check_interval, &attr_trigger,
        NULL
@@ -996,7 +1042,7 @@ static struct sysdev_attribute *mce_attributes[] = {
static cpumask_var_t mce_device_initialized;
-/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
+/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_create_device(unsigned int cpu)
{
        int err;
@@ -1006,15 +1052,15 @@ static __cpuinit int mce_create_device(unsigned int cpu)
                return -EIO;
        memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
-       per_cpu(device_mce,cpu).id = cpu;
-       per_cpu(device_mce,cpu).cls = &mce_sysclass;
-       err = sysdev_register(&per_cpu(device_mce,cpu));
+       per_cpu(device_mce, cpu).id = cpu;
+       per_cpu(device_mce, cpu).cls = &mce_sysclass;
+       err = sysdev_register(&per_cpu(device_mce, cpu));
        if (err)
                return err;
        for (i = 0; mce_attributes[i]; i++) {
-               err = sysdev_create_file(&per_cpu(device_mce,cpu),
+               err = sysdev_create_file(&per_cpu(device_mce, cpu),
                                        mce_attributes[i]);
                if (err)
                        goto error;
@@ -1035,10 +1081,10 @@ error2:
        }
error:
        while (--i >= 0) {
-               sysdev_remove_file(&per_cpu(device_mce,cpu),
+               sysdev_remove_file(&per_cpu(device_mce, cpu),
                                        mce_attributes[i]);
        }
-       sysdev_unregister(&per_cpu(device_mce,cpu));
+       sysdev_unregister(&per_cpu(device_mce, cpu));
        return err;
}
@@ -1051,12 +1097,12 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
                return;
        for (i = 0; mce_attributes[i]; i++)
-               sysdev_remove_file(&per_cpu(device_mce,cpu),
+               sysdev_remove_file(&per_cpu(device_mce, cpu),
                                        mce_attributes[i]);
        for (i = 0; i < banks; i++)
                sysdev_remove_file(&per_cpu(device_mce, cpu),
                                        &bank_attrs[i]);
-       sysdev_unregister(&per_cpu(device_mce,cpu));
+       sysdev_unregister(&per_cpu(device_mce, cpu));
        cpumask_clear_cpu(cpu, mce_device_initialized);
}
@@ -1076,11 +1122,12 @@ static void mce_disable_cpu(void *h)
static void mce_reenable_cpu(void *h)
{
-       int i;
        unsigned long action = *(unsigned long *)h;
+       int i;
        if (!mce_available(&current_cpu_data))
                return;
        if (!(action & CPU_TASKS_FROZEN))
                cmci_reenable();
        for (i = 0; i < banks; i++)
@@ -1088,8 +1135,8 @@ static void mce_reenable_cpu(void *h)
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
-                       unsigned long action, void *hcpu)
+static int __cpuinit
+mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct timer_list *t = &per_cpu(mce_timer, cpu);
@@ -1142,12 +1189,14 @@ static __init int mce_init_banks(void)
        for (i = 0; i < banks; i++) {
                struct sysdev_attribute *a = &bank_attrs[i];
                a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i);
                if (!a->attr.name)
                        goto nomem;
+
                a->attr.mode = 0644;
                a->show = show_bank;
                a->store = set_bank;
        }
        return 0;
@@ -1156,6 +1205,7 @@ nomem:
        kfree(bank_attrs[i].attr.name);
        kfree(bank_attrs);
        bank_attrs = NULL;
        return -ENOMEM;
}
@@ -1185,6 +1235,7 @@ static __init int mce_init_device(void)
        register_hotcpu_notifier(&mce_cpu_notifier);
        misc_register(&mce_log_device);
        return err;
}