Commit f6d6ae96 authored by Borislav Petkov

amd64_edac: unify MCGCTL ECC switching

Unify almost identical code into one function and remove NUMA-specific
usage (specifically cpumask_of_node()) in favor of generic topology
methods.

Remove unused defines, while at it.
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
parent ba578cb3
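
Reviewer's note on the mechanism before the diff: the unified helper reads MCG_CTL on every core of a northbridge node into an array of struct msr, flips the NB error-reporting enable bit (NBE), and writes the values back. rdmsr_on_cpus() fills that array in cpumask iteration order, which is why the loops in the patch advance a running index in lockstep with for_each_cpu(). Below is a minimal sketch of that pattern, assuming the kernel's rdmsr_on_cpus()/wrmsr_on_cpus() helpers and the K8_MSR_MCGCTL_NBE define from amd64_edac.h; the function name sketch_toggle_nbe is hypothetical and not part of the commit:

#include <linux/cpumask.h>
#include <asm/msr.h>

/*
 * Hypothetical, condensed form of the loop the new helper centralizes.
 * msrs[] must already have been filled by
 * rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); entry idx corresponds to
 * the idx-th cpu of cmask, in iteration order.
 */
static void sketch_toggle_nbe(const struct cpumask *cmask, struct msr *msrs,
                              bool on)
{
        int cpu, idx = 0;

        for_each_cpu(cpu, cmask) {
                if (on)
                        msrs[idx].l |= K8_MSR_MCGCTL_NBE;  /* enable NB MCE bank */
                else
                        msrs[idx].l &= ~K8_MSR_MCGCTL_NBE; /* disable it again */
                idx++;
        }

        /* push the modified MCG_CTL values back to each core in the mask */
        wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
}

The real helper in the diff additionally allocates the cpumask and the msr array, and records the prior hardware state in pvt->flags.ecc_report.
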
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2624,6 +2624,109 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
         return empty;
 }
 
+/* get all cores on this DCT */
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+{
+        int cpu;
+
+        for_each_online_cpu(cpu)
+                if (amd_get_nb_id(cpu) == nid)
+                        cpumask_set_cpu(cpu, mask);
+}
+
+/* check MCG_CTL on all the cpus on this node */
+static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+{
+        cpumask_var_t mask;
+        struct msr *msrs;
+        int cpu, nbe, idx = 0;
+        bool ret = false;
+
+        if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+                amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+                             __func__);
+                return false;
+        }
+
+        get_cpus_on_this_dct_cpumask(mask, nid);
+
+        msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+        if (!msrs) {
+                amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+                             __func__);
+                free_cpumask_var(mask);
+                return false;
+        }
+
+        rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+
+        for_each_cpu(cpu, mask) {
+                nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+
+                debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+                        cpu, msrs[idx].q,
+                        (nbe ? "enabled" : "disabled"));
+
+                if (!nbe)
+                        goto out;
+
+                idx++;
+        }
+        ret = true;
+
+out:
+        kfree(msrs);
+        free_cpumask_var(mask);
+        return ret;
+}
+
+static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+{
+        cpumask_var_t cmask;
+        struct msr *msrs = NULL;
+        int cpu, idx = 0;
+
+        if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
+                amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+                             __func__);
+                return -ENOMEM;
+        }
+
+        get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+
+        msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
+        if (!msrs) {
+                amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+                             __func__);
+                free_cpumask_var(cmask);
+                return -ENOMEM;
+        }
+
+        rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+        for_each_cpu(cpu, cmask) {
+                if (on) {
+                        if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+                                pvt->flags.ecc_report = 1;
+
+                        msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+                } else {
+                        /*
+                         * Turn off ECC reporting only when it was off before
+                         */
+                        if (!pvt->flags.ecc_report)
+                                msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+                }
+                idx++;
+        }
+        wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+        kfree(msrs);
+        free_cpumask_var(cmask);
+
+        return 0;
+}
+
 /*
  * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
  * enable it.
@@ -2631,17 +2734,12 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 {
         struct amd64_pvt *pvt = mci->pvt_info;
-        const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
-        int cpu, idx = 0, err = 0;
-        struct msr msrs[cpumask_weight(cpumask)];
-        u32 value;
-        u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+        int err = 0;
+        u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
         if (!ecc_enable_override)
                 return;
 
-        memset(msrs, 0, sizeof(msrs));
-
         amd64_printk(KERN_WARNING,
                 "'ecc_enable_override' parameter is active, "
                 "Enabling AMD ECC hardware now: CAUTION\n");
@@ -2657,16 +2755,9 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
         value |= mask;
         pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
 
-        rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-
-        for_each_cpu(cpu, cpumask) {
-                if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
-                        set_bit(idx, &pvt->old_mcgctl);
-
-                msrs[idx].l |= K8_MSR_MCGCTL_NBE;
-                idx++;
-        }
-        wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+        if (amd64_toggle_ecc_err_reporting(pvt, ON))
+                amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
+                                           "MCGCTL!\n");
 
         err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
         if (err)
@@ -2707,17 +2798,12 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 {
-        const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
-        int cpu, idx = 0, err = 0;
-        struct msr msrs[cpumask_weight(cpumask)];
-        u32 value;
-        u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+        int err = 0;
+        u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
         if (!pvt->nbctl_mcgctl_saved)
                 return;
 
-        memset(msrs, 0, sizeof(msrs));
-
         err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
         if (err)
                 debugf0("Reading K8_NBCTL failed\n");
@@ -2727,72 +2813,9 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
         /* restore the NB Enable MCGCTL bit */
         pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
 
-        rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-
-        for_each_cpu(cpu, cpumask) {
-                msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
-                msrs[idx].l |=
-                        test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
-                idx++;
-        }
-        wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-}
-
-/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
-{
-        int cpu;
-
-        for_each_online_cpu(cpu)
-                if (amd_get_nb_id(cpu) == nid)
-                        cpumask_set_cpu(cpu, mask);
-}
-
-/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
-{
-        cpumask_var_t mask;
-        struct msr *msrs;
-        int cpu, nbe, idx = 0;
-        bool ret = false;
-
-        if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
-                amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
-                             __func__);
-                return false;
-        }
-
-        get_cpus_on_this_dct_cpumask(mask, nid);
-
-        msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
-        if (!msrs) {
-                amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
-                             __func__);
-                free_cpumask_var(mask);
-                return false;
-        }
-
-        rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
-
-        for_each_cpu(cpu, mask) {
-                nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
-
-                debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
-                        cpu, msrs[idx].q,
-                        (nbe ? "enabled" : "disabled"));
-
-                if (!nbe)
-                        goto out;
-
-                idx++;
-        }
-        ret = true;
-
-out:
-        kfree(msrs);
-        free_cpumask_var(mask);
-        return ret;
-}
+        if (amd64_toggle_ecc_err_reporting(pvt, OFF))
+                amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
+                                           "MCGCTL!\n");
+}
 
 /*
@@ -2921,7 +2944,6 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
         pvt->ext_model = boot_cpu_data.x86_model >> 4;
         pvt->mc_type_index = mc_type_index;
         pvt->ops = family_ops(mc_type_index);
-        pvt->old_mcgctl = 0;
 
         /*
          * We have the dram_f2_ctl device as an argument, now go reserve its
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -147,6 +147,8 @@
 #define MAX_CS_COUNT            8
 #define DRAM_REG_COUNT          8
 
+#define ON                      true
+#define OFF                     false
 
 /*
  * PCI-defined configuration space registers
@@ -386,10 +388,7 @@ enum {
 #define K8_NBCAP_DUAL_NODE      BIT(1)
 #define K8_NBCAP_DCT_DUAL       BIT(0)
 
-/*
- * MSR Regs
- */
-#define K8_MSR_MCGCTL           0x017b
+/* MSRs */
 #define K8_MSR_MCGCTL_NBE       BIT(4)
 
 #define K8_MSR_MC4CTL           0x0410
@@ -487,7 +486,6 @@ struct amd64_pvt {
         /* Save old hw registers' values before we modified them */
         u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
         u32 old_nbctl;
-        unsigned long old_mcgctl; /* per core on this node */
 
         /* MC Type Index value: socket F vs Family 10h */
         u32 mc_type_index;
@@ -495,6 +493,7 @@ struct amd64_pvt {
         /* misc settings */
         struct flags {
                 unsigned long cf8_extcfg:1;
+                unsigned long ecc_report:1;
         } flags;
 };
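
A note on the design choice (reviewer comment, not part of the patch): the removed old_mcgctl bitmask tracked NBE state per core and restored it bit by bit, while the new flags.ecc_report bit is node-wide; it is set if any core already had NBE on, and the OFF path clears NBE only when reporting had been off beforehand. Condensed from the hunks above, the two call sites now read:

        if (amd64_toggle_ecc_err_reporting(pvt, ON))    /* enable path */
                amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
                                           "MCGCTL!\n");

        if (amd64_toggle_ecc_err_reporting(pvt, OFF))   /* restore path */
                amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
                                           "MCGCTL!\n");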