Commit d80a7920 authored by Linus Torvalds

Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6

* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6:
  [PATCH] i386: Fix some warnings added by earlier patch
  [PATCH] x86-64: Always flush all pages in change_page_attr
  [PATCH] x86: Remove noreplacement option
  [PATCH] x86-64: make GART PTEs uncacheable
parents 32bd33e2 8689b517
@@ -293,7 +293,3 @@ Debugging
 		stuck (default)
 
 Miscellaneous
-
-  noreplacement  Don't replace instructions with more appropriate ones
-                 for the CPU. This may be useful on asymmetric MP systems
-                 where some CPUs have less capabilities than others.
@@ -5,15 +5,9 @@
 #include <asm/alternative.h>
 #include <asm/sections.h>
 
-static int no_replacement = 0;
 static int smp_alt_once = 0;
 static int debug_alternative = 0;
 
-static int __init noreplacement_setup(char *s)
-{
-	no_replacement = 1;
-	return 1;
-}
 static int __init bootonly(char *str)
 {
 	smp_alt_once = 1;
@@ -25,7 +19,6 @@ static int __init debug_alt(char *str)
 	return 1;
 }
 
-__setup("noreplacement", noreplacement_setup);
 __setup("smp-alt-boot", bootonly);
 __setup("debug-alternative", debug_alt);
@@ -252,9 +245,6 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 	struct smp_alt_module *smp;
 	unsigned long flags;
 
-	if (no_replacement)
-		return;
-
 	if (smp_alt_once) {
 		if (boot_cpu_has(X86_FEATURE_UP))
 			alternatives_smp_unlock(locks, locks_end,
@@ -289,7 +279,7 @@ void alternatives_smp_module_del(struct module *mod)
 	struct smp_alt_module *item;
 	unsigned long flags;
 
-	if (no_replacement || smp_alt_once)
+	if (smp_alt_once)
 		return;
 
 	spin_lock_irqsave(&smp_alt, flags);
@@ -320,7 +310,7 @@ void alternatives_smp_switch(int smp)
 		return;
 #endif
 
-	if (no_replacement || smp_alt_once)
+	if (smp_alt_once)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
@@ -386,13 +376,6 @@ extern struct paravirt_patch __start_parainstructions[],
 void __init alternative_instructions(void)
 {
 	unsigned long flags;
-	if (no_replacement) {
-		printk(KERN_INFO "(SMP-)alternatives turned off\n");
-		free_init_pages("SMP alternatives",
-				(unsigned long)__smp_alt_begin,
-				(unsigned long)__smp_alt_end);
-		return;
-	}
 
 	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
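With the noreplacement escape hatch removed, apply_alternatives() always runs at boot. For context on what it patches: call sites are declared with the alternative() macro, which emits the generic instruction plus a record in a dedicated section. A hedged example, quoted from memory from the i386 headers of that era (treat the exact operands as illustrative, not as part of this commit):

/* Roughly how i386 defined its memory barriers at the time: the generic
 * locked add is emitted inline, and apply_alternatives() rewrites it to a
 * single fence instruction on CPUs advertising SSE2 (X86_FEATURE_XMM2). */
#define mb()  alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)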
@@ -126,7 +126,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 	for_each_possible_cpu (cpu) {
-		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
 			return 0;
 	}
 	return 1;
@@ -142,7 +142,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr)
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 	for_each_possible_cpu (cpu) {
-		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
 			return 0;
 	}
 	return 1;
@@ -157,7 +157,7 @@ static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
@@ -171,7 +171,7 @@ static void __release_perfctr_nmi(int cpu, unsigned int msr)
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]);
 }
 
 int reserve_perfctr_nmi(unsigned int msr)
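The only change in these four hunks is the trailing [0]. The warnings being fixed are presumably incompatible-pointer-type warnings: assuming perfctr_nmi_owner is declared as a per-CPU array of unsigned long (the declaration is not part of this diff), &per_cpu(perfctr_nmi_owner, cpu) has type unsigned long (*)[N], while the bitops expect unsigned long *; taking &...[0] yields the element pointer. A standalone userspace C sketch of the type distinction, with a hypothetical my_test_bit() standing in for the kernel helper:

#include <stdio.h>

/* Same shape as the kernel's test_bit(): takes a pointer to unsigned long. */
static int my_test_bit(int nr, const unsigned long *addr)
{
	return (addr[nr / (8 * sizeof(long))] >> (nr % (8 * sizeof(long)))) & 1UL;
}

int main(void)
{
	unsigned long owner[1] = { 0x5 };	/* bits 0 and 2 set */

	/* my_test_bit(0, &owner) would warn: "unsigned long (*)[1]" is not
	 * "const unsigned long *".  &owner[0] has the expected type. */
	printf("bit0=%d bit1=%d bit2=%d\n",
	       my_test_bit(0, &owner[0]),
	       my_test_bit(1, &owner[0]),
	       my_test_bit(2, &owner[0]));
	return 0;
}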
@@ -519,7 +519,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
 	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
 	if (!gatt)
-		panic("Cannot allocate GATT table");
+		panic("Cannot allocate GATT table");
+	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
+		panic("Could not set GART PTEs to uncacheable pages");
+	global_flush_tlb();
+
 	memset(gatt, 0, gatt_size);
 	agp_gatt_table = gatt;
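The repeated panic() line above appears to be a whitespace-only -/+ pair; the substantive addition is the change_page_attr_addr() / global_flush_tlb() sequence, which remaps the freshly allocated GATT as uncacheable before the GART hardware starts walking it. The change_page_attr family defers the real flush, so the caller has to follow up with global_flush_tlb(). A minimal sketch of the same allocate / remap-uncacheable / flush ordering under that assumption (helper name and error handling are illustrative, not from this commit):

#include <linux/gfp.h>
#include <linux/string.h>
#include <asm/cacheflush.h>	/* change_page_attr_addr(), global_flush_tlb() */
#include <asm/page.h>		/* PAGE_SHIFT, get_order() */
#include <asm/pgtable.h>	/* PAGE_KERNEL_NOCACHE */

/* Hypothetical helper: allocate a page-aligned table that a bus-master
 * device reads directly, and make the CPU's mapping of it uncacheable. */
static void *alloc_uncached_table(unsigned long size)
{
	void *p = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

	if (!p)
		return NULL;
	/* Flip the kernel mapping of these pages to PAGE_KERNEL_NOCACHE. */
	if (change_page_attr_addr((unsigned long)p, size >> PAGE_SHIFT,
				  PAGE_KERNEL_NOCACHE))
		return NULL;	/* attribute restore/free omitted for brevity */
	/* The new attribute only becomes reliable after a global flush. */
	global_flush_tlb();
	memset(p, 0, size);
	return p;
}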
@@ -81,8 +81,8 @@ static void flush_kernel_map(void *arg)
 		void *adr = page_address(pg);
 		if (cpu_has_clflush)
 			cache_flush_page(adr);
-		__flush_tlb_one(adr);
 	}
+	__flush_tlb_all();
 }
 
 static inline void flush_map(struct list_head *l)
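After this hunk, flush_kernel_map() no longer invalidates each changed page individually; it issues one global TLB flush once the cache-flush loop is done, which also covers large-page mappings that change_page_attr() split or re-merged. Reconstructed from the hunk plus the surrounding code it implies (the wbinvd fallback before the loop is assumed, not shown in this diff), the function ends up roughly as:

static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/* Write the affected lines back: clflush per page when the CPU has
	   it, otherwise a full WBINVD (assumed from the surrounding code). */
	if (!cpu_has_clflush)
		asm volatile("wbinvd" ::: "memory");
	list_for_each_entry(pg, l, lru) {
		void *adr = page_address(pg);
		if (cpu_has_clflush)
			cache_flush_page(adr);
	}
	/* One __flush_tlb_all() instead of per-page __flush_tlb_one():
	   simpler, and it also drops stale large-page translations. */
	__flush_tlb_all();
}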