Commit 16da2f93 authored by Thomas Gleixner, committed by Ingo Molnar

x86: smp_64.c: remove unused exports and cleanup while at it

The exports are not used anywhere, and there was never a reason to
introduce them in the first place.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 081e10b9
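
For context: EXPORT_SYMBOL() is the macro that makes a kernel symbol visible to loadable modules; a symbol that only built-in code calls does not need it. A minimal sketch of the pattern this commit deletes (the body is a placeholder, only the export line is the point):

#include <linux/module.h>

void flush_tlb_current_task(void)
{
	/* ... flush the TLB for the current task's mm ... */
}
/* No module uses this, so the export below is what the patch removes. */
EXPORT_SYMBOL(flush_tlb_current_task);

The function itself stays; only the unnecessary entry in the kernel's exported-symbol table goes away.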
@@ -29,7 +29,7 @@
 #include <asm/idle.h>

 /*
  * Smarter SMP flushing macros.
  * c/o Linus Torvalds.
  *
  * These mean you can really definitely utterly forget about
@@ -37,15 +37,15 @@
  *
  * Optimizations Manfred Spraul <manfred@colorfullife.com>
  *
  * More scalable flush, from Andi Kleen
  *
  * To avoid global state use 8 different call vectors.
  * Each CPU uses a specific vector to trigger flushes on other
  * CPUs. Depending on the received vector the target CPUs look into
  * the right per cpu variable for the flush data.
  *
  * With more than 8 CPUs they are hashed to the 8 available
  * vectors. The limited global vector space forces us to this right now.
  * In future when interrupts are split into per CPU domains this could be
  * fixed, at the cost of triggering multiple IPIs in some cases.
  */
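
The "hashed to the 8 available vectors" scheme in this comment boils down to a modulo on the sending CPU's id. A rough sketch of how a sender picks its vector and per-vector state (mirroring the flush_tlb_others() code further down in this diff, simplified rather than the literal implementation):

	int sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	union smp_flush_state *f = &per_cpu(flush_state, sender);

	/*
	 * Several senders can hash to the same slot once there are more
	 * than NUM_INVALIDATE_TLB_VECTORS cpus, hence the per-slot lock.
	 */
	spin_lock(&f->tlbstate_lock);
	/* ... fill f->flush_mm / f->flush_va / f->flush_cpumask and send
	 * INVALIDATE_TLB_VECTOR_START + sender to the target cpus ... */
	spin_unlock(&f->tlbstate_lock);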
@@ -67,7 +67,7 @@ union smp_flush_state {
 static DEFINE_PER_CPU(union smp_flush_state, flush_state);

 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
  */
 static inline void leave_mm(int cpu)
@@ -85,25 +85,25 @@ static inline void leave_mm(int cpu)
  * 1) switch_mm() either 1a) or 1b)
  * 1a) thread switch to a different mm
  * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
  *	Stop ipi delivery for the old mm. This is not synchronized with
  *	the other cpus, but smp_invalidate_interrupt ignore flush ipis
  *	for the wrong mm, and in the worst case we perform a superfluous
  *	tlb flush.
  * 1a2) set cpu mmu_state to TLBSTATE_OK
  *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
  *	was in lazy tlb mode.
  * 1a3) update cpu active_mm
  *	Now cpu0 accepts tlb flushes for the new mm.
  * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
  *	Now the other cpus will send tlb flush ipis.
  * 1a4) change cr3.
  * 1b) thread switch without mm change
  *	cpu active_mm is correct, cpu0 already handles
  *	flush ipis.
  * 1b1) set cpu mmu_state to TLBSTATE_OK
  * 1b2) test_and_set the cpu bit in cpu_vm_mask.
  *	Atomically set the bit [other cpus will start sending flush ipis],
  *	and test the bit.
  * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
  * 2) switch %%esp, ie current
  *
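
Steps 1b1-1b3 are the subtle part of this protocol. A rough sketch of that path in code (paraphrasing the switch_mm() logic of this era; resume_lazy_mm() is a made-up illustrative name, not a function in this file):

static inline void resume_lazy_mm(struct mm_struct *next, int cpu)
{
	write_pda(mmu_state, TLBSTATE_OK);			/* 1b1 */
	if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {	/* 1b2 */
		/*
		 * 1b3: the bit was clear, so leave_mm() ran here while this
		 * cpu was lazy and flush IPIs were being ignored; the TLB
		 * may be stale, so reload cr3 to flush it.
		 */
		load_cr3(next->pgd);
	}
}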
@@ -142,7 +142,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	if (!cpu_isset(cpu, f->flush_cpumask))
 		goto out;
 	/*
 	 * This was a BUG() but until someone can quote me the
 	 * line from the intel manual that guarantees an IPI to
 	 * multiple CPUs is retried _only_ on the erroring CPUs
@@ -150,7 +150,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	 *
 	 * BUG();
 	 */
 	if (f->flush_mm == read_pda(active_mm)) {
 		if (read_pda(mmu_state) == TLBSTATE_OK) {
 			if (f->flush_va == FLUSH_ALL)
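
These nested tests are where the receiving cpu decides between a full flush, a single-page flush, or dropping into lazy TLB mode. Roughly, such a handler concludes like this (a paraphrase for orientation, not the lines elided from the hunk above):

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == FLUSH_ALL)
				local_flush_tlb();		/* whole TLB */
			else
				__flush_tlb_one(f->flush_va);	/* one page */
		} else
			leave_mm(cpu);	/* lazy: stop accepting flush IPIs */
	}
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);	/* signal completion to the sender */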
@@ -176,9 +176,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 	f = &per_cpu(flush_state, sender);

-	/* Could avoid this lock when
-	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-	   probably not worth checking this for a cache-hot lock. */
+	/*
+	 * Could avoid this lock when
+	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+	 * probably not worth checking this for a cache-hot lock.
+	 */
 	spin_lock(&f->tlbstate_lock);

 	f->flush_mm = mm;
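
The comment reformatted above records a possible optimisation rather than implemented behaviour; if it were implemented it might look something like the following (purely hypothetical sketch of the comment's idea):

	/*
	 * With at most NUM_INVALIDATE_TLB_VECTORS online cpus each sender
	 * owns its vector exclusively, so the lock would be uncontended.
	 * The matching unlock would need the same test.
	 */
	if (num_online_cpus() > NUM_INVALIDATE_TLB_VECTORS)
		spin_lock(&f->tlbstate_lock);

As the comment concludes, the extra branch is probably not worth it for a lock that is normally cache-hot.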
@@ -202,14 +204,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 int __cpuinit init_smp_flush(void)
 {
 	int i;

 	for_each_cpu_mask(i, cpu_possible_map) {
 		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
 	}
 	return 0;
 }
 core_initcall(init_smp_flush);

 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
@@ -224,7 +226,6 @@ void flush_tlb_current_task(void)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 	preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_current_task);

 void flush_tlb_mm (struct mm_struct * mm)
 {
@@ -245,7 +246,6 @@ void flush_tlb_mm (struct mm_struct * mm)
 	preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_mm);

 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
@@ -259,8 +259,8 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 	if (current->active_mm == mm) {
 		if(current->mm)
 			__flush_tlb_one(va);
 		else
 			leave_mm(smp_processor_id());
 	}

 	if (!cpus_empty(cpu_mask))
@@ -268,7 +268,6 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 	preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_page);

 static void do_flush_tlb_all(void* info)
 {
@@ -325,11 +324,9 @@ void unlock_ipi_call_lock(void)
  * this function sends a 'generic call function' IPI to all other CPU
  * of the system defined in the mask.
  */
-static int
-__smp_call_function_mask(cpumask_t mask,
-			 void (*func)(void *), void *info,
-			 int wait)
+static int __smp_call_function_mask(cpumask_t mask,
+				    void (*func)(void *), void *info,
+				    int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
@@ -417,11 +414,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
  */
 int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
 	int nonatomic, int wait)
 {
 	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
+	int ret, me = get_cpu();

 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
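
The smp_call_function_single() prototype visible in this hunk takes the target cpu, a callback, an opaque argument and the nonatomic/wait flags. A hypothetical caller (remote_tick() is an invented name, used only for illustration) might look like:

static void remote_tick(void *info)
{
	/* Runs on the target cpu in interrupt context, so keep it short. */
	atomic_inc((atomic_t *)info);
}

	atomic_t hits = ATOMIC_INIT(0);

	/* nonatomic = 0, wait = 1: return only after 'cpu' has run remote_tick(). */
	smp_call_function_single(cpu, remote_tick, &hits, 0, 1);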
@@ -471,9 +467,9 @@ static void stop_this_cpu(void *dummy)
 	 */
 	cpu_clear(smp_processor_id(), cpu_online_map);
 	disable_local_APIC();
 	for (;;)
 		halt();
 }

 void smp_send_stop(void)
 {
......