Commit 6612538c authored by Hiroshi Shimamoto, committed by Ingo Molnar

x86: clean up process_32/64.c

Whitespace and coding style cleanup.
Make process_32.c and process_64.c similar.
Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 3c2362e6
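
The diff below is mechanical: it adds a space after commas, drops the space between a function name and its opening parenthesis, and moves a few blocks so both files lay things out in the same order. A minimal before/after sketch of the spacing rules being applied, using declarations taken from the hunks below:

/* before: space before '(' and none after the comma */
static void poll_idle (void);
savesegment(gs,p->thread.gs);

/* after: no space before '(', a space after the comma */
static void poll_idle(void);
savesegment(gs, p->thread.gs);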
process_32.c

@@ -142,7 +142,7 @@ EXPORT_SYMBOL(default_idle);
  * to poll the ->work.need_resched flag instead of waiting for the
  * cross-CPU IPI to arrive. Use this option with caution.
  */
-static void poll_idle (void)
+static void poll_idle(void)
 {
         cpu_relax();
 }
@@ -493,7 +493,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
         p->thread.ip = (unsigned long) ret_from_fork;

-        savesegment(gs,p->thread.gs);
+        savesegment(gs, p->thread.gs);

         tsk = current;
         if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
...
process_64.c

@@ -19,19 +19,19 @@
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/fs.h>
 #include <linux/elfcore.h>
 #include <linux/smp.h>
 #include <linux/slab.h>
 #include <linux/user.h>
-#include <linux/module.h>
 #include <linux/a.out.h>
 #include <linux/interrupt.h>
-#include <linux/utsname.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/utsname.h>
 #include <linux/random.h>
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
@@ -129,54 +129,12 @@ static void default_idle(void)
  * to poll the ->need_resched flag instead of waiting for the
  * cross-CPU IPI to arrive. Use this option with caution.
  */
-static void poll_idle (void)
+static void poll_idle(void)
 {
         local_irq_enable();
         cpu_relax();
 }

-static void do_nothing(void *unused)
-{
-}
-
-void cpu_idle_wait(void)
-{
-        unsigned int cpu, this_cpu = get_cpu();
-        cpumask_t map, tmp = current->cpus_allowed;
-
-        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-        put_cpu();
-
-        cpus_clear(map);
-        for_each_online_cpu(cpu) {
-                per_cpu(cpu_idle_state, cpu) = 1;
-                cpu_set(cpu, map);
-        }
-
-        __get_cpu_var(cpu_idle_state) = 0;
-
-        wmb();
-        do {
-                ssleep(1);
-                for_each_online_cpu(cpu) {
-                        if (cpu_isset(cpu, map) &&
-                                        !per_cpu(cpu_idle_state, cpu))
-                                cpu_clear(cpu, map);
-                }
-                cpus_and(map, map, cpu_online_map);
-                /*
-                 * We waited 1 sec, if a CPU still did not call idle
-                 * it may be because it is in idle and not waking up
-                 * because it has nothing to do.
-                 * Give all the remaining CPUS a kick.
-                 */
-                smp_call_function_mask(map, do_nothing, 0, 0);
-        } while (!cpus_empty(map));
-
-        set_cpus_allowed(current, tmp);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
@@ -247,6 +205,47 @@ void cpu_idle(void)
         }
 }

+static void do_nothing(void *unused)
+{
+}
+
+void cpu_idle_wait(void)
+{
+        unsigned int cpu, this_cpu = get_cpu();
+        cpumask_t map, tmp = current->cpus_allowed;
+
+        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+        put_cpu();
+
+        cpus_clear(map);
+        for_each_online_cpu(cpu) {
+                per_cpu(cpu_idle_state, cpu) = 1;
+                cpu_set(cpu, map);
+        }
+
+        __get_cpu_var(cpu_idle_state) = 0;
+
+        wmb();
+        do {
+                ssleep(1);
+                for_each_online_cpu(cpu) {
+                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+                                cpu_clear(cpu, map);
+                }
+                cpus_and(map, map, cpu_online_map);
+                /*
+                 * We waited 1 sec, if a CPU still did not call idle
+                 * it may be because it is in idle and not waking up
+                 * because it has nothing to do.
+                 * Give all the remaining CPUS a kick.
+                 */
+                smp_call_function_mask(map, do_nothing, 0, 0);
+        } while (!cpus_empty(map));
+
+        set_cpus_allowed(current, tmp);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
 /*
  * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
  * which can obviate IPI to trigger checking of need_resched.
@@ -300,7 +299,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
         }
 }

-static int __init idle_setup (char *str)
+static int __init idle_setup(char *str)
 {
         if (!strcmp(str, "poll")) {
                 printk("using polling idle threads.\n");
@@ -320,8 +319,8 @@ void __show_regs(struct pt_regs * regs)
 {
         unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
         unsigned long d0, d1, d2, d3, d6, d7;
-        unsigned int fsindex,gsindex;
-        unsigned int ds,cs,es;
+        unsigned int fsindex, gsindex;
+        unsigned int ds, cs, es;

         printk("\n");
         print_modules();
@@ -544,7 +543,25 @@ out:
 /*
  * This special macro can be used to load a debugging register
  */
-#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
+#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
+
+/*
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+        struct pt_regs *pp, ptregs;
+
+        pp = task_pt_regs(tsk);
+
+        ptregs = *pp;
+        ptregs.cs &= 0xffff;
+        ptregs.ss &= 0xffff;
+
+        elf_core_copy_regs(regs, &ptregs);
+
+        return 1;
+}

 static inline void __switch_to_xtra(struct task_struct *prev_p,
                                     struct task_struct *next_p,
@@ -885,24 +902,6 @@ long sys_arch_prctl(int code, unsigned long addr)
         return do_arch_prctl(current, code, addr);
 }

-/*
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
-        struct pt_regs *pp, ptregs;
-
-        pp = task_pt_regs(tsk);
-
-        ptregs = *pp;
-        ptregs.cs &= 0xffff;
-        ptregs.ss &= 0xffff;
-
-        elf_core_copy_regs(regs, &ptregs);
-
-        return 1;
-}
-
 unsigned long arch_align_stack(unsigned long sp)
 {
         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
...
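
A note on the block that is only moved above (not changed): cpu_idle_wait() ensures that every online CPU has left its current idle handler, which is why it is exported for callers that switch the idle routine at run time. A minimal usage sketch, assuming the pm_idle function pointer and the poll_idle() routine that these files use; the wrapper name switch_to_poll_idle() is hypothetical:

/* Hypothetical wrapper: install a new idle routine, then wait until
 * every CPU has been kicked out of the old one. pm_idle and poll_idle
 * are the symbols referenced in the diff above. */
static void switch_to_poll_idle(void)
{
        pm_idle = poll_idle;    /* the idle loop picks this up on its next pass */
        cpu_idle_wait();        /* returns once no CPU still runs the old handler */
}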