Commit a9d9baa1 authored by Ashok Raj's avatar Ashok Raj Committed by Linus Torvalds

[PATCH] clean up lock_cpu_hotplug() in cpufreq

There are some callers in the cpufreq hotplug notify path where the lowest-level
function calls lock_cpu_hotplug().  The lock is already held during
cpu_up() and cpu_down() calls, when the notify calls are broadcast to
registered clients.

Ideally, if possible, we could call preempt_disable() at the highest caller and
make sure we don't sleep in the path down in the cpufreq->driver_target() calls,
but the calls are too intertwined and cumbersome to clean up.

Hence we consistently use lock_cpu_hotplug() and unlock_cpu_hotplug() in
all places.

 - Removed export of cpucontrol semaphore and made it static.
 - removed explicit uses of up/down with lock_cpu_hotplug()
   so we can keep track of the callers in the same thread context and
   just keep refcounts without calling a down() that causes a deadlock.
 - Removed current_in_hotplug() uses
 - Removed PF_HOTPLUG_CPU in sched.h introduced for the current_in_hotplug()
   temporary workaround.

Tested with insmod of cpufreq_stat.ko, and logical online/offline
to make sure we don't have any hang situations.
Signed-off-by: default avatarAshok Raj <ashok.raj@intel.com>
Cc: Zwane Mwaikambo <zwane@linuxpower.ca>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent e0f39591
...@@ -1113,20 +1113,12 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, ...@@ -1113,20 +1113,12 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
{ {
int retval = -EINVAL; int retval = -EINVAL;
/*
* If we are already in context of hotplug thread, we dont need to
* acquire the hotplug lock. Otherwise acquire cpucontrol to prevent
* hotplug from removing this cpu that we are working on.
*/
if (!current_in_cpu_hotplug())
lock_cpu_hotplug(); lock_cpu_hotplug();
dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
target_freq, relation); target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target) if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation); retval = cpufreq_driver->target(policy, target_freq, relation);
if (!current_in_cpu_hotplug())
unlock_cpu_hotplug(); unlock_cpu_hotplug();
return retval; return retval;
......
...@@ -65,10 +65,9 @@ extern struct sysdev_class cpu_sysdev_class; ...@@ -65,10 +65,9 @@ extern struct sysdev_class cpu_sysdev_class;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
extern void lock_cpu_hotplug(void);
extern void unlock_cpu_hotplug(void);
extern int lock_cpu_hotplug_interruptible(void);
#define hotcpu_notifier(fn, pri) { \ #define hotcpu_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \ static struct notifier_block fn##_nb = \
{ .notifier_call = fn, .priority = pri }; \ { .notifier_call = fn, .priority = pri }; \
......
...@@ -908,7 +908,6 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0) ...@@ -908,7 +908,6 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ #define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */ #define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */ #define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
#define PF_HOTPLUG_CPU 0x01000000 /* Currently performing CPU hotplug */
/* /*
* Only the _current_ task can read/write to tsk->flags, but other * Only the _current_ task can read/write to tsk->flags, but other
......
...@@ -16,47 +16,76 @@ ...@@ -16,47 +16,76 @@
#include <asm/semaphore.h>

/* This protects CPUs going up and down... */
static DECLARE_MUTEX(cpucontrol);

static struct notifier_block *cpu_chain;

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Recursive (same-task) hotplug locking state: the task that currently
 * holds cpucontrol, and its nesting depth.  Re-acquisition by the owning
 * task only bumps the depth, so notifier callbacks invoked from within
 * cpu_up()/cpu_down() can call lock_cpu_hotplug() without deadlocking.
 */
static struct task_struct *lock_cpu_hotplug_owner;
static int lock_cpu_hotplug_depth;

/*
 * Acquire cpucontrol unless the current task already owns it.
 * @interruptible: non-zero to acquire with down_interruptible().
 *
 * Returns 0 on success, or the error from down_interruptible()
 * (e.g. -EINTR) when an interruptible acquisition was interrupted.
 */
static int __lock_cpu_hotplug(int interruptible)
{
	int ret = 0;

	if (lock_cpu_hotplug_owner != current) {
		if (interruptible)
			ret = down_interruptible(&cpucontrol);
		else
			down(&cpucontrol);
	}

	/*
	 * Bump the refcount and record ownership only if we succeeded
	 * in locking (or already owned the lock).
	 */
	if (!ret) {
		lock_cpu_hotplug_depth++;
		lock_cpu_hotplug_owner = current;
	}
	return ret;
}

void lock_cpu_hotplug(void)
{
	__lock_cpu_hotplug(0);
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

void unlock_cpu_hotplug(void)
{
	/* Release cpucontrol only when the outermost hold is dropped. */
	if (--lock_cpu_hotplug_depth == 0) {
		lock_cpu_hotplug_owner = NULL;
		up(&cpucontrol);
	}
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);

int lock_cpu_hotplug_interruptible(void)
{
	return __lock_cpu_hotplug(1);
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */ /* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb) int register_cpu_notifier(struct notifier_block *nb)
{ {
int ret; int ret;
if ((ret = down_interruptible(&cpucontrol)) != 0) if ((ret = lock_cpu_hotplug_interruptible()) != 0)
return ret; return ret;
ret = notifier_chain_register(&cpu_chain, nb); ret = notifier_chain_register(&cpu_chain, nb);
up(&cpucontrol); unlock_cpu_hotplug();
return ret; return ret;
} }
EXPORT_SYMBOL(register_cpu_notifier); EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb) void unregister_cpu_notifier(struct notifier_block *nb)
{ {
down(&cpucontrol); lock_cpu_hotplug();
notifier_chain_unregister(&cpu_chain, nb); notifier_chain_unregister(&cpu_chain, nb);
up(&cpucontrol); unlock_cpu_hotplug();
} }
EXPORT_SYMBOL(unregister_cpu_notifier); EXPORT_SYMBOL(unregister_cpu_notifier);
...@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu) ...@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
goto out; goto out;
} }
/*
* Leave a trace in current->flags indicating we are already in
* process of performing CPU hotplug. Callers can check if cpucontrol
* is already acquired by current thread, and if so not cause
* a dead lock by not acquiring the lock
*/
current->flags |= PF_HOTPLUG_CPU;
err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
(void *)(long)cpu); (void *)(long)cpu);
if (err == NOTIFY_BAD) { if (err == NOTIFY_BAD) {
...@@ -171,7 +193,6 @@ out_thread: ...@@ -171,7 +193,6 @@ out_thread:
out_allowed: out_allowed:
set_cpus_allowed(current, old_allowed); set_cpus_allowed(current, old_allowed);
out: out:
current->flags &= ~PF_HOTPLUG_CPU;
unlock_cpu_hotplug(); unlock_cpu_hotplug();
return err; return err;
} }
...@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu) ...@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
int ret; int ret;
void *hcpu = (void *)(long)cpu; void *hcpu = (void *)(long)cpu;
if ((ret = down_interruptible(&cpucontrol)) != 0) if ((ret = lock_cpu_hotplug_interruptible()) != 0)
return ret; return ret;
if (cpu_online(cpu) || !cpu_present(cpu)) { if (cpu_online(cpu) || !cpu_present(cpu)) {
...@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu) ...@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
goto out; goto out;
} }
/*
* Leave a trace in current->flags indicating we are already in
* process of performing CPU hotplug.
*/
current->flags |= PF_HOTPLUG_CPU;
ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
if (ret == NOTIFY_BAD) { if (ret == NOTIFY_BAD) {
printk("%s: attempt to bring up CPU %u failed\n", printk("%s: attempt to bring up CPU %u failed\n",
...@@ -217,7 +233,6 @@ out_notify: ...@@ -217,7 +233,6 @@ out_notify:
if (ret != 0) if (ret != 0)
notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out: out:
current->flags &= ~PF_HOTPLUG_CPU; unlock_cpu_hotplug();
up(&cpucontrol);
return ret; return ret;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment