Commit b1f4ec17 authored by Oleg Nesterov, committed by Linus Torvalds

workqueue: introduce cpu_singlethread_map

The code like

	if (is_single_threaded(wq))
		do_something(singlethread_cpu);
	else {
		for_each_cpu_mask(cpu, cpu_populated_map)
			do_something(cpu);
	}

looks very annoying. We can add "static cpumask_t cpu_singlethread_map" and
simplify the code. Lessens .text a bit, and imho makes the code more readable.
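
With the wq_cpu_map() helper introduced below, both cases collapse into one
loop (do_something() is the same placeholder as above):

	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	for_each_cpu_mask(cpu, *cpu_map)
		do_something(cpu);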
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dfb4b82e
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -69,6 +69,7 @@ static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
+static cpumask_t cpu_singlethread_map __read_mostly;
 /* optimization, we could use cpu_possible_map */
 static cpumask_t cpu_populated_map __read_mostly;
 
@@ -78,6 +79,12 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+{
+	return is_single_threaded(wq)
+		? &cpu_singlethread_map : &cpu_populated_map;
+}
+
 /*
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
@@ -393,16 +400,12 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
-	might_sleep();
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	int cpu;
 
-	if (is_single_threaded(wq))
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
-	else {
-		int cpu;
-
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-	}
+	might_sleep();
+	for_each_cpu_mask(cpu, *cpu_map)
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
@@ -439,7 +442,9 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
  */
 void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
 	might_sleep();
 
@@ -457,14 +462,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 		work_release(work);
 	spin_unlock_irq(&cwq->lock);
 
-	if (is_single_threaded(wq))
-		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
-	else {
-		int cpu;
-
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -757,23 +756,18 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
-	if (is_single_threaded(wq)) {
-		cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
-		cleanup_workqueue_thread(cwq, singlethread_cpu);
-	} else {
-		int cpu;
+	mutex_lock(&workqueue_mutex);
+	list_del(&wq->list);
+	mutex_unlock(&workqueue_mutex);
 
-		mutex_lock(&workqueue_mutex);
-		list_del(&wq->list);
-		mutex_unlock(&workqueue_mutex);
-
-		for_each_cpu_mask(cpu, cpu_populated_map) {
-			cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-			cleanup_workqueue_thread(cwq, cpu);
-		}
+	for_each_cpu_mask(cpu, *cpu_map) {
+		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+		cleanup_workqueue_thread(cwq, cpu);
 	}
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -831,6 +825,7 @@ void init_workqueues(void)
 {
 	cpu_populated_map = cpu_online_map;
 	singlethread_cpu = first_cpu(cpu_possible_map);
+	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);