Commit 6e74057c authored by Chris Mason

Btrfs: Fix async thread shutdown race

It was possible for an async worker thread to be selected to
receive a new work item, but exit before the work item was
actually placed into that thread's work list.

This commit fixes the race by incrementing the num_pending
counter earlier, and making sure to check the number of pending
work items before a thread exits.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 627e421a
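
Before the diff, a minimal userspace model of the window this patch closes. This is a sketch, not the kernel code: pthreads and C11 atomics stand in for spinlocks and atomic_t, and worker_t, pick_worker, queue_work, and try_shutdown are made-up names. Previously the counter was only incremented at queue time; between the dispatcher dropping the shared lock and the queuing step taking the worker's own lock, the shutdown check could see empty lists and let the thread exit, stranding the item.

/*
 * Hypothetical model of the race and the fix.  pthreads and C11
 * atomics replace the kernel primitives; all names are illustrative.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef struct worker {
	pthread_mutex_t lock;      /* models worker->lock */
	int pending_len;           /* models the pending list */
	atomic_int num_pending;    /* models worker->num_pending */
	bool exited;
} worker_t;

static pthread_mutex_t workers_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dispatcher side, as in next_worker()/find_worker() after the patch:
 * account for the item while the shared lock is still held, before the
 * worker can run its shutdown check. */
worker_t *pick_worker(worker_t *w)
{
	pthread_mutex_lock(&workers_lock);
	atomic_fetch_add(&w->num_pending, 1);
	pthread_mutex_unlock(&workers_lock);
	return w;
}

/* Queuing side, as in btrfs_queue_worker() after the patch: the list
 * insert happens later, under the worker's own lock, and no longer
 * needs its own increment. */
void queue_work(worker_t *w)
{
	pthread_mutex_lock(&w->lock);
	w->pending_len++;	/* models list_add_tail() */
	pthread_mutex_unlock(&w->lock);
}

/* Worker side, as in try_worker_shutdown() after the patch: an empty
 * list is no longer enough, num_pending must also be zero, which
 * closes the window between pick_worker() and queue_work(). */
bool try_shutdown(worker_t *w)
{
	bool freeit = false;

	pthread_mutex_lock(&w->lock);
	pthread_mutex_lock(&workers_lock);
	if (w->pending_len == 0 && atomic_load(&w->num_pending) == 0)
		freeit = w->exited = true;
	pthread_mutex_unlock(&workers_lock);
	pthread_mutex_unlock(&w->lock);
	return freeit;
}
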
@@ -191,7 +191,8 @@ static int try_worker_shutdown(struct btrfs_worker_thread *worker)
 	    !worker->working &&
 	    !list_empty(&worker->worker_list) &&
 	    list_empty(&worker->prio_pending) &&
-	    list_empty(&worker->pending)) {
+	    list_empty(&worker->pending) &&
+	    atomic_read(&worker->num_pending) == 0) {
 		freeit = 1;
 		list_del_init(&worker->worker_list);
 		worker->workers->num_workers--;
@@ -485,7 +486,6 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 	 */
 	next = workers->worker_list.next;
 	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
-	atomic_inc(&worker->num_pending);
 	worker->sequence++;

 	if (worker->sequence % workers->idle_thresh == 0)
@@ -521,8 +521,7 @@ again:
 			goto again;
 		}
 	}
-	spin_unlock_irqrestore(&workers->lock, flags);
-	return worker;
+	goto found;

 fallback:
 	fallback = NULL;
@@ -537,6 +536,12 @@ fallback:
 	BUG_ON(!fallback);
 	worker = list_entry(fallback,
 		  struct btrfs_worker_thread, worker_list);
+found:
+	/*
+	 * this makes sure the worker doesn't exit before it is placed
+	 * onto a busy/idle list
+	 */
+	atomic_inc(&worker->num_pending);
 	spin_unlock_irqrestore(&workers->lock, flags);
 	return worker;
 }
@@ -627,7 +632,6 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		list_add_tail(&work->list, &worker->prio_pending);
 	else
 		list_add_tail(&work->list, &worker->pending);
-	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);

 	/*
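
The counter pairs with a decrement on the consume side: in this version of fs/btrfs/async-thread.c the worker drops num_pending as it runs each item, so the new shutdown check only passes once everything accounted for by the dispatcher has actually been processed. Continuing the sketch above (run_one is again a made-up name):

/* Consume side: models the worker loop's per-item decrement.  Once
 * pending_len and num_pending both reach zero, try_shutdown() from
 * the sketch above can succeed again. */
void run_one(worker_t *w)
{
	pthread_mutex_lock(&w->lock);
	w->pending_len--;	/* models list_del(&work->list) */
	pthread_mutex_unlock(&w->lock);
	/* work->func(work) would run here */
	atomic_fetch_sub(&w->num_pending, 1);
}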