Commit 7dc5d24b authored by Tejun Heo, committed by Eric Van Hensbergen

9p-trans_fd: fix trans_fd::p9_conn_destroy()

p9_conn_destroy() first kills all current requests by calling
p9_conn_cancel(), then waits for the request list to be cleared by
waiting on p9_conn->equeue.  After that, polling is stopped and the
trans is destroyed.  This sequence has a few problems.

* Read and write works were never cancelled and the p9_conn can be
  destroyed while the works are running as r/w works remove requests
  from the list and dereference the p9_conn from them.

* The list emptiness wait using p9_conn->equeue wouldn't trigger
  because p9_conn_cancel() always clears all the lists and the only
  way the wait can be triggered is to have another task to issue a
  request between the slim window between p9_conn_cancel() and the
  wait, which isn't safe under the current implementation with or
  without the wait.

This patch fixes the problem by first stopping poll (which can
schedule r/w works), then cancelling the r/w works — guaranteeing
that the r/w works are not running and will not run from that point
on — and only then calling p9_conn_cancel() and doing the rest of
the destruction.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
parent 72029fe8
...@@ -151,7 +151,6 @@ struct p9_mux_poll_task { ...@@ -151,7 +151,6 @@ struct p9_mux_poll_task {
* @trans: reference to transport instance for this connection * @trans: reference to transport instance for this connection
* @tagpool: id accounting for transactions * @tagpool: id accounting for transactions
* @err: error state * @err: error state
* @equeue: event wait_q (?)
* @req_list: accounting for requests which have been sent * @req_list: accounting for requests which have been sent
* @unsent_req_list: accounting for requests that haven't been sent * @unsent_req_list: accounting for requests that haven't been sent
* @rcall: current response &p9_fcall structure * @rcall: current response &p9_fcall structure
...@@ -178,7 +177,6 @@ struct p9_conn { ...@@ -178,7 +177,6 @@ struct p9_conn {
struct p9_trans *trans; struct p9_trans *trans;
struct p9_idpool *tagpool; struct p9_idpool *tagpool;
int err; int err;
wait_queue_head_t equeue;
struct list_head req_list; struct list_head req_list;
struct list_head unsent_req_list; struct list_head unsent_req_list;
struct p9_fcall *rcall; struct p9_fcall *rcall;
...@@ -430,7 +428,6 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans) ...@@ -430,7 +428,6 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans)
} }
m->err = 0; m->err = 0;
init_waitqueue_head(&m->equeue);
INIT_LIST_HEAD(&m->req_list); INIT_LIST_HEAD(&m->req_list);
INIT_LIST_HEAD(&m->unsent_req_list); INIT_LIST_HEAD(&m->unsent_req_list);
m->rcall = NULL; m->rcall = NULL;
...@@ -483,18 +480,13 @@ static void p9_conn_destroy(struct p9_conn *m) ...@@ -483,18 +480,13 @@ static void p9_conn_destroy(struct p9_conn *m)
{ {
P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
m->mux_list.prev, m->mux_list.next); m->mux_list.prev, m->mux_list.next);
p9_conn_cancel(m, -ECONNRESET);
if (!list_empty(&m->req_list)) {
/* wait until all processes waiting on this session exit */
P9_DPRINTK(P9_DEBUG_MUX,
"mux %p waiting for empty request queue\n", m);
wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
list_empty(&m->req_list));
}
p9_mux_poll_stop(m); p9_mux_poll_stop(m);
cancel_work_sync(&m->rq);
cancel_work_sync(&m->wq);
p9_conn_cancel(m, -ECONNRESET);
m->trans = NULL; m->trans = NULL;
p9_idpool_destroy(m->tagpool); p9_idpool_destroy(m->tagpool);
kfree(m); kfree(m);
...@@ -840,8 +832,6 @@ static void p9_read_work(struct work_struct *work) ...@@ -840,8 +832,6 @@ static void p9_read_work(struct work_struct *work)
(*req->cb) (req, req->cba); (*req->cb) (req, req->cba);
else else
kfree(req->rcall); kfree(req->rcall);
wake_up(&m->equeue);
} }
} else { } else {
if (err >= 0 && rcall->id != P9_RFLUSH) if (err >= 0 && rcall->id != P9_RFLUSH)
...@@ -984,8 +974,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a) ...@@ -984,8 +974,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a)
(*req->cb) (req, req->cba); (*req->cb) (req, req->cba);
else else
kfree(req->rcall); kfree(req->rcall);
wake_up(&m->equeue);
} }
kfree(freq->tcall); kfree(freq->tcall);
...@@ -1191,8 +1179,6 @@ void p9_conn_cancel(struct p9_conn *m, int err) ...@@ -1191,8 +1179,6 @@ void p9_conn_cancel(struct p9_conn *m, int err)
else else
kfree(req->rcall); kfree(req->rcall);
} }
wake_up(&m->equeue);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment