Commit 7086721f authored by Jeff Layton's avatar Jeff Layton Committed by J. Bruce Fields

SUNRPC: have svc_recv() check kthread_should_stop()

When using kthreads that call into svc_recv, we want to make sure that
they do not block there for a long time when we're trying to take down
the kthread.

This patch changes svc_recv() to check kthread_should_stop() at the same
places where it checks whether it has been signalled(). Also check just before
svc_recv() tries to schedule(). By making sure that we check it just
after setting the task state we can avoid having to use any locking or
signalling to ensure it doesn't block for a long time.

There's still a chance of a 500ms sleep if alloc_page() fails, but
that should be a rare occurrence and isn't a terribly long time in
the context of a kthread being taken down.
Signed-off-by: default avatarJeff Layton <jlayton@redhat.com>
Signed-off-by: default avatarJ. Bruce Fields <bfields@citi.umich.edu>
parent 23d42ee2
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/freezer.h> #include <linux/freezer.h>
#include <linux/kthread.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/checksum.h> #include <net/checksum.h>
#include <net/ip.h> #include <net/ip.h>
...@@ -587,6 +588,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) ...@@ -587,6 +588,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
struct page *p = alloc_page(GFP_KERNEL); struct page *p = alloc_page(GFP_KERNEL);
if (!p) { if (!p) {
int j = msecs_to_jiffies(500); int j = msecs_to_jiffies(500);
if (kthread_should_stop())
return -EINTR;
schedule_timeout_uninterruptible(j); schedule_timeout_uninterruptible(j);
} }
rqstp->rq_pages[i] = p; rqstp->rq_pages[i] = p;
...@@ -607,7 +610,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) ...@@ -607,7 +610,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
try_to_freeze(); try_to_freeze();
cond_resched(); cond_resched();
if (signalled()) if (signalled() || kthread_should_stop())
return -EINTR; return -EINTR;
spin_lock_bh(&pool->sp_lock); spin_lock_bh(&pool->sp_lock);
...@@ -626,6 +629,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) ...@@ -626,6 +629,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
* to bring down the daemons ... * to bring down the daemons ...
*/ */
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
/*
* checking kthread_should_stop() here allows us to avoid
* locking and signalling when stopping kthreads that call
* svc_recv. If the thread has already been woken up, then
* we can exit here without sleeping. If not, then it'll
* be woken up quickly during the schedule_timeout
*/
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
spin_unlock_bh(&pool->sp_lock);
return -EINTR;
}
add_wait_queue(&rqstp->rq_wait, &wait); add_wait_queue(&rqstp->rq_wait, &wait);
spin_unlock_bh(&pool->sp_lock); spin_unlock_bh(&pool->sp_lock);
...@@ -641,7 +658,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) ...@@ -641,7 +658,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
svc_thread_dequeue(pool, rqstp); svc_thread_dequeue(pool, rqstp);
spin_unlock_bh(&pool->sp_lock); spin_unlock_bh(&pool->sp_lock);
dprintk("svc: server %p, no data yet\n", rqstp); dprintk("svc: server %p, no data yet\n", rqstp);
return signalled()? -EINTR : -EAGAIN; if (signalled() || kthread_should_stop())
return -EINTR;
else
return -EAGAIN;
} }
} }
spin_unlock_bh(&pool->sp_lock); spin_unlock_bh(&pool->sp_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment