Commit abff55ce authored by Tony Battersby, committed by Linus Torvalds

epoll: don't use current in irq context

ep_call_nested() (formerly ep_poll_safewake()) uses "current" (without
dereferencing it) to detect callback recursion, but it may be called from
irq context where the use of current is generally discouraged.  It would
be better to use get_cpu() and put_cpu() to detect the callback recursion.
Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bb57c3ed
@@ -97,8 +97,8 @@ struct epoll_filefd {
  */
 struct nested_call_node {
         struct list_head llink;
-        struct task_struct *task;
         void *cookie;
+        int cpu;
 };
 
 /*
@@ -327,7 +327,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 {
         int error, call_nests = 0;
         unsigned long flags;
-        struct task_struct *this_task = current;
+        int this_cpu = get_cpu();
         struct list_head *lsthead = &ncalls->tasks_call_list;
         struct nested_call_node *tncur;
         struct nested_call_node tnode;
@@ -340,20 +340,19 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
          * very much limited.
          */
         list_for_each_entry(tncur, lsthead, llink) {
-                if (tncur->task == this_task &&
+                if (tncur->cpu == this_cpu &&
                     (tncur->cookie == cookie || ++call_nests > max_nests)) {
                         /*
                          * Ops ... loop detected or maximum nest level reached.
                          * We abort this wake by breaking the cycle itself.
                          */
-                        spin_unlock_irqrestore(&ncalls->lock, flags);
-
-                        return -1;
+                        error = -1;
+                        goto out_unlock;
                 }
         }
 
         /* Add the current task and cookie to the list */
-        tnode.task = this_task;
+        tnode.cpu = this_cpu;
         tnode.cookie = cookie;
         list_add(&tnode.llink, lsthead);
 
@@ -365,8 +364,10 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
         /* Remove the current task from the list */
         spin_lock_irqsave(&ncalls->lock, flags);
         list_del(&tnode.llink);
+out_unlock:
         spin_unlock_irqrestore(&ncalls->lock, flags);
 
+        put_cpu();
         return error;
 }
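
Taken together, the diff boils down to the per-CPU recursion-detection pattern below. This is a simplified, self-contained sketch of the post-patch logic, not the kernel source itself: the demo_* names and the reduced callback signature are illustrative, while get_cpu()/put_cpu(), the spinlock calls, and the list helpers are the real kernel primitives the patch relies on.

/* Simplified sketch of per-CPU nested-call detection (illustrative names). */
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

struct demo_call_node {
        struct list_head llink;
        void *cookie;
        int cpu;                /* CPU that entered the call, instead of a task pointer */
};

struct demo_nested_calls {
        struct list_head calls; /* nodes for calls currently in progress */
        spinlock_t lock;
};

static int demo_call_nested(struct demo_nested_calls *ncalls, int max_nests,
                            int (*nproc)(void *priv), void *priv, void *cookie)
{
        int error, call_nests = 0;
        unsigned long flags;
        int this_cpu = get_cpu();       /* pins us to this CPU until put_cpu() */
        struct demo_call_node *tncur;
        struct demo_call_node tnode;

        spin_lock_irqsave(&ncalls->lock, flags);

        /*
         * Abort if this CPU already has a call in flight with the same cookie,
         * or if the nesting depth would exceed max_nests.
         */
        list_for_each_entry(tncur, &ncalls->calls, llink) {
                if (tncur->cpu == this_cpu &&
                    (tncur->cookie == cookie || ++call_nests > max_nests)) {
                        error = -1;
                        goto out_unlock;
                }
        }

        /* Record this call so nested invocations on the same CPU can see it. */
        tnode.cpu = this_cpu;
        tnode.cookie = cookie;
        list_add(&tnode.llink, &ncalls->calls);

        spin_unlock_irqrestore(&ncalls->lock, flags);

        /* Run the (possibly re-entrant) callback with the lock dropped. */
        error = nproc(priv);

        /* Remove our node now that the callback has returned. */
        spin_lock_irqsave(&ncalls->lock, flags);
        list_del(&tnode.llink);
out_unlock:
        spin_unlock_irqrestore(&ncalls->lock, flags);

        put_cpu();
        return error;
}

The reason this works in irq context: get_cpu() returns the current processor id and disables preemption until the matching put_cpu(), so the id stays stable for the duration of the call and is safe to read from both process and interrupt context, whereas current identifies whichever task happened to be running when the interrupt arrived.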