Commit 9f339e70 authored by Markus Metzger, committed by Ingo Molnar

x86, ptrace, mm: fix double-free on race

Ptrace_detach() races with __ptrace_unlink() if the traced task is
reaped while detaching. This might cause a double-free of the BTS
buffer.

Change the ptrace_detach() path to only do the memory accounting in
ptrace_bts_detach() and leave the buffer free to ptrace_bts_untrace()
which will be called from __ptrace_unlink().

The fix follows a proposal from Oleg Nesterov.
Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 06eb23b1
...@@ -810,12 +810,16 @@ static void ptrace_bts_untrace(struct task_struct *child) ...@@ -810,12 +810,16 @@ static void ptrace_bts_untrace(struct task_struct *child)
static void ptrace_bts_detach(struct task_struct *child) static void ptrace_bts_detach(struct task_struct *child)
{ {
if (unlikely(child->bts)) { /*
ds_release_bts(child->bts); * Ptrace_detach() races with ptrace_untrace() in case
child->bts = NULL; * the child dies and is reaped by another thread.
*
ptrace_bts_free_buffer(child); * We only do the memory accounting at this point and
} * leave the buffer deallocation and the bts tracer
* release to ptrace_bts_untrace() which will be called
* later on with tasklist_lock held.
*/
release_locked_buffer(child->bts_buffer, child->bts_size);
} }
#else #else
static inline void ptrace_bts_fork(struct task_struct *tsk) {} static inline void ptrace_bts_fork(struct task_struct *tsk) {}
......
...@@ -1305,5 +1305,6 @@ void vmemmap_populate_print_last(void); ...@@ -1305,5 +1305,6 @@ void vmemmap_populate_print_last(void);
extern void *alloc_locked_buffer(size_t size); extern void *alloc_locked_buffer(size_t size);
extern void free_locked_buffer(void *buffer, size_t size); extern void free_locked_buffer(void *buffer, size_t size);
extern void release_locked_buffer(void *buffer, size_t size);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */ #endif /* _LINUX_MM_H */
...@@ -657,7 +657,7 @@ void *alloc_locked_buffer(size_t size) ...@@ -657,7 +657,7 @@ void *alloc_locked_buffer(size_t size)
return buffer; return buffer;
} }
void free_locked_buffer(void *buffer, size_t size) void release_locked_buffer(void *buffer, size_t size)
{ {
unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
...@@ -667,6 +667,11 @@ void free_locked_buffer(void *buffer, size_t size) ...@@ -667,6 +667,11 @@ void free_locked_buffer(void *buffer, size_t size)
current->mm->locked_vm -= pgsz; current->mm->locked_vm -= pgsz;
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
}
void free_locked_buffer(void *buffer, size_t size)
{
release_locked_buffer(buffer, size);
kfree(buffer); kfree(buffer);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment