Commit 61b771fc authored by Thomas Gleixner

Merge branch 'rt/pagefault' into rt/base

parents dd9a0752 e34d6077
@@ -258,7 +258,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (in_atomic() || !mm || current->pagefault_disabled)
 		goto no_context;
 
 	/*
......
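Every arch fault handler in this series grows the same third condition. A hypothetical condensation of the test, for illustration only (fault_must_fixup is not a helper introduced by this commit):

static inline int fault_must_fixup(struct mm_struct *mm)
{
        return in_atomic()                      /* irq context or preemption off */
               || !mm                           /* kernel thread, no user context */
               || current->pagefault_disabled;  /* explicit per-task request */
}

When any of these holds, the handler must not sleep on mmap_sem, so it branches to the no_context/fixup path and the faulting access is resolved through the exception table instead.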
@@ -116,6 +116,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
+	preempt_disable();
 	pagefault_disable();
 	debug_kmap_atomic(type);
 	paddr = page_to_phys(page);
@@ -173,6 +174,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 		BUG();
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 
 #endif /* !__ASSEMBLY__ */
......
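The atomic-kmap hunks, here and below, all apply the same mechanical change: because pagefault_disable() no longer raises the preempt count, each kmap_atomic variant must now call preempt_disable() itself (the fixmap slot it uses is per-CPU), and each kunmap_atomic unwinds in the reverse order. A hedged sketch of the resulting contract (copy_to_page is a hypothetical caller; kmap_atomic/kunmap_atomic and KM_USER0 are the real 2.6.31-era interfaces):

static void copy_to_page(struct page *page, const void *src, size_t len)
{
        void *dst = kmap_atomic(page, KM_USER0);        /* preempt off, faults off */

        memcpy(dst, src, len);          /* must neither sleep nor fault */
        kunmap_atomic(dst, KM_USER0);   /* faults back on, then preempt */
}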
@@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (in_atomic() || !mm || current->pagefault_disabled)
 		goto bad_area_nosemaphore;
 
 	down_read(&mm->mmap_sem);
......
@@ -45,7 +45,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -71,6 +71,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
@@ -85,6 +86,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
@@ -97,6 +99,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	pagefault_disable();
 	debug_kmap_atomic(type);
......
@@ -159,7 +159,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
 
-	if (in_atomic() || mm == NULL) {
+	if (in_atomic() || mm == NULL || current->pagefault_disabled) {
 		if (!user_mode(regs))
 			return SIGSEGV;
 		/* in_atomic() in user mode is really bad,
......
@@ -35,6 +35,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -73,5 +74,6 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	local_flush_tlb_page(NULL, vaddr);
 #endif
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(kunmap_atomic);
@@ -34,7 +34,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long idx;
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -73,6 +73,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
@@ -99,6 +100,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(kunmap_atomic);
......
@@ -1032,7 +1032,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * If we're in an interrupt, have no user context or are running
 	 * in an atomic region then we must not take the fault:
 	 */
-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
 		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
......
@@ -33,6 +33,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 
 	if (!PageHighMem(page))
@@ -74,6 +75,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 
 /*
......
@@ -37,6 +37,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	pagefault_disable();
 	debug_kmap_atomic(type);
@@ -83,5 +84,6 @@ iounmap_atomic(void *kvaddr, enum km_type type)
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
@@ -1360,6 +1360,7 @@ struct task_struct {
 /* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+	int pagefault_disabled;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	int hardirqs_enabled;
......
@@ -6,37 +6,10 @@
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
  */
-static inline void pagefault_disable(void)
-{
-	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
-	barrier();
-}
-
-static inline void pagefault_enable(void)
-{
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
-	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
-	preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
......
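With the inline preempt-count games gone, the header keeps only the contract: user-space accesses made between the two calls never sleep; a missing page yields -EFAULT through the fixup table. A minimal sketch of the classic use case, assuming this patched tree (probe_user_word is a hypothetical helper; __copy_from_user_inatomic() is the real interface):

static int probe_user_word(u32 __user *uaddr, u32 *val)
{
        int ret;

        pagefault_disable();    /* now only current->pagefault_disabled++ */
        ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}

Note what changed: under CONFIG_PREEMPT such a section remains preemptible, because in_atomic() no longer reflects pagefault_disable().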
@@ -1080,6 +1080,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+	p->pagefault_disabled = 0;
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0;	/* no locks held yet */
 	p->curr_chain_key = 0;
......
@@ -2955,6 +2955,28 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+}
+EXPORT_SYMBOL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
......
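Because pagefault_disabled is a per-task counter rather than a flag, these sections nest: kmap_atomic() (which also calls pagefault_disable(), see the hunks above) can sit inside an explicit pagefault_disable() region, and faults stay off until the outermost pagefault_enable(). The barrier() calls are compiler barriers only, which suffices because the counter is written and read solely in the owning task's context. A nesting sketch (illustrative; vaddr, page, buf, and len are assumed to be declared in the caller):

        pagefault_disable();                    /* ->pagefault_disabled: 0 -> 1 */
        vaddr = kmap_atomic(page, KM_USER0);    /* 1 -> 2 */
        memcpy(vaddr, buf, len);
        kunmap_atomic(vaddr, KM_USER0);         /* 2 -> 1, faults still off */
        pagefault_enable();                     /* 1 -> 0, faults allowed again */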