Commit 61b771fc (linux-davinci)
Authored Jul 28, 2009 by Thomas Gleixner
Parents: dd9a0752 e34d6077

Merge branch 'rt/pagefault' into rt/base
Showing 14 changed files with 46 additions and 36 deletions (+46, -36)
arch/arm/mm/fault.c               +1  -1
arch/frv/include/asm/highmem.h    +2  -0
arch/mips/mm/fault.c              +1  -1
arch/mips/mm/highmem.c            +4  -1
arch/powerpc/mm/fault.c           +1  -1
arch/powerpc/mm/highmem.c         +2  -0
arch/sparc/mm/highmem.c           +3  -1
arch/x86/mm/fault.c               +1  -1
arch/x86/mm/highmem_32.c          +2  -0
arch/x86/mm/iomap_32.c            +2  -0
include/linux/sched.h             +1  -0
include/linux/uaccess.h           +3  -30
kernel/fork.c                     +1  -0
mm/memory.c                       +22 -0
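Taken together, the branch does two things: pagefault_disable()/pagefault_enable() stop relying on the preempt count and instead maintain a per-task current->pagefault_disabled counter (include/linux/uaccess.h, mm/memory.c, include/linux/sched.h, kernel/fork.c), and the architecture page-fault handlers plus the kmap_atomic paths are adjusted to match. The sketch below is a minimal illustration of the caller pattern these changes keep working; peek_user_word is a hypothetical helper, not code from this commit.

/*
 * Minimal usage sketch (hypothetical helper, not part of this commit):
 * with the fault handlers below also checking
 * current->pagefault_disabled, a faulting user access inside this
 * window is resolved through the exception fixup table instead of
 * sleeping on mmap_sem.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static int peek_user_word(u32 __user *uaddr, u32 *val)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}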
arch/arm/mm/fault.c

@@ -258,7 +258,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (in_atomic() || !mm || current->pagefault_disabled)
 		goto no_context;
 
 	/*
arch/frv/include/asm/highmem.h

@@ -116,6 +116,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
+	preempt_disable();
 	pagefault_disable();
 	debug_kmap_atomic(type);
 	paddr = page_to_phys(page);
@@ -173,6 +174,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 		BUG();
 	}
 	pagefault_enable();
+	preempt_enable();
 }
 
 #endif /* !__ASSEMBLY__ */
arch/mips/mm/fault.c

@@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (in_atomic() || !mm || current->pagefault_disabled)
 		goto bad_area_nosemaphore;
 
 	down_read(&mm->mmap_sem);
arch/mips/mm/highmem.c

@@ -45,7 +45,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -71,6 +71,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
@@ -85,6 +86,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 	pagefault_enable();
+	preempt_enable();
 }
 
 EXPORT_SYMBOL(__kunmap_atomic);
@@ -97,6 +99,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	pagefault_disable();
 	debug_kmap_atomic(type);
arch/powerpc/mm/fault.c

@@ -159,7 +159,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
 
-	if (in_atomic() || mm == NULL) {
+	if (in_atomic() || mm == NULL || current->pagefault_disabled) {
 		if (!user_mode(regs))
 			return SIGSEGV;
 		/* in_atomic() in user mode is really bad,
arch/powerpc/mm/highmem.c

@@ -35,6 +35,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -73,5 +74,6 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 		local_flush_tlb_page(NULL, vaddr);
 #endif
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(kunmap_atomic);
arch/sparc/mm/highmem.c

@@ -34,7 +34,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long idx;
 	unsigned long vaddr;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -73,6 +73,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
@@ -99,6 +100,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(kunmap_atomic);
arch/x86/mm/fault.c

@@ -1032,7 +1032,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * If we're in an interrupt, have no user context or are running
 	 * in an atomic region then we must not take the fault:
 	 */
-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
 		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
arch/x86/mm/highmem_32.c

@@ -33,6 +33,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 
 	if (!PageHighMem(page))
@@ -74,6 +75,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 
 /*
arch/x86/mm/iomap_32.c

@@ -37,6 +37,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	pagefault_disable();
 
 	debug_kmap_atomic(type);
@@ -83,5 +84,6 @@ iounmap_atomic(void *kvaddr, enum km_type type)
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
include/linux/sched.h

@@ -1360,6 +1360,7 @@ struct task_struct {
 /* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+	int pagefault_disabled;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	int hardirqs_enabled;
include/linux/uaccess.h

@@ -6,37 +6,10 @@
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
  */
-static inline void pagefault_disable(void)
-{
-	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
-	barrier();
-}
-
-static inline void pagefault_enable(void)
-{
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
-	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
-	preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
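With the inline, preempt-count based implementation gone from uaccess.h, pagefault_disable() no longer implies preempt_disable(). Callers that also need to stay on the current CPU now pair both calls explicitly, which is exactly what the kmap_atomic() hunks above do. A minimal caller-side sketch (hypothetical helper, my reading of the merged API, not code from this commit):

#include <linux/preempt.h>
#include <linux/uaccess.h>

static void atomic_user_window(void)
{
	preempt_disable();	/* still required to stay on this CPU    */
	pagefault_disable();	/* faults now only go to the fixup table */

	/* ... per-CPU work and *_inatomic user accesses here ... */

	pagefault_enable();
	preempt_enable();
}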
kernel/fork.c

@@ -1080,6 +1080,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+	p->pagefault_disabled = 0;
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
mm/memory.c

@@ -2955,6 +2955,28 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+}
+EXPORT_SYMBOL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
 */
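Because the new out-of-line helpers simply increment and decrement current->pagefault_disabled, disable/enable pairs still nest the way the old preempt-count based versions did. A small illustration (hypothetical caller, not from this commit):

static void nested_disable_example(void)
{
	pagefault_disable();	/* pagefault_disabled: 0 -> 1            */
	pagefault_disable();	/* 1 -> 2, faults still disabled         */

	/* ... inatomic user accesses ... */

	pagefault_enable();	/* 2 -> 1, still disabled                */
	pagefault_enable();	/* 1 -> 0, normal fault handling resumes */
}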