Commit f8eeae68 authored by Thomas Gleixner, committed by Ingo Molnar

x86: clean up arch/x86/mm/mmap_32/64.c

Whitespace and coding style cleanup.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ed4aed98
arch/x86/mm/mmap_32.c
@@ -64,8 +64,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
 	if (sysctl_legacy_va_layout ||
-			(current->personality & ADDR_COMPAT_LAYOUT) ||
-			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
+	    (current->personality & ADDR_COMPAT_LAYOUT) ||
+	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
arch/x86/mm/mmap_64.c
@@ -16,11 +16,14 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 #endif
 	mm->mmap_base = TASK_UNMAPPED_BASE;
 	if (current->flags & PF_RANDOMIZE) {
-		/* Add 28bit randomness which is about 40bits of address space
-		   because mmap base has to be page aligned.
-		   or ~1/128 of the total user VM
-		   (total user address space is 47bits) */
+		/*
+		 * Add 28bit randomness which is about 40bits of
+		 * address space because mmap base has to be page
+		 * aligned. or ~1/128 of the total user VM (total
+		 * user address space is 47bits)
+		 */
 		unsigned rnd = get_random_int() & 0xfffffff;
+
 		mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
 	}
 	mm->get_unmapped_area = arch_get_unmapped_area;
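For reference, the arithmetic in the reformatted comment can be sanity-checked outside the kernel. The following is a minimal, hypothetical user-space sketch (not part of the commit, and not kernel code), assuming a 64-bit host and the x86-64 values PAGE_SHIFT == 12 and a 47-bit user address space; it substitutes the maximum 28-bit value for the kernel's get_random_int() result.

#include <stdio.h>

#define PAGE_SHIFT 12	/* x86-64: 4 KiB pages */

int main(void)
{
	/* Largest value get_random_int() & 0xfffffff can produce: 28 bits. */
	unsigned rnd = 0xfffffff;

	/*
	 * Shifting by PAGE_SHIFT keeps mmap_base page aligned and spreads
	 * the 28 random bits over bits 12..39, i.e. ~40 bits of address
	 * space.
	 */
	unsigned long long max_off = (unsigned long long)rnd << PAGE_SHIFT;
	unsigned long long user_vm = 1ULL << 47;	/* 47-bit user VM */

	printf("max offset : %#llx (~2^40)\n", max_off);
	printf("user VM    : %#llx (2^47)\n", user_vm);
	printf("ratio      : 1/%llu\n", user_vm / (max_off + 1));
	return 0;
}

Running this prints a ratio of 1/128, which matches the comment's claim that the randomized range is ~1/128 of the total user VM (2^47 / 2^40 = 128).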