Commit f6c2e333 authored by Suresh B Siddha, committed by Linus Torvalds

[PATCH] x86_64: Unmap NULL during early bootup

We should zap the low mappings as soon as possible so that we can catch
kernel bugs more effectively.  Previously, early boot had NULL mapped and
did not trap on NULL references.

This patch introduces boot_level4_pgt, which always has the low identity
addresses mapped.  During boot, all processors use it as their level-4 page
table.  On the BP, we switch to init_level4_pgt as soon as we enter C code
and zap the low mappings once we are done using the identity-mapped low
addresses.  On the APs, we zap the low mappings as soon as we jump to C code.
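
For orientation, here is a condensed sketch of the flow this introduces,
paraphrased from the hunks below rather than quoted verbatim; the name
bp_early_c_entry_sketch is illustrative only, while zap_low_mappings is the
function actually changed by this patch:

	/* Every CPU enters C code with boot_level4_pgt in CR3; the early
	 * assembly in head.S loads it, and it keeps the low identity map. */
	static void bp_early_c_entry_sketch(void)
	{
		/* BP, in x86_64_start_kernel(): copy into init_level4_pgt
		 * and switch to it right away. */
		memcpy(init_level4_pgt, boot_level4_pgt,
		       PTRS_PER_PGD * sizeof(pgd_t));
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
		/* setup_arch() later calls zap_low_mappings(0) once the
		 * identity-mapped low addresses are no longer needed. */
	}

	void __cpuinit zap_low_mappings(int cpu)
	{
		if (cpu == 0) {
			/* BP: clear the identity entry in init_level4_pgt */
			pgd_clear(pgd_offset_k(0UL));
		} else {
			/* AP: switch away from boot_level4_pgt, whose identity
			 * mapping was needed only for early bringup */
			asm volatile("movq %0,%%cr3"
				     :: "r" (__pa_symbol(&init_level4_pgt)));
		}
		__flush_tlb_all();
	}

mem_init() then copies entries 1..511 of init_level4_pgt back into
boot_level4_pgt, so CPUs brought up later (CPU hotplug) still start from a
page table that has both the kernel mappings and the low identity mapping
they need.
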
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 69d81fcd
@@ -542,7 +542,7 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
 	 * RSDP signature.
 	 */
 	for (offset = 0; offset < length; offset += 16) {
-		if (strncmp((char *)(start + offset), "RSD PTR ", sig_len))
+		if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
 			continue;
 		return (start + offset);
 	}
......
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/threads.h>
+#include <linux/init.h>
 #include <asm/desc.h>
 #include <asm/segment.h>
 #include <asm/page.h>
@@ -70,7 +71,7 @@ startup_32:
 	movl	%eax, %cr4

 	/* Setup early boot stage 4 level pagetables */
-	movl	$(init_level4_pgt - __START_KERNEL_map), %eax
+	movl	$(boot_level4_pgt - __START_KERNEL_map), %eax
 	movl	%eax, %cr3

 	/* Setup EFER (Extended Feature Enable Register) */
@@ -113,7 +114,7 @@ startup_64:
 	movq	%rax, %cr4

 	/* Setup early boot stage 4 level pagetables. */
-	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
+	movq	$(boot_level4_pgt - __START_KERNEL_map), %rax
 	movq	%rax, %cr3

 	/* Check if nx is implemented */
@@ -240,20 +241,10 @@ ljumpvector:
 ENTRY(stext)
 ENTRY(_stext)

-/*
- * This default setting generates an ident mapping at address 0x100000
- * and a mapping for the kernel that precisely maps virtual address
- * 0xffffffff80000000 to physical address 0x000000. (always using
- * 2Mbyte large pages provided by PAE mode)
- */
 .org 0x1000
 ENTRY(init_level4_pgt)
-	.quad	0x0000000000002007 + __PHYSICAL_START	/* -> level3_ident_pgt */
-	.fill	255,8,0
-	.quad	0x000000000000a007 + __PHYSICAL_START
-	.fill	254,8,0
-	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-	.quad	0x0000000000003007 + __PHYSICAL_START	/* -> level3_kernel_pgt */
+	/* This gets initialized in x86_64_start_kernel */
+	.fill	512,8,0

 .org 0x2000
 ENTRY(level3_ident_pgt)
@@ -350,6 +341,24 @@ ENTRY(wakeup_level4_pgt)
 	.quad	0x0000000000003007 + __PHYSICAL_START	/* -> level3_kernel_pgt */
 #endif

+#ifndef CONFIG_HOTPLUG_CPU
+	__INITDATA
+#endif
+/*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+ * 0xffffffff80000000 to physical address 0x000000. (always using
+ * 2Mbyte large pages provided by PAE mode)
+ */
+	.align PAGE_SIZE
+ENTRY(boot_level4_pgt)
+	.quad	0x0000000000002007 + __PHYSICAL_START	/* -> level3_ident_pgt */
+	.fill	255,8,0
+	.quad	0x000000000000a007 + __PHYSICAL_START
+	.fill	254,8,0
+	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+	.quad	0x0000000000003007 + __PHYSICAL_START	/* -> level3_kernel_pgt */
+
 	.data
 	.align 16
......
@@ -19,6 +19,7 @@
 #include <asm/bootsetup.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/pgtable.h>

 /* Don't add a printk in there. printk relies on the PDA which is not initialized
    yet. */
@@ -86,6 +87,13 @@ void __init x86_64_start_kernel(char * real_mode_data)
 		set_intr_gate(i, early_idt_handler);
 	asm volatile("lidt %0" :: "m" (idt_descr));
 	clear_bss();
+
+	/*
+	 * switch to init_level4_pgt from boot_level4_pgt
+	 */
+	memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
+	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+
 	pda_init(0);
 	copy_bootdata(real_mode_data);
 #ifdef CONFIG_SMP
......
@@ -549,7 +549,7 @@ void __init get_smp_config (void)
 		 * Read the physical hardware table.  Anything here will
 		 * override the defaults.
 		 */
-		if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) {
+		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
 			smp_found_config = 0;
 			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
 			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
......
@@ -571,6 +571,8 @@ void __init setup_arch(char **cmdline_p)

 	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

+	zap_low_mappings(0);
+
 #ifdef CONFIG_ACPI
 	/*
 	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
......
@@ -141,7 +141,6 @@ void pda_init(int cpu)
 			panic("cannot allocate irqstack for cpu %d", cpu);
 	}

-	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
 	pda->irqstackptr += IRQSTACKSIZE-64;
 }
@@ -197,6 +196,7 @@ void __cpuinit cpu_init (void)
 	/* CPU 0 is initialised in head64.c */
 	if (cpu != 0) {
 		pda_init(cpu);
+		zap_low_mappings(cpu);
 	} else
 		estacks = boot_exception_stacks;
......
@@ -1063,9 +1063,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
  */
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-#ifndef CONFIG_HOTPLUG_CPU
-	zap_low_mappings();
-#endif
 	smp_cleanup_boot();

 #ifdef CONFIG_X86_IO_APIC
......
@@ -312,12 +312,19 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
 extern struct x8664_pda cpu_pda[NR_CPUS];

-/* Assumes all CPUs still execute in init_mm */
-void zap_low_mappings(void)
+void __cpuinit zap_low_mappings(int cpu)
 {
-	pgd_t *pgd = pgd_offset_k(0UL);
-	pgd_clear(pgd);
-	flush_tlb_all();
+	if (cpu == 0) {
+		pgd_t *pgd = pgd_offset_k(0UL);
+		pgd_clear(pgd);
+	} else {
+		/*
+		 * For AP's, zap the low identity mappings by changing the cr3
+		 * to init_level4_pgt and doing local flush tlb all
+		 */
+		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+	}
+	__flush_tlb_all();
 }

 /* Compute zone sizes for the DMA and DMA32 zones in a node. */
@@ -474,14 +481,13 @@ void __init mem_init(void)
 		datasize >> 10,
 		initsize >> 10);

+#ifdef CONFIG_SMP
 	/*
-	 * Subtle. SMP is doing its boot stuff late (because it has to
-	 * fork idle threads) - but it also needs low mappings for the
-	 * protected-mode entry to work. We zap these entries only after
-	 * the WP-bit has been tested.
+	 * Sync boot_level4_pgt mappings with the init_level4_pgt
+	 * except for the low identity mappings which are already zapped
+	 * in init_level4_pgt. This sync-up is essential for AP's bringup
 	 */
-#ifndef CONFIG_SMP
-	zap_low_mappings();
-#endif
+	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
 #endif
 }
......
@@ -16,6 +16,7 @@ extern pud_t level3_physmem_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pgd_t init_level4_pgt[];
+extern pgd_t boot_level4_pgt[];
 extern unsigned long __supported_pte_mask;

 #define swapper_pg_dir init_level4_pgt
......
@@ -11,6 +11,8 @@ struct pt_regs;
 extern void start_kernel(void);
 extern void pda_init(int);

+extern void zap_low_mappings(int cpu);
+
 extern void early_idt_handler(void);

 extern void mcheck_init(struct cpuinfo_x86 *c);
......
@@ -47,7 +47,6 @@ extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
 extern int smp_num_siblings;
 extern void smp_send_reschedule(int cpu);
-extern void zap_low_mappings(void);
 void smp_stop_cpu(void);
 extern int smp_call_function_single(int cpuid, void (*func) (void *info),
 				void *info, int retry, int wait);
......