Commit c11efdf9 authored by Ravikiran G Thirumalai, committed by Linus Torvalds

[PATCH] x86_64: Align and pad x86_64 GDT on page boundary

This patch is along the same lines as Zachary Amsden's i386 GDT page alignment
patch in -mm, but for x86_64.

Patch to align and pad the x86_64 GDT on page boundaries.

[AK: some minor cleanups and fixed incorrect TLS initialization
in CPU init.]
Signed-off-by: Nippun Goel <nippung@calsoftinc.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent bb33421d
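In short, the statically sized per-CPU GDT array becomes a single page-aligned, page-padded boot GDT (cpu_gdt_table) plus a dynamically allocated, zeroed page per secondary CPU, reached through the new cpu_gdt() accessor. A minimal sketch of how the pieces in the hunks below fit together; the wrapper function setup_secondary_gdt() and its return convention are illustrative only, the other identifiers are taken from the patch:

	/* Accessor added by this patch: a CPU's GDT lives wherever
	 * cpu_gdt_descr[cpu].address points. */
	#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)

	/* Hypothetical helper combining the do_boot_cpu() and cpu_init()
	 * steps: allocate a zeroed, page-aligned GDT page for a secondary
	 * CPU (kept across CPU hotplug) and copy the boot GDT into it. */
	static int setup_secondary_gdt(int cpu)
	{
		if (!cpu_gdt_descr[cpu].address &&
		    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL)))
			return -1;	/* allocation failed */

		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
		cpu_gdt_descr[cpu].size = GDT_SIZE;
		return 0;	/* cpu_init() then loads it with lgdt */
	}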
@@ -379,7 +379,7 @@ gdt:
 	 * Also sysret mandates a special GDT layout
 	 */
-	.align L1_CACHE_BYTES
+	.align PAGE_SIZE

 /* The TLS descriptors are currently at a different place compared to i386.
    Hopefully nobody expects them at a fixed place (Wine?) */
@@ -401,10 +401,11 @@ ENTRY(cpu_gdt_table)
 gdt_end:
 	/* asm/segment.h:GDT_ENTRIES must match this */
 	/* This should be a multiple of the cache line size */
-	/* GDTs of other CPUs: */
-	.fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
+	/* GDTs of other CPUs are now dynamically allocated */
+
+	/* zero the remaining page */
+	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0

-	.align L1_CACHE_BYTES
 ENTRY(idt_table)
 	.rept 256
 	.quad 0
@@ -213,16 +213,14 @@ void __cpuinit cpu_init (void)
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
 	 */
-	if (cpu) {
-		memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
-	}
+	if (cpu)
+		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);

 	cpu_gdt_descr[cpu].size = GDT_SIZE;
-	cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
 	asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
 	asm volatile("lidt %0" :: "m" (idt_descr));

-	memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
+	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);

 	syscall_init();
 	wrmsrl(MSR_FS_BASE, 0);
@@ -744,6 +744,13 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	};
 	DECLARE_WORK(work, do_fork_idle, &c_idle);

+	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
+	if (!cpu_gdt_descr[cpu].address &&
+	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
+		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
+		return -1;
+	}
+
 	c_idle.idle = get_idle_for_cpu(cpu);
 	if (c_idle.idle) {
@@ -120,7 +120,7 @@ void fix_processor_context(void)
 	set_tss_desc(cpu,t);	/* This just modifies memory; should not be neccessary. But... This is neccessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
-	cpu_gdt_table[cpu][GDT_ENTRY_TSS].type = 9;
+	cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;
 	syscall_init();		/* This sets MSR_*STAR and related */
 	load_TR_desc();		/* This does ltr */
@@ -25,7 +25,7 @@ struct n_desc_struct {
 	unsigned int a,b;
 };

-extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];

 enum {
 	GATE_INTERRUPT = 0xE,
@@ -79,6 +79,9 @@ extern struct desc_struct default_ldt[];
 extern struct gate_struct idt_table[];
 extern struct desc_ptr cpu_gdt_descr[];

+/* the cpu gdt accessor */
+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
+
 static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
 {
 	struct gate_struct s;
@@ -144,20 +147,20 @@ static inline void set_tss_desc(unsigned cpu, void *addr)
 	 * -1? seg base+limit should be pointing to the address of the
 	 * last valid byte
 	 */
-	set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS],
+	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
 		(unsigned long)addr, DESC_TSS,
 		IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
 }

 static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
 {
-	set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (unsigned long)addr,
+	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
 			      DESC_LDT, size * 8 - 1);
 }

 static inline void set_seg_base(unsigned cpu, int entry, void *base)
 {
-	struct desc_struct *d = &cpu_gdt_table[cpu][entry];
+	struct desc_struct *d = &cpu_gdt(cpu)[entry];
 	u32 addr = (u32)(u64)base;
 	BUG_ON((u64)base >> 32);
 	d->base0 = addr & 0xffff;
@@ -199,7 +202,7 @@ static inline void set_seg_base(unsigned cpu, int entry, void *base)
 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-	u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
+	u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
 	gdt[0] = t->tls_array[0];
 	gdt[1] = t->tls_array[1];
 	gdt[2] = t->tls_array[2];