Commit a04ad82d authored by Yinghai Lu, committed by Ingo Molnar

x86: fix init_memory_mapping over boundary, v4

Use PMD_SHIFT to calculate the boundary; also adjust the size estimate for the
pre-allocated page tables.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b4df32f4
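
For context, here is a minimal standalone sketch of the boundary arithmetic this patch fixes in find_early_table_space(): the old code hardcoded a 2MB shift (21), but on a 32-bit non-PAE kernel large pages are 4MB (PMD_SHIFT is 22), so the leftover-range estimate could come out wrong. This is not kernel code; PAGE_SHIFT/PMD_SHIFT/PMD_SIZE are defined locally for a non-PAE configuration and the sample end address is an arbitrary assumption.

#include <stdio.h>

/* Illustrative constants for a 32-bit non-PAE kernel (assumption for this
 * sketch): large pages are 4MB, so PMD_SHIFT is 22, not the hardcoded 21
 * used by the old code. */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PMD_SHIFT       22
#define PMD_SIZE        (1UL << PMD_SHIFT)

int main(void)
{
        unsigned long end = 0x37bfe000;     /* arbitrary sample end address */

        /* old calculation: hardcoded 2MB boundary */
        unsigned long extra_old = end - ((end >> 21) << 21) + (2UL << 20);

        /* new calculation: boundary derived from PMD_SHIFT/PMD_SIZE */
        unsigned long extra_new = end - ((end >> PMD_SHIFT) << PMD_SHIFT) + PMD_SIZE;

        printf("old estimate: %lu PTEs\n", (extra_old + PAGE_SIZE - 1) >> PAGE_SHIFT);
        printf("new estimate: %lu PTEs\n", (extra_new + PAGE_SIZE - 1) >> PAGE_SHIFT);
        return 0;
}

With the sample end address above, the old formula reserves 1022 PTEs while the PMD_SHIFT-based one reserves 2046, which is why the hardcoded shift could under-reserve on non-PAE builds.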
@@ -184,8 +184,9 @@ static inline int is_kernel_text(unsigned long addr)
  * PAGE_OFFSET:
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
-                                                unsigned long start,
-                                                unsigned long end)
+                                                unsigned long start_pfn,
+                                                unsigned long end_pfn,
+                                                int use_pse)
 {
         int pgd_idx, pmd_idx, pte_ofs;
         unsigned long pfn;
@@ -193,33 +194,33 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
         pmd_t *pmd;
         pte_t *pte;
         unsigned pages_2m = 0, pages_4k = 0;
-        unsigned limit_pfn = end >> PAGE_SHIFT;
-        pgd_idx = pgd_index(start + PAGE_OFFSET);
-        pgd = pgd_base + pgd_idx;
-        pfn = start >> PAGE_SHIFT;
+        if (!cpu_has_pse)
+                use_pse = 0;
+        pfn = start_pfn;
+        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+        pgd = pgd_base + pgd_idx;
         for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                 pmd = one_md_table_init(pgd);
-                if (pfn >= limit_pfn)
-                        continue;
-                for (pmd_idx = 0;
-                     pmd_idx < PTRS_PER_PMD && pfn < limit_pfn;
+                if (pfn >= end_pfn)
+                        continue;
+#ifdef CONFIG_X86_PAE
+                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+                pmd += pmd_idx;
+#else
+                pmd_idx = 0;
+#endif
+                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                      pmd++, pmd_idx++) {
                         unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
                         /*
                          * Map with big pages if possible, otherwise
                          * create normal page tables:
-                         *
-                         * Don't use a large page for the first 2/4MB of memory
-                         * because there are often fixed size MTRRs in there
-                         * and overlapping MTRRs into large pages can cause
-                         * slowdowns.
                          */
-                        if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0) &&
-                            (pfn + PTRS_PER_PTE) <= limit_pfn) {
+                        if (use_pse) {
                                 unsigned int addr2;
                                 pgprot_t prot = PAGE_KERNEL_LARGE;
@@ -238,8 +239,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                         }
                         pte = one_page_table_init(pmd);
-                        for (pte_ofs = 0;
-                             pte_ofs < PTRS_PER_PTE && pfn < limit_pfn;
+                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+                        pte += pte_ofs;
+                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                              pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                 pgprot_t prot = PAGE_KERNEL;
@@ -738,14 +740,18 @@ static void __init find_early_table_space(unsigned long end)
         if (cpu_has_pse) {
                 unsigned long extra;
-                extra = end - ((end>>21) << 21);
-                extra += (2UL<<20);
+                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+                extra += PMD_SIZE;
                 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
         } else
                 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
         tables += PAGE_ALIGN(ptes * sizeof(pte_t));
+        /* for fixmap */
+        tables += PAGE_SIZE * 2;
         /*
          * RED-PEN putting page tables only on node 0 could
          * cause a hotspot and fill up ZONE_DMA.  The page tables
@@ -770,6 +776,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                                                unsigned long end)
 {
         pgd_t *pgd_base = swapper_pg_dir;
+        unsigned long start_pfn, end_pfn;
+        unsigned long big_page_start;
         /*
          * Find space for the kernel direct mapping tables.
@@ -794,7 +802,44 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                 __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
         }
-        kernel_physical_mapping_init(pgd_base, start, end);
+        /*
+         * Don't use a large page for the first 2/4MB of memory
+         * because there are often fixed size MTRRs in there
+         * and overlapping MTRRs into large pages can cause
+         * slowdowns.
+         */
+        big_page_start = PMD_SIZE;
+        if (start < big_page_start) {
+                start_pfn = start >> PAGE_SHIFT;
+                end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
+        } else {
+                /* head is not big page alignment ? */
+                start_pfn = start >> PAGE_SHIFT;
+                end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+                                 << (PMD_SHIFT - PAGE_SHIFT);
+        }
+        if (start_pfn < end_pfn)
+                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
+
+        /* big page range */
+        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+                         << (PMD_SHIFT - PAGE_SHIFT);
+        if (start_pfn < (big_page_start >> PAGE_SHIFT))
+                start_pfn = big_page_start >> PAGE_SHIFT;
+        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+        if (start_pfn < end_pfn)
+                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
+                                             cpu_has_pse);
+
+        /* tail is not big page alignment ? */
+        start_pfn = end_pfn;
+        if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
+                end_pfn = end >> PAGE_SHIFT;
+                if (start_pfn < end_pfn)
+                        kernel_physical_mapping_init(pgd_base, start_pfn,
+                                                     end_pfn, 0);
+        }
+
         early_ioremap_page_table_range_init(pgd_base);
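
To see the three-way split that the patched init_memory_mapping() now performs (an unaligned head mapped with 4k pages, a PMD-aligned middle eligible for large pages, and a 4k tail), here is a standalone sketch of the same pfn-range arithmetic. It is not kernel code: the constants assume a non-PAE 32-bit build, the min_ul() helper is local, and the sample start/end values are arbitrary assumptions.

#include <stdio.h>

/* Standalone sketch of the head / big-page / tail split introduced by this
 * patch; constants assume a non-PAE 32-bit build (4MB large pages). */
#define PAGE_SHIFT      12
#define PMD_SHIFT       22
#define PMD_SIZE        (1UL << PMD_SHIFT)

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long start = 0x0;
        unsigned long end = 0x37bfe000;          /* sample values */
        unsigned long big_page_start = PMD_SIZE; /* no large pages below 4MB */
        unsigned long start_pfn, end_pfn;

        /* head: below big_page_start or not PMD-aligned, mapped with 4k pages */
        if (start < big_page_start) {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = min_ul(big_page_start >> PAGE_SHIFT, end >> PAGE_SHIFT);
        } else {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
                                << (PMD_SHIFT - PAGE_SHIFT);
        }
        if (start_pfn < end_pfn)
                printf("head      : pfn %#lx - %#lx (4k pages)\n", start_pfn, end_pfn);

        /* middle: PMD-aligned range, eligible for large pages */
        start_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < (big_page_start >> PAGE_SHIFT))
                start_pfn = big_page_start >> PAGE_SHIFT;
        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn)
                printf("big pages : pfn %#lx - %#lx\n", start_pfn, end_pfn);

        /* tail: leftover above the last PMD boundary, 4k pages again */
        start_pfn = end_pfn;
        if (start_pfn > (big_page_start >> PAGE_SHIFT)) {
                end_pfn = end >> PAGE_SHIFT;
                if (start_pfn < end_pfn)
                        printf("tail      : pfn %#lx - %#lx (4k pages)\n",
                               start_pfn, end_pfn);
        }
        return 0;
}

With the sample values, the head covers pfn 0x0-0x400, the large-page range covers 0x400-0x37800, and the tail covers 0x37800-0x37bfe, matching the three kernel_physical_mapping_init() calls added above.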