linux / linux-davinci / Commits

Commit 28e93a00
authored Mar 05, 2009 by Ingo Molnar
Merge branch 'x86/mm' into x86/core
Parents: caab36b5, ed26dbe5
Showing 13 changed files with 472 additions and 491 deletions.
arch/x86/include/asm/init.h               +18    -0
arch/x86/include/asm/page_types.h          +0    -6
arch/x86/include/asm/pat.h                 +5    -0
arch/x86/include/asm/pgtable_32_types.h    +5    -0
arch/x86/include/asm/pgtable_types.h       +1    -0
arch/x86/kernel/mpparse.c                 +22    -3
arch/x86/kernel/setup.c                    +3    -1
arch/x86/mm/highmem_32.c                   +0    -9
arch/x86/mm/init.c                       +344    -0
arch/x86/mm/init_32.c                     +59  -196
arch/x86/mm/init_64.c                      +7  -265
arch/x86/mm/ioremap.c                      +5    -9
arch/x86/mm/numa_32.c                      +3    -2
arch/x86/include/asm/init.h
new file mode 100644
#ifndef _ASM_X86_INIT_32_H
#define _ASM_X86_INIT_32_H

#ifdef CONFIG_X86_32
extern void __init early_ioremap_page_table_range_init(void);
#endif

extern unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask);

extern unsigned long __initdata e820_table_start;
extern unsigned long __meminitdata e820_table_end;
extern unsigned long __meminitdata e820_table_top;

#endif /* _ASM_X86_INIT_32_H */
arch/x86/include/asm/page_types.h
@@ -40,14 +40,8 @@
 #ifndef __ASSEMBLY__

-struct pgprot;
-
 extern int page_is_ram(unsigned long pagenr);
 extern int devmem_is_allowed(unsigned long pagenr);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-		       struct pgprot vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-			 struct pgprot vma_prot);

 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
arch/x86/include/asm/pat.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_PAT_H

 #include <linux/types.h>
+#include <asm/pgtable_types.h>

 #ifdef CONFIG_X86_PAT
 extern int pat_enabled;
@@ -17,5 +18,9 @@ extern int free_memtype(u64 start, u64 end);

 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
		unsigned long flag);
+extern void map_devmem(unsigned long pfn, unsigned long size,
+		       struct pgprot vma_prot);
+extern void unmap_devmem(unsigned long pfn, unsigned long size,
+			 struct pgprot vma_prot);

 #endif /* _ASM_X86_PAT_H */
arch/x86/include/asm/pgtable_32_types.h
@@ -25,6 +25,11 @@
  * area for the same reason. ;)
  */
 #define VMALLOC_OFFSET	(8 * 1024 * 1024)
+
+#ifndef __ASSEMBLER__
+extern bool __vmalloc_start_set; /* set once high_memory is set */
+#endif
+
 #define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
arch/x86/include/asm/pgtable_types.h
@@ -273,6 +273,7 @@ typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern int nx_enabled;
extern void set_nx(void);

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
arch/x86/kernel/mpparse.c
@@ -558,6 +558,19 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)

 static struct mpf_intel *mpf_found;

+static unsigned long __init get_mpc_size(unsigned long physptr)
+{
+	struct mpc_table *mpc;
+	unsigned long size;
+
+	mpc = early_ioremap(physptr, PAGE_SIZE);
+	size = mpc->length;
+	early_iounmap(mpc, PAGE_SIZE);
+	apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);
+
+	return size;
+}
+
 /*
  * Scan the memory blocks for an SMP configuration block.
  */
@@ -611,12 +624,16 @@ static void __init __get_smp_config(unsigned int early)
 		construct_default_ISA_mptable(mpf->feature1);

 	} else if (mpf->physptr) {
+		struct mpc_table *mpc;
+		unsigned long size;

+		size = get_mpc_size(mpf->physptr);
+		mpc = early_ioremap(mpf->physptr, size);
 		/*
 		 * Read the physical hardware table.  Anything here will
 		 * override the defaults.
 		 */
-		if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
+		if (!smp_read_mpc(mpc, early)) {
 #ifdef CONFIG_X86_LOCAL_APIC
 			smp_found_config = 0;
 #endif
@@ -624,8 +641,10 @@ static void __init __get_smp_config(unsigned int early)
 			       "BIOS bug, MP table errors detected!...\n");
 			printk(KERN_ERR "... disabling SMP support. "
 			       "(tell your hw vendor)\n");
+			early_iounmap(mpc, size);
 			return;
 		}
+		early_iounmap(mpc, size);

 		if (early)
 			return;
@@ -697,10 +716,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 			if (!reserve)
 				return 1;
-			reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
+			reserve_bootmem_generic(virt_to_phys(mpf), sizeof(*mpf),
 					BOOTMEM_DEFAULT);
 			if (mpf->physptr) {
-				unsigned long size = PAGE_SIZE;
+				unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
 				/*
 				 * We cannot access to MPC table to compute
arch/x86/kernel/setup.c
@@ -202,7 +202,9 @@ struct ist_info ist_info;
 #endif

 #else
-struct cpuinfo_x86 boot_cpu_data __read_mostly;
+struct cpuinfo_x86 boot_cpu_data __read_mostly = {
+	.x86_phys_bits = MAX_PHYSMEM_BITS,
+};
 EXPORT_SYMBOL(boot_cpu_data);
 #endif
arch/x86/mm/highmem_32.c
@@ -158,7 +158,6 @@ EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);

-#ifdef CONFIG_NUMA
 void __init set_highmem_pages_init(void)
 {
 	struct zone *zone;
@@ -182,11 +181,3 @@ void __init set_highmem_pages_init(void)
 	}
 	totalram_pages += totalhigh_pages;
 }
-#else
-void __init set_highmem_pages_init(void)
-{
-	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
-
-	totalram_pages += totalhigh_pages;
-}
-#endif /* CONFIG_NUMA */
arch/x86/mm/init.c
#include <linux/ioport.h>
#include <linux/swap.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

unsigned long __initdata e820_table_start;
unsigned long __meminitdata e820_table_end;
unsigned long __meminitdata e820_table_top;

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static void __init find_early_table_space(unsigned long end, int use_pse,
					  int use_gbpages)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

	if (use_gbpages) {
		unsigned long extra;

		extra = end - ((end >> PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
		extra += PMD_SIZE;
#endif
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
#ifdef CONFIG_X86_32
	start = 0x7000;
	e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
#else /* CONFIG_X86_64 */
	start = 0x8000;
	e820_table_start = find_e820_area(start, end, tables, PAGE_SIZE);
#endif
	if (e820_table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	e820_table_start >>= PAGE_SHIFT;
	e820_table_end = e820_table_start;
	e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, e820_table_start << PAGE_SHIFT,
		e820_table_top << PAGE_SHIFT);
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

static int save_mr(struct map_range *mr, int nr_range,
		   unsigned long start_pfn, unsigned long end_pfn,
		   unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

#ifdef CONFIG_X86_64
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}
#else
static inline void init_gbpages(void)
{
}
#endif

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long pos;
	unsigned long ret;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

	if (!after_bootmem)
		init_gbpages();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif

#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
#endif

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

	/* head if not big page alignment ? */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail is not big page (1G) alignment */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif

	/* tail is not big page (2M) alignment */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* try to merge same page size and continuous */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);

#ifdef CONFIG_X86_32
	for (i = 0; i < nr_range; i++)
		kernel_physical_mapping_init(mr[i].start, mr[i].end,
					     mr[i].page_size_mask);
	ret = end;
#else /* CONFIG_X86_64 */
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#endif

#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

#ifdef CONFIG_X86_64
	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
#endif
	__flush_tlb_all();

	if (!after_bootmem && e820_table_end > e820_table_start)
		reserve_early(e820_table_start << PAGE_SHIFT,
			      e820_table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_bootmem)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{

@@ -47,3 +384,10 @@ void free_initmem(void)
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
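The head/large-page/tail splitting that init_memory_mapping() performs above is easier to follow in isolation. The following is a minimal, standalone userspace sketch of the same idea, not kernel code: the example start/end values, the simplified save_mr(), and the single 4k/2M split (no 1G level, no 32-bit MTRR special case, no range merging) are all illustrative assumptions.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* 2M large pages */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define NR_RANGE	3

struct map_range {
	unsigned long start;
	unsigned long end;
	int big;				/* map this range with 2M pages */
};

/* record a [start_pfn, end_pfn) range, loosely like the kernel's save_mr() */
static int save_mr(struct map_range *mr, int nr, unsigned long start_pfn,
		   unsigned long end_pfn, int big)
{
	if (start_pfn < end_pfn && nr < NR_RANGE) {
		mr[nr].start = start_pfn << PAGE_SHIFT;
		mr[nr].end   = end_pfn << PAGE_SHIFT;
		mr[nr].big   = big;
		nr++;
	}
	return nr;
}

int main(void)
{
	/* example range: 1 MB .. ~891 MB, made up for illustration */
	unsigned long start = 0x00100000UL, end = 0x37bfe000UL;
	unsigned long pos, start_pfn, end_pfn;
	struct map_range mr[NR_RANGE];
	int nr = 0, i;

	/* head: 4k pages up to the first 2M boundary */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
	end_pfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr = save_mr(mr, nr, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* middle: everything that can be covered by whole 2M pages */
	start_pfn = pos >> PAGE_SHIFT;
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr = save_mr(mr, nr, start_pfn, end_pfn, 1);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail: whatever is left, in 4k pages */
	nr = save_mr(mr, nr, pos >> PAGE_SHIFT, end >> PAGE_SHIFT, 0);

	for (i = 0; i < nr; i++)
		printf(" %010lx - %010lx page %s\n",
		       mr[i].start, mr[i].end, mr[i].big ? "2M" : "4k");
	return 0;
}

Built with any C compiler, this prints a 4k head, a 2M bulk range, and a 4k tail in the same format as the KERN_DEBUG lines above; the real function additionally handles a 1G level on 64-bit and then merges adjacent ranges that ended up with the same page size.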
arch/x86/mm/init_32.c
(This diff is collapsed in the page view and is not shown here.)
arch/x86/mm/init_64.c
@@ -48,6 +48,7 @@
 #include <asm/kdebug.h>
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
+#include <asm/init.h>

 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -61,12 +62,6 @@ static unsigned long dma_reserve __initdata;

 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
-				= 1
-#endif
-;
-
 static int __init parse_direct_gbpages_off(char *arg)
 {
 	direct_gbpages = 0;
@@ -87,8 +82,6 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */

-int after_bootmem;
-
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
@@ -325,13 +318,9 @@ void __init cleanup_highmap(void)
 	}
 }

-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
 static __ref void *alloc_low_page(unsigned long *phys)
 {
-	unsigned long pfn = table_end++;
+	unsigned long pfn = e820_table_end++;
 	void *adr;

 	if (after_bootmem) {
@@ -341,7 +330,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
 		return adr;
 	}

-	if (pfn >= table_top)
+	if (pfn >= e820_table_top)
 		panic("alloc_low_page: ran out of memory");

 	adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
@@ -581,56 +570,8 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return phys_pud_init(pud, addr, end, page_size_mask);
 }

-static void __init find_early_table_space(unsigned long end, int use_pse,
-					  int use_gbpages)
-{
-	unsigned long puds, pmds, ptes, tables, start;
-
-	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-	if (use_gbpages) {
-		unsigned long extra;
-		extra = end - ((end >> PUD_SHIFT) << PUD_SHIFT);
-		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-	} else
-		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-	if (use_pse) {
-		unsigned long extra;
-		extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
-		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	} else
-		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-	/*
-	 * RED-PEN putting page tables only on node 0 could
-	 * cause a hotspot and fill up ZONE_DMA. The page tables
-	 * need roughly 0.5KB per GB.
-	 */
-	start = 0x8000;
-	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
-	if (table_start == -1UL)
-		panic("Cannot find space for the kernel page tables");
-
-	table_start >>= PAGE_SHIFT;
-	table_end = table_start;
-	table_top = table_start + (tables >> PAGE_SHIFT);
-
-	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
-}
-
-static void __init init_gbpages(void)
-{
-	if (direct_gbpages && cpu_has_gbpages)
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
-		direct_gbpages = 0;
-}
-
-static unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
 {
@@ -669,176 +610,6 @@ static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
 	return last_map_addr;
 }

-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
-#define NR_RANGE_MR 5
-
-static int save_mr(struct map_range *mr, int nr_range,
-		   unsigned long start_pfn, unsigned long end_pfn,
-		   unsigned long page_size_mask)
-{
-	if (start_pfn < end_pfn) {
-		if (nr_range >= NR_RANGE_MR)
-			panic("run out of range for init_memory_mapping\n");
-		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
-		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
-		mr[nr_range].page_size_mask = page_size_mask;
-		nr_range++;
-	}
-
-	return nr_range;
-}
-
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-					       unsigned long end)
-{
-	unsigned long last_map_addr = 0;
-	unsigned long page_size_mask = 0;
-	unsigned long start_pfn, end_pfn;
-	unsigned long pos;
-
-	struct map_range mr[NR_RANGE_MR];
-	int nr_range, i;
-	int use_pse, use_gbpages;
-
-	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
-
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
-	if (!after_bootmem)
-		init_gbpages();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-	 * This will simplify cpa(), which otherwise needs to support splitting
-	 * large pages into small in interrupt context, etc.
-	 */
-	use_pse = use_gbpages = 0;
-#else
-	use_pse = cpu_has_pse;
-	use_gbpages = direct_gbpages;
-#endif
-
-	if (use_gbpages)
-		page_size_mask |= 1 << PG_LEVEL_1G;
-	if (use_pse)
-		page_size_mask |= 1 << PG_LEVEL_2M;
-
-	memset(mr, 0, sizeof(mr));
-	nr_range = 0;
-
-	/* head if not big page alignment ?*/
-	start_pfn = start >> PAGE_SHIFT;
-	pos = start_pfn << PAGE_SHIFT;
-	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
-			<< (PMD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > (end >> PAGE_SHIFT))
-		end_pfn = end >> PAGE_SHIFT;
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* big page (2M) range*/
-	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask & (1<<PG_LEVEL_2M));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* big page (1G) range */
-	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-			 << (PUD_SHIFT - PAGE_SHIFT);
-	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask &
-				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* tail is not big page (1G) alignment */
-	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask & (1<<PG_LEVEL_2M));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* tail is not big page (2M) alignment */
-	start_pfn = pos>>PAGE_SHIFT;
-	end_pfn = end>>PAGE_SHIFT;
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-
-	/* try to merge same page size and continuous */
-	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
-		unsigned long old_start;
-		if (mr[i].end != mr[i+1].start ||
-		    mr[i].page_size_mask != mr[i+1].page_size_mask)
-			continue;
-		/* move it */
-		old_start = mr[i].start;
-		memmove(&mr[i], &mr[i+1],
-			(nr_range - 1 - i) * sizeof(struct map_range));
-		mr[i--].start = old_start;
-		nr_range--;
-	}
-
-	for (i = 0; i < nr_range; i++)
-		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-				mr[i].start, mr[i].end,
-			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
-
-	if (!after_bootmem)
-		find_early_table_space(end, use_pse, use_gbpages);
-
-	for (i = 0; i < nr_range; i++)
-		last_map_addr = kernel_physical_mapping_init(
-					mr[i].start, mr[i].end,
-					mr[i].page_size_mask);
-
-	if (!after_bootmem)
-		mmu_cr4_features = read_cr4();
-	__flush_tlb_all();
-
-	if (!after_bootmem && table_end > table_start)
-		reserve_early(table_start << PAGE_SHIFT,
-			      table_end << PAGE_SHIFT, "PGTABLE");
-
-	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
-			 last_map_addr, end);
-
-	if (!after_bootmem)
-		early_memtest(start, end);
-
-	return last_map_addr >> PAGE_SHIFT;
-}
-
 #ifndef CONFIG_NUMA
 void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -910,28 +681,6 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif /* CONFIG_MEMORY_HOTPLUG */

-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-	if (pagenr <= 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
-}
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;
@@ -1019,13 +768,6 @@ void mark_rodata_ro(void)
 #endif

-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				    int flags)
 {
arch/x86/mm/ioremap.c
@@ -38,8 +38,7 @@ unsigned long __phys_addr(unsigned long x)
 	} else {
 		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
 		x -= PAGE_OFFSET;
-		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ?
-					x > MAXMEM : !phys_addr_valid(x));
+		VIRTUAL_BUG_ON(!phys_addr_valid(x));
 	}
 	return x;
 }
@@ -56,11 +55,9 @@ bool __virt_addr_valid(unsigned long x)
 		if (x < PAGE_OFFSET)
 			return false;
 		x -= PAGE_OFFSET;
-		if (system_state == SYSTEM_BOOTING ?
-				x > MAXMEM : !phys_addr_valid(x)) {
+		if (!phys_addr_valid(x))
 			return false;
-		}
 	}

 	return pfn_valid(x >> PAGE_SHIFT);
 }
@@ -76,10 +73,9 @@ static inline int phys_addr_valid(unsigned long addr)
 #ifdef CONFIG_DEBUG_VIRTUAL
 unsigned long __phys_addr(unsigned long x)
 {
-	/* VMALLOC_* aren't constants; not available at the boot time */
+	/* VMALLOC_* aren't constants  */
 	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-	VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
-			is_vmalloc_addr((void *) x));
+	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
 	return x - PAGE_OFFSET;
 }
 EXPORT_SYMBOL(__phys_addr);
@@ -89,7 +85,7 @@ bool __virt_addr_valid(unsigned long x)
 {
 	if (x < PAGE_OFFSET)
 		return false;
-	if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
+	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
 		return false;

 	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
 }
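The ioremap.c hunks above drop the system_state == SYSTEM_BOOTING special cases in favour of the new __vmalloc_start_set flag declared in pgtable_32_types.h earlier in this commit. Below is a minimal userspace sketch of that pattern, not kernel code: PAGE_OFFSET, the vmalloc bound, and the simplified check (no pfn_valid() step) are made-up assumptions used only to show how the flag gates the vmalloc-range rejection.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_OFFSET	0xc0000000UL

static bool vmalloc_start_set;				/* stands in for __vmalloc_start_set */
static unsigned long vmalloc_start = 0xf0000000UL;	/* example bound only */

static bool is_vmalloc_addr(unsigned long x)
{
	return x >= vmalloc_start;
}

/* simplified __virt_addr_valid(): lowmem check only, no pfn_valid() */
static bool virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (vmalloc_start_set && is_vmalloc_addr(x))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", virt_addr_valid(0xc1000000UL));	/* 1: direct-mapped lowmem */
	printf("%d\n", virt_addr_valid(0xf1000000UL));	/* 1: bounds not known yet */

	vmalloc_start_set = true;			/* in the kernel: high_memory is now set */
	printf("%d\n", virt_addr_valid(0xf1000000UL));	/* 0: vmalloc-range address rejected */
	return 0;
}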
arch/x86/mm/numa_32.c
@@ -416,10 +416,11 @@ void __init initmem_init(unsigned long start_pfn,
 	for_each_online_node(nid)
 		propagate_e820_map_node(nid);

-	for_each_online_node(nid)
+	for_each_online_node(nid) {
 		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+	}

-	NODE_DATA(0)->bdata = &bootmem_node_data[0];
 	setup_bootmem_allocator();
 }