Commit 51c78eb3
Authored Feb 08, 2009 by Jeremy Fitzhardinge
Committed by Jeremy Fitzhardinge, Feb 11, 2009
x86: create _types.h counterparts for page*.h
Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
parent 1484096c
Showing 7 changed files with 389 additions and 248 deletions (+389, -248)
arch/x86/include/asm/page.h           +3    -61
arch/x86/include/asm/page_32.h        +2    -87
arch/x86/include/asm/page_32_types.h  +90   -0
arch/x86/include/asm/page_64.h        +1    -100
arch/x86/include/asm/page_64.h.rej    +114  -0
arch/x86/include/asm/page_64_types.h  +102  -0
arch/x86/include/asm/page_types.h     +77   -0
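The split follows the usual kernel pattern: constants and type definitions move into *_types.h headers that can be included on their own, while the existing page*.h headers keep the inline helpers and include their _types.h counterparts. A minimal sketch of the include chain this commit sets up, paraphrased from the diffs below (simplified, not code taken from the commit):

/* asm/page.h (simplified) */
#include <asm/page_types.h>        /* PAGE_SHIFT, PAGE_SIZE, PTE_PFN_MASK, pgd_t, ... */
#ifdef CONFIG_X86_64
#include <asm/page_64.h>           /* 64-bit helpers; includes asm/page_64_types.h */
#else
#include <asm/page_32.h>           /* 32-bit helpers; includes asm/page_32_types.h */
#endif

/* asm/page_types.h (simplified) */
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
#include <asm/page_32_types.h>
#endif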
arch/x86/include/asm/page.h
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef __KERNEL__
#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32-bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#include <asm/page_types.h>
#ifdef CONFIG_X86_64
#include <asm/page_64.h>
...
@@ -44,39 +11,18 @@
#include <asm/page_32.h>
#endif	/* CONFIG_X86_64 */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifndef __ASSEMBLY__
typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;

extern int page_is_ram(unsigned long pagenr);
extern int pagerange_is_ram(unsigned long start, unsigned long end);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot);

extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;

struct page;

static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}
...
@@ -102,8 +48,6 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
...
@@ -127,8 +71,6 @@ static inline pudval_t pud_flags(pud_t pud)
	return native_pud_val(pud) & PTE_FLAGS_MASK;
}

typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
...
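The comment above PHYSICAL_PAGE_MASK is easy to gloss over; a small userspace sketch (illustrative only, not part of this commit) shows why the signed cast matters on 32-bit PAE, where PAGE_MASK is a 32-bit value but physical addresses are up to 44 bits wide:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys_mask = (1ULL << 44) - 1;             /* __PHYSICAL_MASK under PAE */
	uint32_t page_mask = ~(uint32_t)((1UL << 12) - 1); /* PAGE_MASK = 0xfffff000    */

	/* Zero-extending PAGE_MASK clips physical address bits 32..43 ... */
	uint64_t unsigned_cast = (uint64_t)page_mask & phys_mask;
	/* ... while sign-extending it keeps them, which is what the kernel wants. */
	uint64_t signed_cast = (uint64_t)(int32_t)page_mask & phys_mask;

	printf("unsigned cast: %#llx\n", (unsigned long long)unsigned_cast); /* 0xfffff000    */
	printf("signed cast:   %#llx\n", (unsigned long long)signed_cast);   /* 0xffffffff000 */
	return 0;
}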
arch/x86/include/asm/page_32.h
#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H
/*
* This handles the memory map.
*
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER 0
#else
#define THREAD_ORDER 1
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
#ifndef __ASSEMBLY__
typedef u64	pteval_t;
typedef u64	pmdval_t;
typedef u64	pudval_t;
typedef u64	pgdval_t;
typedef u64	pgprotval_t;

typedef union {
	struct {
		unsigned long pte_low, pte_high;
	};
	pteval_t pte;
} pte_t;
#endif	/* __ASSEMBLY__ */
#else	/* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#ifndef __ASSEMBLY__
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef union {
	pteval_t pte;
	pteval_t pte_low;
} pte_t;
#endif	/* __ASSEMBLY__ */
#endif	/* CONFIG_X86_PAE */
#ifndef __ASSEMBLY__
typedef struct page *pgtable_t;
#endif
#include <asm/page_32_types.h>
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#ifndef __ASSEMBLY__
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
...
@@ -89,22 +19,7 @@ extern unsigned long __phys_addr(unsigned long);
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif	/* CONFIG_FLATMEM */
extern int nx_enabled;

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;

extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
...
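For readers less familiar with the 32-bit layout described in the header comment, the lowmem translation implied by __PAGE_OFFSET and __phys_addr_nodebug(x) is just a fixed offset. A standalone sketch (illustrative only, assuming the usual CONFIG_PAGE_OFFSET of 0xC0000000):

#include <stdio.h>

#define PAGE_OFFSET	0xC0000000UL	/* assumed CONFIG_PAGE_OFFSET */

/* what __phys_addr_nodebug(x) expands to: (x) - PAGE_OFFSET */
static unsigned long lowmem_virt_to_phys(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;
}

static unsigned long lowmem_phys_to_virt(unsigned long paddr)
{
	return paddr + PAGE_OFFSET;
}

int main(void)
{
	/* kernel virtual 0xC1000000 corresponds to physical 16 MB */
	printf("virt %#lx -> phys %#lx\n", 0xC1000000UL, lowmem_virt_to_phys(0xC1000000UL));
	printf("phys %#lx -> virt %#lx\n", 0x01000000UL, lowmem_phys_to_virt(0x01000000UL));
	return 0;
}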
arch/x86/include/asm/page_32_types.h
0 → 100644
#ifndef _ASM_X86_PAGE_32_DEFS_H
#define _ASM_X86_PAGE_32_DEFS_H
#include <linux/const.h>
/*
* This handles the memory map.
*
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER 0
#else
#define THREAD_ORDER 1
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
#else	/* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#endif	/* CONFIG_X86_PAE */
#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_PAE
typedef u64	pteval_t;
typedef u64	pmdval_t;
typedef u64	pudval_t;
typedef u64	pgdval_t;
typedef u64	pgprotval_t;

typedef union {
	struct {
		unsigned long pte_low, pte_high;
	};
	pteval_t pte;
} pte_t;
#else	/* !CONFIG_X86_PAE */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef union {
	pteval_t pte;
	pteval_t pte_low;
} pte_t;
#endif	/* CONFIG_X86_PAE */
extern int nx_enabled;

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;

extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#endif	/* !__ASSEMBLY__ */
#endif	/* _ASM_X86_PAGE_32_DEFS_H */
arch/x86/include/asm/page_64.h
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
#define PAGETABLE_LEVELS 4
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5	/* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
/*
* Set __PAGE_OFFSET to the most negative possible address +
* PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
/*
* Make sure kernel is aligned to 2MB address. Catching it at compile
* time is better. Change your config file and compile the kernel
* for a 2MB aligned address (CONFIG_PHYSICAL_START)
*/
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);

/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x)	(x)

/*
 * These are used to make use of C type-checking..
 */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef struct page *pgtable_t;

typedef struct { pteval_t pte; } pte_t;

#define vmemmap ((struct page *)VMEMMAP_START)

extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);

extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern void free_initmem(void);

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif	/* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
#include <asm/page_64_types.h>
#endif	/* _ASM_X86_PAGE_64_H */
arch/x86/include/asm/page_64.h.rej
0 → 100644
***************
*** 1,105 ****
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
- #define PAGETABLE_LEVELS 4
-
- #define THREAD_ORDER 1
- #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
- #define CURRENT_MASK (~(THREAD_SIZE - 1))
-
- #define EXCEPTION_STACK_ORDER 0
- #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-
- #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
- #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-
- #define IRQSTACK_ORDER 2
- #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-
- #define STACKFAULT_STACK 1
- #define DOUBLEFAULT_STACK 2
- #define NMI_STACK 3
- #define DEBUG_STACK 4
- #define MCE_STACK 5
- #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-
- #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
- #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
-
- /*
- * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
- */
- #define __PAGE_OFFSET _AC(0xffff880000000000, UL)
-
- #define __PHYSICAL_START CONFIG_PHYSICAL_START
- #define __KERNEL_ALIGN 0x200000
-
- /*
- * Make sure kernel is aligned to 2MB address. Catching it at compile
- * time is better. Change your config file and compile the kernel
- * for a 2MB aligned address (CONFIG_PHYSICAL_START)
- */
- #if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
- #error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
- #endif
-
- #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
- #define __START_KERNEL_map _AC(0xffffffff80000000, UL)
-
- /* See Documentation/x86_64/mm.txt for a description of the memory map. */
- #define __PHYSICAL_MASK_SHIFT 46
- #define __VIRTUAL_MASK_SHIFT 48
-
- /*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
- */
- #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
- #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
-
- #ifndef __ASSEMBLY__
- void clear_page(void *page);
- void copy_page(void *to, void *from);
-
- /* duplicated to the one in bootmem.h */
- extern unsigned long max_pfn;
- extern unsigned long phys_base;
-
- extern unsigned long __phys_addr(unsigned long);
- #define __phys_reloc_hide(x) (x)
-
- /*
- * These are used to make use of C type-checking..
- */
- typedef unsigned long pteval_t;
- typedef unsigned long pmdval_t;
- typedef unsigned long pudval_t;
- typedef unsigned long pgdval_t;
- typedef unsigned long pgprotval_t;
-
- typedef struct page *pgtable_t;
-
- typedef struct { pteval_t pte; } pte_t;
-
- #define vmemmap ((struct page *)VMEMMAP_START)
-
- extern unsigned long init_memory_mapping(unsigned long start,
- unsigned long end);
-
- extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
- extern void free_initmem(void);
-
- extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
- extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
- #endif /* !__ASSEMBLY__ */
-
- #ifdef CONFIG_FLATMEM
- #define pfn_valid(pfn) ((pfn) < max_pfn)
- #endif
-
#endif /* _ASM_X86_PAGE_64_H */
--- 1,6 ----
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
+ #include <asm/page_64_types.h>
#endif /* _ASM_X86_PAGE_64_H */
arch/x86/include/asm/page_64_types.h
0 → 100644
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
#define PAGETABLE_LEVELS 4
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5	/* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
/*
* Set __PAGE_OFFSET to the most negative possible address +
* PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
/*
* Make sure kernel is aligned to 2MB address. Catching it at compile
* time is better. Change your config file and compile the kernel
* for a 2MB aligned address (CONFIG_PHYSICAL_START)
*/
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);

/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x)	(x)

/*
 * These are used to make use of C type-checking..
 */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef struct page *pgtable_t;

typedef struct { pteval_t pte; } pte_t;

#define vmemmap ((struct page *)VMEMMAP_START)

extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);

extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern void free_initmem(void);

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif	/* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
#endif	/* _ASM_X86_PAGE_64_DEFS_H */
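The 2 MB alignment check and the __START_KERNEL definition above are easier to see with concrete numbers. A small sketch (illustrative only; CONFIG_PHYSICAL_START = 0x1000000 is an assumed example value, not something this commit sets):

#include <stdio.h>

#define __KERNEL_ALIGN		0x200000UL		/* 2 MB */
#define CONFIG_PHYSICAL_START	0x1000000UL		/* assumed example value (16 MB) */
#define __PHYSICAL_START	CONFIG_PHYSICAL_START
#define __START_KERNEL_map	0xffffffff80000000ULL
#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)

int main(void)
{
	if (__PHYSICAL_START % __KERNEL_ALIGN)
		puts("CONFIG_PHYSICAL_START must be a multiple of 2MB");
	else
		/* prints 0xffffffff81000000: the virtual address where the kernel text starts */
		printf("__START_KERNEL = %#llx\n", (unsigned long long)__START_KERNEL);
	return 0;
}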
arch/x86/include/asm/page_types.h
0 → 100644
#ifndef _ASM_X86_PAGE_DEFS_H
#define _ASM_X86_PAGE_DEFS_H
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32-bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
#include <asm/page_32_types.h>
#endif	/* CONFIG_X86_64 */
#ifndef __ASSEMBLY__
#include <linux/types.h>
typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;

#if PAGETABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
#endif

#if PAGETABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;
#endif

typedef struct page *pgtable_t;

extern int page_is_ram(unsigned long pagenr);
extern int pagerange_is_ram(unsigned long start, unsigned long end);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot);

extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
#endif	/* !__ASSEMBLY__ */
#endif	/* _ASM_X86_PAGE_DEFS_H */
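PTE_PFN_MASK and PTE_FLAGS_MASK, now shared through this header, are what helpers like pud_flags() in the page.h diff above rely on. A standalone sketch of how they split a PAE-style pte value (illustrative only; the example pte value is made up, and PHYSICAL_PAGE_MASK is written without the signed cast since everything here is already 64-bit):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define PAGE_SHIFT		12
#define __PHYSICAL_MASK_SHIFT	44	/* the 32-bit PAE value from page_32_types.h */
#define __PHYSICAL_MASK		((1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define PAGE_MASK		(~((1ULL << PAGE_SHIFT) - 1))
#define PHYSICAL_PAGE_MASK	(PAGE_MASK & __PHYSICAL_MASK)
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)

int main(void)
{
	pteval_t pte = 0x12345063ULL;	/* frame 0x12345; present+rw+accessed+dirty bits */

	printf("pfn   = %#llx\n", (unsigned long long)((pte & PTE_PFN_MASK) >> PAGE_SHIFT));
	printf("flags = %#llx\n", (unsigned long long)(pte & PTE_FLAGS_MASK));
	return 0;
}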