Commit 39b742f9 authored by Heiko Carstens, committed by Martin Schwidefsky

[S390] Use add_active_range() and free_area_init_nodes().

Size zones and holes in an architecture independent manner for s390.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent f4eb07c1
...@@ -233,6 +233,9 @@ config WARN_STACK_SIZE ...@@ -233,6 +233,9 @@ config WARN_STACK_SIZE
This allows you to specify the maximum frame size a function may This allows you to specify the maximum frame size a function may
have without the compiler complaining about it. have without the compiler complaining about it.
config ARCH_POPULATES_NODE_MAP
def_bool y
source "mm/Kconfig" source "mm/Kconfig"
config HOLES_IN_ZONE config HOLES_IN_ZONE
......
...@@ -66,7 +66,6 @@ unsigned long machine_flags = 0; ...@@ -66,7 +66,6 @@ unsigned long machine_flags = 0;
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
unsigned long __initdata zholes_size[MAX_NR_ZONES];
static unsigned long __initdata memory_end; static unsigned long __initdata memory_end;
/* /*
...@@ -354,21 +353,6 @@ void machine_power_off(void) ...@@ -354,21 +353,6 @@ void machine_power_off(void)
*/ */
void (*pm_power_off)(void) = machine_power_off; void (*pm_power_off)(void) = machine_power_off;
static void __init
add_memory_hole(unsigned long start, unsigned long end)
{
unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
if (end <= dma_pfn)
zholes_size[ZONE_DMA] += end - start + 1;
else if (start > dma_pfn)
zholes_size[ZONE_NORMAL] += end - start + 1;
else {
zholes_size[ZONE_DMA] += dma_pfn - start + 1;
zholes_size[ZONE_NORMAL] += end - dma_pfn;
}
}
static int __init early_parse_mem(char *p) static int __init early_parse_mem(char *p)
{ {
memory_end = memparse(p, &p); memory_end = memparse(p, &p);
...@@ -521,7 +505,6 @@ setup_memory(void) ...@@ -521,7 +505,6 @@ setup_memory(void)
{ {
unsigned long bootmap_size; unsigned long bootmap_size;
unsigned long start_pfn, end_pfn, init_pfn; unsigned long start_pfn, end_pfn, init_pfn;
unsigned long last_rw_end;
int i; int i;
/* /*
...@@ -577,39 +560,27 @@ setup_memory(void) ...@@ -577,39 +560,27 @@ setup_memory(void)
/* /*
* Register RAM areas with the bootmem allocator. * Register RAM areas with the bootmem allocator.
*/ */
last_rw_end = start_pfn;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
unsigned long start_chunk, end_chunk; unsigned long start_chunk, end_chunk, pfn;
if (memory_chunk[i].type != CHUNK_READ_WRITE) if (memory_chunk[i].type != CHUNK_READ_WRITE)
continue; continue;
start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1); start_chunk = PFN_DOWN(memory_chunk[i].addr);
start_chunk >>= PAGE_SHIFT; end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
end_chunk = (memory_chunk[i].addr + memory_chunk[i].size); end_chunk = min(end_chunk, end_pfn);
end_chunk >>= PAGE_SHIFT; if (start_chunk >= end_chunk)
if (start_chunk < start_pfn) continue;
start_chunk = start_pfn; add_active_range(0, start_chunk, end_chunk);
if (end_chunk > end_pfn) pfn = max(start_chunk, start_pfn);
end_chunk = end_pfn; for (; pfn <= end_chunk; pfn++)
if (start_chunk < end_chunk) { page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
/* Initialize storage key for RAM pages */
for (init_pfn = start_chunk ; init_pfn < end_chunk;
init_pfn++)
page_set_storage_key(init_pfn << PAGE_SHIFT,
PAGE_DEFAULT_KEY);
free_bootmem(start_chunk << PAGE_SHIFT,
(end_chunk - start_chunk) << PAGE_SHIFT);
if (last_rw_end < start_chunk)
add_memory_hole(last_rw_end, start_chunk - 1);
last_rw_end = end_chunk;
}
} }
psw_set_key(PAGE_DEFAULT_KEY); psw_set_key(PAGE_DEFAULT_KEY);
if (last_rw_end < end_pfn - 1) free_bootmem_with_active_regions(0, max_pfn);
add_memory_hole(last_rw_end, end_pfn - 1); reserve_bootmem(0, PFN_PHYS(start_pfn));
/* /*
* Reserve the bootmem bitmap itself as well. We do this in two * Reserve the bootmem bitmap itself as well. We do this in two
......
...@@ -106,8 +106,8 @@ static void __init setup_ro_region(void) ...@@ -106,8 +106,8 @@ static void __init setup_ro_region(void)
} }
} }
extern unsigned long __initdata zholes_size[];
extern void vmem_map_init(void); extern void vmem_map_init(void);
/* /*
* paging_init() sets up the page tables * paging_init() sets up the page tables
*/ */
...@@ -117,8 +117,7 @@ void __init paging_init(void) ...@@ -117,8 +117,7 @@ void __init paging_init(void)
int i; int i;
unsigned long pgdir_k; unsigned long pgdir_k;
static const int ssm_mask = 0x04000000L; static const int ssm_mask = 0x04000000L;
unsigned long zones_size[MAX_NR_ZONES]; unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long dma_pfn, high_pfn;
pg_dir = swapper_pg_dir; pg_dir = swapper_pg_dir;
...@@ -142,20 +141,10 @@ void __init paging_init(void) ...@@ -142,20 +141,10 @@ void __init paging_init(void)
__ctl_load(pgdir_k, 13, 13); __ctl_load(pgdir_k, 13, 13);
__raw_local_irq_ssm(ssm_mask); __raw_local_irq_ssm(ssm_mask);
memset(zones_size, 0, sizeof(zones_size)); memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT; max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
high_pfn = max_low_pfn; max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
if (dma_pfn > high_pfn)
zones_size[ZONE_DMA] = high_pfn;
else {
zones_size[ZONE_DMA] = dma_pfn;
zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
}
/* Initialize mem_map[]. */
free_area_init_node(0, &contig_page_data, zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
} }
void __init mem_init(void) void __init mem_init(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment