Commit 7c8c6b97 authored by Paul Mackerras

powerpc: Merge lmb.c and make MM initialization use it.

This also creates merged versions of do_init_bootmem, paging_init
and mem_init and moves them to arch/powerpc/mm/mem.c.  It gets rid
of the mem_pieces stuff.
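For illustration, the kind of call-site change this implies shows up in the MMU_init_hw() hunk later in this diff; a minimal before/after sketch (comments added here for explanation, surrounding kernel code omitted):

	/* before: carve the hash table out of the 32-bit-only mem_pieces lists */
	Hash = mem_pieces_find(Hash_size, Hash_size);

	/* after: allocate from the shared lmb allocator, constrained to early lowmem */
	Hash = __va(lmb_alloc_base(Hash_size, Hash_size, __initial_memory_limit));
	cacheable_memzero(Hash, Hash_size);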

I made memory_limit a parameter to lmb_enforce_memory_limit rather
than a global referenced by that function.  This will require some
small changes to ppc64 if we want to continue building ARCH=ppc64
using the merged lmb.c.
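Concretely, lmb_enforce_memory_limit() now takes the limit as an argument (see the new lmb.c below), so a ppc64 caller would pass its limit explicitly instead of relying on a global. A hypothetical sketch of such a call site (the memory_limit variable and its placement in early setup are assumptions, not part of this patch):

	/* hypothetical ppc64 early-boot code: pass the mem= limit explicitly */
	lmb_enforce_memory_limit(memory_limit);
	/* re-summarize the list after truncation, per the comment in lmb.c */
	lmb_analyze();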
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 9b6b563c
@@ -2,9 +2,9 @@
# Makefile for the linux ppc-specific parts of the memory manager.
#
-obj-y := fault.o mem.o
+obj-y := fault.o mem.o lmb.o
obj-$(CONFIG_PPC32) += init.o pgtable.o mmu_context.o \
-			mem_pieces.o tlb.o
+			tlb.o
obj-$(CONFIG_PPC64) += init64.o pgtable64.o mmu_context64.o
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu.o hash_32.o
obj-$(CONFIG_40x) += 4xx_mmu.o
...
This diff is collapsed.
@@ -166,77 +166,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
/*
* Initialize the bootmem system and give it all the memory we
* have available.
*/
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
unsigned long i;
unsigned long start, bootmap_pages;
unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
int boot_mapsize;
/*
* Find an area to use for the bootmem bitmap. Calculate the size of
* bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
* Add 1 additional page in case the address isn't page-aligned.
*/
bootmap_pages = bootmem_bootmap_pages(total_pages);
start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
BUG_ON(!start);
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
max_pfn = max_low_pfn;
/* Add all physical memory to the bootmem map, mark each area
* present.
*/
for (i=0; i < lmb.memory.cnt; i++)
free_bootmem(lmb.memory.region[i].base,
lmb_size_bytes(&lmb.memory, i));
/* reserve the sections we're already using */
for (i=0; i < lmb.reserved.cnt; i++)
reserve_bootmem(lmb.reserved.region[i].base,
lmb_size_bytes(&lmb.reserved, i));
for (i=0; i < lmb.memory.cnt; i++)
memory_present(0, lmb_start_pfn(&lmb.memory, i),
lmb_end_pfn(&lmb.memory, i));
}
/*
* paging_init() sets up the page tables - in fact we've already done this.
*/
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES];
unsigned long total_ram = lmb_phys_mem_size();
unsigned long top_of_ram = lmb_end_of_DRAM();
printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
printk(KERN_INFO "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
/*
* All pages are DMA-able so we put them all in the DMA zone.
*/
memset(zones_size, 0, sizeof(zones_size));
memset(zholes_size, 0, sizeof(zholes_size));
zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
free_area_init_node(0, NODE_DATA(0), zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
static struct kcore_list kcore_vmem;
static int __init setup_kcore(void)
@@ -264,61 +193,6 @@ static int __init setup_kcore(void)
}
module_init(setup_kcore);
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
int nid;
#endif
pg_data_t *pgdat;
unsigned long i;
struct page *page;
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
num_physpages = max_low_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#ifdef CONFIG_NEED_MULTIPLE_NODES
for_each_online_node(nid) {
if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %x\n", nid);
totalram_pages +=
free_all_bootmem_node(NODE_DATA(nid));
}
}
#else
max_mapnr = num_physpages;
totalram_pages += free_all_bootmem();
#endif
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
}
}
codesize = (unsigned long)&_etext - (unsigned long)&_stext;
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
"%luk reserved, %luk data, %luk bss, %luk init)\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
bsssize >> 10,
initsize >> 10);
mem_init_done = 1;
/* Initialize the vDSO */
vdso_init();
}
void __iomem * reserve_phb_iospace(unsigned long size)
{
void __iomem *virt_addr;
...
/*
* Procedures for maintaining information about logical memory blocks.
*
* Peter Bergner, IBM Corp. June 2001.
* Copyright (C) 2001 Peter Bergner.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#ifdef CONFIG_PPC32
#include "mmu_decl.h" /* for __max_low_memory */
#endif
struct lmb lmb;
#undef DEBUG
void lmb_dump_all(void)
{
#ifdef DEBUG
unsigned long i;
udbg_printf("lmb_dump_all:\n");
udbg_printf(" memory.cnt = 0x%lx\n",
lmb.memory.cnt);
udbg_printf(" memory.size = 0x%lx\n",
lmb.memory.size);
for (i=0; i < lmb.memory.cnt ;i++) {
udbg_printf(" memory.region[0x%x].base = 0x%lx\n",
i, lmb.memory.region[i].base);
udbg_printf(" .size = 0x%lx\n",
lmb.memory.region[i].size);
}
udbg_printf("\n reserved.cnt = 0x%lx\n",
lmb.reserved.cnt);
udbg_printf(" reserved.size = 0x%lx\n",
lmb.reserved.size);
for (i=0; i < lmb.reserved.cnt ;i++) {
udbg_printf(" reserved.region[0x%x].base = 0x%lx\n",
i, lmb.reserved.region[i].base);
udbg_printf(" .size = 0x%lx\n",
lmb.reserved.region[i].size);
}
#endif /* DEBUG */
}
static unsigned long __init lmb_addrs_overlap(unsigned long base1,
unsigned long size1, unsigned long base2, unsigned long size2)
{
return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
}
static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
unsigned long base2, unsigned long size2)
{
if (base2 == base1 + size1)
return 1;
else if (base1 == base2 + size2)
return -1;
return 0;
}
static long __init lmb_regions_adjacent(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
{
unsigned long base1 = rgn->region[r1].base;
unsigned long size1 = rgn->region[r1].size;
unsigned long base2 = rgn->region[r2].base;
unsigned long size2 = rgn->region[r2].size;
return lmb_addrs_adjacent(base1, size1, base2, size2);
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
{
unsigned long i;
rgn->region[r1].size += rgn->region[r2].size;
for (i=r2; i < rgn->cnt-1; i++) {
rgn->region[i].base = rgn->region[i+1].base;
rgn->region[i].size = rgn->region[i+1].size;
}
rgn->cnt--;
}
/* This routine called with relocation disabled. */
void __init lmb_init(void)
{
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
lmb.memory.region[0].base = 0;
lmb.memory.region[0].size = 0;
lmb.memory.cnt = 1;
/* Ditto. */
lmb.reserved.region[0].base = 0;
lmb.reserved.region[0].size = 0;
lmb.reserved.cnt = 1;
}
/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
int i;
lmb.memory.size = 0;
for (i = 0; i < lmb.memory.cnt; i++)
lmb.memory.size += lmb.memory.region[i].size;
}
/* This routine called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
unsigned long size)
{
unsigned long i, coalesced = 0;
long adjacent;
/* First try and coalesce this LMB with another. */
for (i=0; i < rgn->cnt; i++) {
unsigned long rgnbase = rgn->region[i].base;
unsigned long rgnsize = rgn->region[i].size;
adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
if ( adjacent > 0 ) {
rgn->region[i].base -= size;
rgn->region[i].size += size;
coalesced++;
break;
}
else if ( adjacent < 0 ) {
rgn->region[i].size += size;
coalesced++;
break;
}
}
if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
lmb_coalesce_regions(rgn, i, i+1);
coalesced++;
}
if (coalesced)
return coalesced;
if (rgn->cnt >= MAX_LMB_REGIONS)
return -1;
/* Couldn't coalesce the LMB, so add it to the sorted table. */
for (i = rgn->cnt-1; i >= 0; i--) {
if (base < rgn->region[i].base) {
rgn->region[i+1].base = rgn->region[i].base;
rgn->region[i+1].size = rgn->region[i].size;
} else {
rgn->region[i+1].base = base;
rgn->region[i+1].size = size;
break;
}
}
rgn->cnt++;
return 0;
}
/* This routine may be called with relocation disabled. */
long __init lmb_add(unsigned long base, unsigned long size)
{
struct lmb_region *_rgn = &(lmb.memory);
/* On pSeries LPAR systems, the first LMB is our RMO region. */
if (base == 0)
lmb.rmo_size = size;
return lmb_add_region(_rgn, base, size);
}
long __init lmb_reserve(unsigned long base, unsigned long size)
{
struct lmb_region *_rgn = &(lmb.reserved);
return lmb_add_region(_rgn, base, size);
}
long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
unsigned long size)
{
unsigned long i;
for (i=0; i < rgn->cnt; i++) {
unsigned long rgnbase = rgn->region[i].base;
unsigned long rgnsize = rgn->region[i].size;
if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
break;
}
}
return (i < rgn->cnt) ? i : -1;
}
unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
{
return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}
unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
unsigned long max_addr)
{
long i, j;
unsigned long base = 0;
#ifdef CONFIG_PPC32
/* On 32-bit, make sure we allocate lowmem */
if (max_addr == LMB_ALLOC_ANYWHERE)
max_addr = __max_low_memory;
#endif
for (i = lmb.memory.cnt-1; i >= 0; i--) {
unsigned long lmbbase = lmb.memory.region[i].base;
unsigned long lmbsize = lmb.memory.region[i].size;
if (max_addr == LMB_ALLOC_ANYWHERE)
base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
else if (lmbbase < max_addr) {
base = min(lmbbase + lmbsize, max_addr);
base = _ALIGN_DOWN(base - size, align);
} else
continue;
while ((lmbbase <= base) &&
((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
align);
if ((base != 0) && (lmbbase <= base))
break;
}
if (i < 0)
return 0;
lmb_add_region(&lmb.reserved, base, size);
return base;
}
/* You must call lmb_analyze() before this. */
unsigned long __init lmb_phys_mem_size(void)
{
return lmb.memory.size;
}
unsigned long __init lmb_end_of_DRAM(void)
{
int idx = lmb.memory.cnt - 1;
return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}
/*
* Truncate the lmb list to memory_limit if it's set
* You must call lmb_analyze() after this.
*/
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
unsigned long i, limit;
if (! memory_limit)
return;
limit = memory_limit;
for (i = 0; i < lmb.memory.cnt; i++) {
if (limit > lmb.memory.region[i].size) {
limit -= lmb.memory.region[i].size;
continue;
}
lmb.memory.region[i].size = limit;
lmb.memory.cnt = i + 1;
break;
}
}
@@ -45,8 +45,9 @@
#include <asm/tlb.h>
#include <asm/bootinfo.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#include "mem_pieces.h"
#include "mmu_decl.h" #include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE #ifndef CPU_FTR_COHERENT_ICACHE
...@@ -54,6 +55,9 @@ ...@@ -54,6 +55,9 @@
#define CPU_FTR_NOEXECUTE 0 #define CPU_FTR_NOEXECUTE 0
#endif #endif
int init_bootmem_done;
int mem_init_done;
/*
* This is called by /dev/mem to know if a given address has to
* be mapped non-cacheable or not
@@ -130,6 +134,185 @@ void show_mem(void)
printk("%ld pages swap cached\n", cached);
}
/*
* Initialize the bootmem system and give it all the memory we
* have available. If we are using highmem, we only put the
* lowmem into the bootmem system.
*/
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
unsigned long i;
unsigned long start, bootmap_pages;
unsigned long total_pages;
int boot_mapsize;
max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
total_pages = total_lowmem >> PAGE_SHIFT;
#endif
/*
* Find an area to use for the bootmem bitmap. Calculate the size of
* bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
* Add 1 additional page in case the address isn't page-aligned.
*/
bootmap_pages = bootmem_bootmap_pages(total_pages);
start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
BUG_ON(!start);
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
/* Add all physical memory to the bootmem map, mark each area
* present.
*/
for (i = 0; i < lmb.memory.cnt; i++) {
unsigned long base = lmb.memory.region[i].base;
unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
if (base >= total_lowmem)
continue;
if (base + size > total_lowmem)
size = total_lowmem - base;
#endif
free_bootmem(base, size);
}
/* reserve the sections we're already using */
for (i = 0; i < lmb.reserved.cnt; i++)
reserve_bootmem(lmb.reserved.region[i].base,
lmb_size_bytes(&lmb.reserved, i));
/* XXX need to clip this if using highmem? */
for (i = 0; i < lmb.memory.cnt; i++)
memory_present(0, lmb_start_pfn(&lmb.memory, i),
lmb_end_pfn(&lmb.memory, i));
init_bootmem_done = 1;
}
/*
* paging_init() sets up the page tables - in fact we've already done this.
*/
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES];
unsigned long total_ram = lmb_phys_mem_size();
unsigned long top_of_ram = lmb_end_of_DRAM();
#ifdef CONFIG_HIGHMEM
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
printk(KERN_INFO "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
/*
* All pages are DMA-able so we put them all in the DMA zone.
*/
memset(zones_size, 0, sizeof(zones_size));
memset(zholes_size, 0, sizeof(zholes_size));
zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#else
zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */
free_area_init_node(0, NODE_DATA(0), zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
int nid;
#endif
pg_data_t *pgdat;
unsigned long i;
struct page *page;
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
num_physpages = max_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#ifdef CONFIG_NEED_MULTIPLE_NODES
for_each_online_node(nid) {
if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %x\n", nid);
totalram_pages +=
free_all_bootmem_node(NODE_DATA(nid));
}
}
#else
max_mapnr = num_physpages;
totalram_pages += free_all_bootmem();
#endif
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
}
}
codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
#ifdef CONFIG_HIGHMEM
{
unsigned long pfn, highmem_mapnr;
highmem_mapnr = total_lowmem >> PAGE_SHIFT;
for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
set_page_count(page, 1);
__free_page(page);
totalhigh_pages++;
}
totalram_pages += totalhigh_pages;
printk(KERN_INFO "High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* CONFIG_HIGHMEM */
printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
"%luk reserved, %luk data, %luk bss, %luk init)\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
bsssize >> 10,
initsize >> 10);
mem_init_done = 1;
#ifdef CONFIG_PPC64
/* Initialize the vDSO */
vdso_init();
#endif
}
/*
* This is called when a page has been modified by the kernel.
* It just marks the page as not i-cache clean. We do the i-cache
...
/*
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Changes to accommodate Power Macintoshes.
* Cort Dougan <cort@cs.nmt.edu>
* Rewrites.
* Grant Erickson <grant@lcse.umn.edu>
* General rework and split from mm/init.c.
*
* Module name: mem_pieces.c
*
* Description:
* Routines and data structures for manipulating and representing
* physical memory extents (i.e. address/length pairs).
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <asm/page.h>
#include "mem_pieces.h"
extern struct mem_pieces phys_avail;
static void mem_pieces_print(struct mem_pieces *);
/*
* Scan a region for a piece of a given size with the required alignment.
*/
void __init *
mem_pieces_find(unsigned int size, unsigned int align)
{
int i;
unsigned a, e;
struct mem_pieces *mp = &phys_avail;
for (i = 0; i < mp->n_regions; ++i) {
a = mp->regions[i].address;
e = a + mp->regions[i].size;
a = (a + align - 1) & -align;
if (a + size <= e) {
mem_pieces_remove(mp, a, size, 1);
return (void *) __va(a);
}
}
panic("Couldn't find %u bytes at %u alignment\n", size, align);
return NULL;
}
/*
* Remove some memory from an array of pieces
*/
void __init
mem_pieces_remove(struct mem_pieces *mp, unsigned int start, unsigned int size,
int must_exist)
{
int i, j;
unsigned int end, rs, re;
struct reg_property *rp;
end = start + size;
for (i = 0, rp = mp->regions; i < mp->n_regions; ++i, ++rp) {
if (end > rp->address && start < rp->address + rp->size)
break;
}
if (i >= mp->n_regions) {
if (must_exist)
printk("mem_pieces_remove: [%x,%x) not in any region\n",
start, end);
return;
}
for (; i < mp->n_regions && end > rp->address; ++i, ++rp) {
rs = rp->address;
re = rs + rp->size;
if (must_exist && (start < rs || end > re)) {
printk("mem_pieces_remove: bad overlap [%x,%x) with",
start, end);
mem_pieces_print(mp);
must_exist = 0;
}
if (start > rs) {
rp->size = start - rs;
if (end < re) {
/* need to split this entry */
if (mp->n_regions >= MEM_PIECES_MAX)
panic("eek... mem_pieces overflow");
for (j = mp->n_regions; j > i + 1; --j)
mp->regions[j] = mp->regions[j-1];
++mp->n_regions;
rp[1].address = end;
rp[1].size = re - end;
}
} else {
if (end < re) {
rp->address = end;
rp->size = re - end;
} else {
/* need to delete this entry */
for (j = i; j < mp->n_regions - 1; ++j)
mp->regions[j] = mp->regions[j+1];
--mp->n_regions;
--i;
--rp;
}
}
}
}
static void __init
mem_pieces_print(struct mem_pieces *mp)
{
int i;
for (i = 0; i < mp->n_regions; ++i)
printk(" [%x, %x)", mp->regions[i].address,
mp->regions[i].address + mp->regions[i].size);
printk("\n");
}
void __init
mem_pieces_sort(struct mem_pieces *mp)
{
unsigned long a, s;
int i, j;
for (i = 1; i < mp->n_regions; ++i) {
a = mp->regions[i].address;
s = mp->regions[i].size;
for (j = i - 1; j >= 0; --j) {
if (a >= mp->regions[j].address)
break;
mp->regions[j+1] = mp->regions[j];
}
mp->regions[j+1].address = a;
mp->regions[j+1].size = s;
}
}
void __init
mem_pieces_coalesce(struct mem_pieces *mp)
{
unsigned long a, s, ns;
int i, j, d;
d = 0;
for (i = 0; i < mp->n_regions; i = j) {
a = mp->regions[i].address;
s = mp->regions[i].size;
for (j = i + 1; j < mp->n_regions
&& mp->regions[j].address - a <= s; ++j) {
ns = mp->regions[j].address + mp->regions[j].size - a;
if (ns > s)
s = ns;
}
mp->regions[d].address = a;
mp->regions[d].size = s;
++d;
}
mp->n_regions = d;
}
/*
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Changes to accommodate Power Macintoshes.
* Cort Dougan <cort@cs.nmt.edu>
* Rewrites.
* Grant Erickson <grant@lcse.umn.edu>
* General rework and split from mm/init.c.
*
* Module name: mem_pieces.h
*
* Description:
* Routines and data structures for manipulating and representing
* physical memory extents (i.e. address/length pairs).
*
*/
#ifndef __MEM_PIECES_H__
#define __MEM_PIECES_H__
#include <asm/prom.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Type Definitions */
#define MEM_PIECES_MAX 32
struct mem_pieces {
int n_regions;
struct reg_property regions[MEM_PIECES_MAX];
};
/* Function Prototypes */
extern void *mem_pieces_find(unsigned int size, unsigned int align);
extern void mem_pieces_remove(struct mem_pieces *mp, unsigned int start,
unsigned int size, int must_exist);
extern void mem_pieces_coalesce(struct mem_pieces *mp);
extern void mem_pieces_sort(struct mem_pieces *mp);
#ifdef __cplusplus
}
#endif
#endif /* __MEM_PIECES_H__ */
@@ -36,6 +36,8 @@ extern unsigned long ioremap_base;
extern unsigned long ioremap_bot;
extern unsigned int rtas_data, rtas_size;
extern unsigned long __max_low_memory;
extern unsigned long __initial_memory_limit;
extern unsigned long total_memory;
extern unsigned long total_lowmem;
extern int mem_init_done;
...
@@ -190,8 +190,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*/
-	if ( mem_init_done && (p < virt_to_phys(high_memory)) )
-	{
+	if (mem_init_done && (p < virt_to_phys(high_memory))) {
printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
__builtin_return_address(0));
return NULL;
...
@@ -32,9 +32,9 @@
#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include "mmu_decl.h" #include "mmu_decl.h"
#include "mem_pieces.h"
PTE *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
@@ -215,17 +215,6 @@ void __init MMU_init_hw(void)
#define MIN_N_HPTEG 1024 /* min 64kB hash table */
#endif
#ifdef CONFIG_POWER4
/* The hash table has already been allocated and initialized
in prom.c */
n_hpteg = Hash_size >> LG_HPTEG_SIZE;
lg_n_hpteg = __ilog2(n_hpteg);
/* Remove the hash table from the available memory */
if (Hash)
reserve_phys_mem(__pa(Hash), Hash_size);
#else /* CONFIG_POWER4 */
/*
* Allow 1 HPTE (1/8 HPTEG) for each page of memory.
* This is less than the recommended amount, but then
@@ -245,10 +234,10 @@ void __init MMU_init_hw(void)
* Find some memory for the hash table.
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = mem_pieces_find(Hash_size, Hash_size);
+	Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+			__initial_memory_limit));
cacheable_memzero(Hash, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
#endif /* CONFIG_POWER4 */
Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
...
@@ -34,6 +34,17 @@ typedef unsigned long pte_basic_t;
#define PTE_FMT "%.8lx"
#endif
/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
#undef STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
@@ -76,13 +87,6 @@ typedef unsigned long pgprot_t;
#endif
/* align addr on a size boundary - adjust address up if needed -- Cort */
#define _ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
struct page;
extern void clear_pages(void *page, int order);
static inline void clear_page(void *page) { clear_pages(page, 0); }
...