/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

/* Per-CPU state for the generic TLB gather (mmu_gather) machinery. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* The kernel's master page global directory. */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Offset to translate a cached (P1) lowmem address to its uncached (P2)
 * alias.  Zero until paging_init() sets it up (29-bit mode only); code
 * tests it for non-zero before relying on the aliasing.
 */
unsigned long cached_to_uncached = 0;
Linus Torvalds's avatar
Linus Torvalds committed
27 28 29

void show_mem(void)
{
30 31 32
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;
Linus Torvalds's avatar
Linus Torvalds committed
33 34 35

	printk("Mem-info:\n");
	show_free_areas();
36 37

	for_each_online_pgdat(pgdat) {
Paul Mundt's avatar
Paul Mundt committed
38
		unsigned long flags, i;
39 40

		pgdat_resize_lock(pgdat, &flags);
Paul Mundt's avatar
Paul Mundt committed
41 42
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
43 44 45 46 47 48 49 50 51 52 53
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
Paul Mundt's avatar
Paul Mundt committed
54
		}
55
		pgdat_resize_unlock(pgdat, &flags);
Linus Torvalds's avatar
Linus Torvalds committed
56
	}
57 58 59 60 61 62 63 64

	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
Paul Mundt's avatar
Paul Mundt committed
65 66
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
Linus Torvalds's avatar
Linus Torvalds committed
67 68
}

#ifdef CONFIG_MMU
/*
 * set_pte_phys - install a single kernel pte mapping.
 * @addr: virtual address to map
 * @phys: physical address to map it to
 * @prot: page protection bits
 *
 * Walks (allocating where necessary) the pgd/pud/pmd levels for @addr
 * in the kernel page tables and sets the pte.  Used by __set_fixmap()
 * early in boot, before the normal allocators are fully up.
 *
 * Fix: the original error paths called pud_ERROR(*pud)/pmd_ERROR(*pmd)
 * after the allocation returned NULL, dereferencing a NULL pointer.
 * Report the failure without touching the pointer instead.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/* Allocation failed; don't dereference the NULL pud. */
		printk(KERN_ERR "%s: pud_alloc failed for 0x%08lx\n",
		       __func__, addr);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* Allocation failed; don't dereference the NULL pmd. */
		printk(KERN_ERR "%s: pmd_alloc failed for 0x%08lx\n",
		       __func__, addr);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		/* Refuse to silently overwrite an existing mapping. */
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	/* Only flush once the cached/uncached offset has been set up. */
	if (cached_to_uncached)
		flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch, we don't presently do this, but this could be done
 * in a similar fashion as to the wired TLB interface that sh64 uses (by way
Simon Arlott's avatar
Simon Arlott committed
111
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
Linus Torvalds's avatar
Linus Torvalds committed
112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);

		if (!pmd_present(*pmd)) {
			pte_t *pte_table;
			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			memset(pte_table, 0, PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}

		vaddr += PMD_SIZE;
	}
}
164
#endif	/* CONFIG_MMU */
Linus Torvalds's avatar
Linus Torvalds committed
165 166 167 168 169 170

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
171
	unsigned long max_zone_pfns[MAX_NR_ZONES];
172
	int nid;
Linus Torvalds's avatar
Linus Torvalds committed
173

174 175 176 177
	/* We don't need to map the kernel through the TLB, as
	 * it is permanatly mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
Linus Torvalds's avatar
Linus Torvalds committed
178

179 180 181 182
	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

183 184 185 186 187
	/* Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap(). */
	page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);

188 189
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

190 191 192 193 194 195 196
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

197 198
		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;
199 200 201 202

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}
203 204

	free_area_init_nodes(max_zone_pfns);
205 206 207 208 209 210 211 212 213 214 215 216

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));

#ifdef CONFIG_29BIT
	/*
	 * Handle trivial transitions between cached and uncached
	 * segments, making use of the 1:1 mapping relationship in
	 * 512MB lowmem.
	 */
	cached_to_uncached = P2SEG - P1SEG;
#endif
Linus Torvalds's avatar
Linus Torvalds committed
217 218
}

Paul Mundt's avatar
Paul Mundt committed
219
static struct kcore_list kcore_mem, kcore_vmalloc;
220
int after_bootmem = 0;
Paul Mundt's avatar
Paul Mundt committed
221

Linus Torvalds's avatar
Linus Torvalds committed
222 223
void __init mem_init(void)
{
Paul Mundt's avatar
Paul Mundt committed
224
	int codesize, datasize, initsize;
225
	int nid;
Linus Torvalds's avatar
Linus Torvalds committed
226

227 228 229
	num_physpages = 0;
	high_memory = NULL;

230 231 232 233 234 235 236 237 238 239 240
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;
Linus Torvalds's avatar
Linus Torvalds committed
241

242 243 244
		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
245 246 247
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}
Linus Torvalds's avatar
Linus Torvalds committed
248 249 250 251 252

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

253 254
	after_bootmem = 1;

Linus Torvalds's avatar
Linus Torvalds committed
255 256 257 258
	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

Paul Mundt's avatar
Paul Mundt committed
259 260 261 262 263
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
Paul Mundt's avatar
Paul Mundt committed
264
	       "%dk data, %dk init)\n",
Linus Torvalds's avatar
Linus Torvalds committed
265
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
266
		num_physpages << (PAGE_SHIFT-10),
Linus Torvalds's avatar
Linus Torvalds committed
267 268 269 270 271
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();
272 273 274

	/* Initialize the vDSO */
	vsyscall_init();
Linus Torvalds's avatar
Linus Torvalds committed
275 276 277 278 279
}

void free_initmem(void)
{
	unsigned long addr;
280

Linus Torvalds's avatar
Linus Torvalds committed
281 282 283
	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
284
		init_page_count(virt_to_page(addr));
Linus Torvalds's avatar
Linus Torvalds committed
285 286 287
		free_page(addr);
		totalram_pages++;
	}
288 289 290
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
Linus Torvalds's avatar
Linus Torvalds committed
291 292 293 294 295 296 297 298
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * free_initrd_mem - release the pages that held the initrd image.
 * @start: first virtual address of the initrd
 * @end: one past the last virtual address of the initrd
 *
 * Fix: the completion printk lacked a log level; use KERN_INFO.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		/* Make the page freeable: unreserve and reset its refcount. */
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
	       (end - start) >> 10);
}
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * online_page - hand a newly hotplugged page to the page allocator
 * and account for it.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

/*
 * arch_add_memory - add a hotplugged physical memory range to a node.
 * @nid: node to add the memory to
 * @start: physical start address of the range
 * @size: size of the range in bytes
 *
 * Returns 0 on success or a negative error from __add_pages().
 *
 * Fixes: use __func__ instead of the deprecated gcc-ism __FUNCTION__,
 * and give the failure printk an explicit KERN_ERR level.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		printk(KERN_ERR "%s: Failed, __add_pages() == %d\n",
		       __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
/*
 * memory_add_physaddr_to_nid - map a physical address to a NUMA node.
 * @addr: physical address being hotplugged
 *
 * Only node 0 is supported for now.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif