Commit 20b5014b authored by Matt Fleming, committed by Paul Mundt

sh: Fold fixed-PMB support into dynamic PMB support

The initialisation process differs for CONFIG_PMB and for
CONFIG_PMB_FIXED. For CONFIG_PMB_FIXED we need to register the PMB
entries that were allocated by the bootloader.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent ef269b32
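
As the commit message notes, when CONFIG_PMB is not set the new pmb_init() must pick up the PMB slots that the bootloader programmed. Below is a condensed, commented sketch of that path, assuming the register and flag names used in the diff further down (PMB_DATA, PMB_ADDR, PMB_E_SHIFT, PMB_PFN_MASK, PMB_V, PMB_C, PMB_WT, PMB_UB, PMB_SZ_MASK); the function name register_bootloader_pmb_entries() is invented for illustration, and the cache-mode fix-up the real code performs is omitted for brevity.

/*
 * Illustrative sketch only: the real logic is the new #else branch of
 * pmb_init() in the pmb.c hunk below.  Register and bit names are the
 * ones used there (<asm/mmu.h>); register_bootloader_pmb_entries() is
 * a made-up name.
 */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

static void __uses_jump_to_uncached register_bootloader_pmb_entries(void)
{
	int i;

	jump_to_uncached();

	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long data, addr;
		struct pmb_entry *pmbe;

		/* Data array entry: PPN, cacheability/size flags, valid bit. */
		data = ctrl_inl(PMB_DATA + (i << PMB_E_SHIFT));
		if (!(data & PMB_V))
			continue;	/* slot not set up by the bootloader */

		/* Address array entry for the same slot: the VPN. */
		addr = ctrl_inl(PMB_ADDR + (i << PMB_E_SHIFT));

		/* Register the slot with the dynamic PMB code, claiming
		 * entry i explicitly instead of passing PMB_NO_ENTRY. */
		pmbe = pmb_alloc(addr & PMB_PFN_MASK,	/* vpn */
				 data & PMB_PFN_MASK,	/* ppn */
				 data & (PMB_C | PMB_WT | PMB_UB | PMB_SZ_MASK),
				 i);
		WARN_ON(IS_ERR(pmbe));
	}

	back_to_cached();
}

The key point is the last argument to pmb_alloc(): dynamic mappings pass PMB_NO_ENTRY and let pmb_alloc_entry() find a free slot, while this path claims the exact slot index the bootloader used.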
@@ -15,6 +15,8 @@
 #define PMB_E_MASK 0x0000000f
 #define PMB_E_SHIFT 8
 
+#define PMB_PFN_MASK 0xff000000
+
 #define PMB_SZ_16M 0x00000000
 #define PMB_SZ_64M 0x00000010
 #define PMB_SZ_128M 0x00000080
...
@@ -453,7 +453,7 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
-#ifdef CONFIG_PMB
+#ifdef CONFIG_PMB_ENABLE
 	pmb_init();
 #endif
...
@@ -33,8 +33,7 @@ obj-y += $(tlb-y)
 endif
 
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PMB) += pmb.o
-obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o
+obj-$(CONFIG_PMB_ENABLE) += pmb.o
 obj-$(CONFIG_NUMA) += numa.o
 
 # Special flags for fault_64.o. This puts restrictions on the number of
...
-/*
- * arch/sh/mm/fixed_pmb.c
- *
- * Copyright (C) 2009 Renesas Solutions Corp.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-
-static int __uses_jump_to_uncached fixed_pmb_init(void)
-{
-	int i;
-	unsigned long addr, data;
-
-	jump_to_uncached();
-
-	for (i = 0; i < PMB_ENTRY_MAX; i++) {
-		addr = PMB_DATA + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
-		if (!(data & PMB_V))
-			continue;
-
-		if (data & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-			data |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-			data &= ~PMB_WT;
-#else
-			data &= ~(PMB_C | PMB_WT);
-#endif
-		}
-		ctrl_outl(data, addr);
-	}
-
-	back_to_cached();
-
-	return 0;
-}
-arch_initcall(fixed_pmb_init);
@@ -70,14 +70,20 @@ repeat:
 }
 
 static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
-				   unsigned long flags)
+				   unsigned long flags, int entry)
 {
 	struct pmb_entry *pmbe;
 	int pos;
 
-	pos = pmb_alloc_entry();
-	if (pos < 0)
-		return ERR_PTR(pos);
+	if (entry == PMB_NO_ENTRY) {
+		pos = pmb_alloc_entry();
+		if (pos < 0)
+			return ERR_PTR(pos);
+	} else {
+		if (test_bit(entry, &pmb_map))
+			return ERR_PTR(-ENOSPC);
+		pos = entry;
+	}
 
 	pmbe = &pmb_entry_list[pos];
 	if (!pmbe)
@@ -187,7 +193,8 @@ again:
 		if (size < pmb_sizes[i].size)
 			continue;
 
-		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
+		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
+				 PMB_NO_ENTRY);
 		if (IS_ERR(pmbe)) {
 			err = PTR_ERR(pmbe);
 			goto out;
@@ -272,6 +279,7 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
+#ifdef CONFIG_PMB
 int __uses_jump_to_uncached pmb_init(void)
 {
 	unsigned int i;
@@ -309,6 +317,53 @@ int __uses_jump_to_uncached pmb_init(void)
 
 	return 0;
 }
+#else
+int __uses_jump_to_uncached pmb_init(void)
+{
+	int i;
+	unsigned long addr, data;
+
+	jump_to_uncached();
+
+	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+		struct pmb_entry *pmbe;
+		unsigned long vpn, ppn, flags;
+
+		addr = PMB_DATA + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+		if (!(data & PMB_V))
+			continue;
+
+		if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+			data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+			data &= ~PMB_WT;
+#else
+			data &= ~(PMB_C | PMB_WT);
+#endif
+		}
+		ctrl_outl(data, addr);
+
+		ppn = data & PMB_PFN_MASK;
+
+		flags = data & (PMB_C | PMB_WT | PMB_UB);
+		flags |= data & PMB_SZ_MASK;
+
+		addr = PMB_ADDR + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+
+		vpn = data & PMB_PFN_MASK;
+
+		pmbe = pmb_alloc(vpn, ppn, flags, i);
+		WARN_ON(IS_ERR(pmbe));
+	}
+
+	back_to_cached();
+
+	return 0;
+}
+#endif /* CONFIG_PMB */
 
 static int pmb_seq_show(struct seq_file *file, void *iter)
 {
...