Commit 8cf1a743 authored by Paul Mundt

sh: Add kmap_coherent()/kunmap_coherent() interface for SH-4.

This wires up kmap_coherent() and kunmap_coherent() on SH-4, and
moves away from the p3map_mutex and reserved P3 space, opting to
use fixmaps for colouring instead.

The copy_user_page()/clear_user_page() implementations are moved
to this, which fixes the nasty blowups with spinlock debugging
as a result of having some of these calls nested under the page
table lock.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent f695baf2
...@@ -77,16 +77,8 @@ static void __init emit_cache_params(void) ...@@ -77,16 +77,8 @@ static void __init emit_cache_params(void)
/* /*
* SH-4 has virtually indexed and physically tagged cache. * SH-4 has virtually indexed and physically tagged cache.
*/ */
/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
#define MAX_P3_MUTEXES 16
struct mutex p3map_mutex[MAX_P3_MUTEXES];
void __init p3_cache_init(void) void __init p3_cache_init(void)
{ {
int i;
compute_alias(&current_cpu_data.icache); compute_alias(&current_cpu_data.icache);
compute_alias(&current_cpu_data.dcache); compute_alias(&current_cpu_data.dcache);
...@@ -109,9 +101,6 @@ void __init p3_cache_init(void) ...@@ -109,9 +101,6 @@ void __init p3_cache_init(void)
if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL)) if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
panic("%s failed.", __FUNCTION__); panic("%s failed.", __FUNCTION__);
for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
mutex_init(&p3map_mutex[i]);
} }
/* /*
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* arch/sh/mm/pg-sh4.c * arch/sh/mm/pg-sh4.c
* *
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2002 - 2005 Paul Mundt * Copyright (C) 2002 - 2007 Paul Mundt
* *
* Released under the terms of the GNU GPL v2.0. * Released under the terms of the GNU GPL v2.0.
*/ */
...@@ -11,10 +11,35 @@ ...@@ -11,10 +11,35 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
extern struct mutex p3map_mutex[];
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask) #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
/*
 * kmap_coherent() - map @page into a fixmap slot whose virtual address
 * shares the D-cache colour of the user address @addr, so that kernel
 * accesses through the returned pointer hit the same cache lines as the
 * user mapping (SH-4 caches are virtually indexed, physically tagged).
 *
 * @page: page to map
 * @addr: user virtual address whose cache colour must be matched
 *
 * Returns the fixmap virtual address for the mapping.  Must be paired
 * with kunmap_coherent(); preemption is disabled in between because the
 * fixmap slot is selected per-colour, not per-task.
 */
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
enum fixed_addresses idx;
unsigned long vaddr, flags;
pte_t pte;
/* Pin this CPU/task: the colour-indexed fixmap slot is shared. */
inc_preempt_count();
/* Colour index: which alias set of the D-cache @addr falls into. */
idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, PAGE_KERNEL);
/* Kill any stale TLB entry left by a previous user of this slot. */
local_irq_save(flags);
flush_tlb_one(get_asid(), vaddr);
local_irq_restore(flags);
/*
 * NOTE(review): no set_pte() here — update_mmu_cache() (vma == NULL
 * for a kernel mapping) is presumably loading the translation into
 * the TLB directly; confirm against the SH-4 TLB handling code.
 */
update_mmu_cache(NULL, vaddr, pte);
return (void *)vaddr;
}
/*
 * kunmap_coherent() - tear down a kmap_coherent() mapping.
 *
 * No PTE or TLB work is done here; the fixmap slot is simply left for
 * the next kmap_coherent() of the same colour, which re-flushes it.
 * Re-enables preemption (disabled by kmap_coherent()) and reschedules
 * if a preemption was deferred in the meantime.
 *
 * NOTE(review): callers in this commit pass the vaddr returned by
 * kmap_coherent() (e.g. kunmap_coherent(vto)), not a struct page — the
 * argument is unused so this works, but the parameter type looks wrong.
 */
static inline void kunmap_coherent(struct page *page)
{
dec_preempt_count();
preempt_check_resched();
}
/* /*
* clear_user_page * clear_user_page
* @to: P1 address * @to: P1 address
...@@ -27,25 +52,9 @@ void clear_user_page(void *to, unsigned long address, struct page *page) ...@@ -27,25 +52,9 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
clear_page(to); clear_page(to);
else { else {
unsigned long phys_addr = PHYSADDR(to); void *vto = kmap_coherent(page, address);
unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); __clear_user_page(vto, to);
pgd_t *pgd = pgd_offset_k(p3_addr); kunmap_coherent(vto);
pud_t *pud = pud_offset(pgd, p3_addr);
pmd_t *pmd = pmd_offset(pud, p3_addr);
pte_t *pte = pte_offset_kernel(pmd, p3_addr);
pte_t entry;
unsigned long flags;
entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
set_pte(pte, entry);
local_irq_save(flags);
flush_tlb_one(get_asid(), p3_addr);
local_irq_restore(flags);
update_mmu_cache(NULL, p3_addr, entry);
__clear_user_page((void *)p3_addr, to);
pte_clear(&init_mm, p3_addr, pte);
mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
} }
} }
...@@ -63,25 +72,9 @@ void copy_user_page(void *to, void *from, unsigned long address, ...@@ -63,25 +72,9 @@ void copy_user_page(void *to, void *from, unsigned long address,
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
copy_page(to, from); copy_page(to, from);
else { else {
unsigned long phys_addr = PHYSADDR(to); void *vfrom = kmap_coherent(page, address);
unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); __copy_user_page(vfrom, from, to);
pgd_t *pgd = pgd_offset_k(p3_addr); kunmap_coherent(vfrom);
pud_t *pud = pud_offset(pgd, p3_addr);
pmd_t *pmd = pmd_offset(pud, p3_addr);
pte_t *pte = pte_offset_kernel(pmd, p3_addr);
pte_t entry;
unsigned long flags;
entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
set_pte(pte, entry);
local_irq_save(flags);
flush_tlb_one(get_asid(), p3_addr);
local_irq_restore(flags);
update_mmu_cache(NULL, p3_addr, entry);
__copy_user_page((void *)p3_addr, from, to);
pte_clear(&init_mm, p3_addr, pte);
mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
} }
} }
......
...@@ -46,6 +46,9 @@ ...@@ -46,6 +46,9 @@
* fix-mapped? * fix-mapped?
*/ */
enum fixed_addresses { enum fixed_addresses {
#define FIX_N_COLOURS 16
FIX_CMAP_BEGIN,
FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
...@@ -53,8 +56,8 @@ enum fixed_addresses { ...@@ -53,8 +56,8 @@ enum fixed_addresses {
__end_of_fixed_addresses __end_of_fixed_addresses
}; };
extern void __set_fixmap (enum fixed_addresses idx, extern void __set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags); unsigned long phys, pgprot_t flags);
#define set_fixmap(idx, phys) \ #define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL) __set_fixmap(idx, phys, PAGE_KERNEL)
...@@ -106,5 +109,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) ...@@ -106,5 +109,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr); return __virt_to_fix(vaddr);
} }
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment