Commit 7e3bfc7c authored by Ralf Baechle

[MIPS] Handle IDE PIO cache aliases on SMP.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent bb12d612
@@ -260,6 +260,10 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma, unsigned long page,
{
}
static void local_r3k_flush_data_cache_page(unsigned long addr)
{
}
static void r3k_flush_data_cache_page(unsigned long addr)
{
}
@@ -335,6 +339,7 @@ void __init r3k_cache_init(void)
	flush_icache_range = r3k_flush_icache_range;
	flush_cache_sigtramp = r3k_flush_cache_sigtramp;
	local_flush_data_cache_page = local_r3k_flush_data_cache_page;
	flush_data_cache_page = r3k_flush_data_cache_page;
	_dma_cache_wback_inv = r3k_dma_cache_wback_inv;
...
@@ -1199,6 +1199,7 @@ void __init r4k_cache_init(void)
	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	local_flush_data_cache_page = local_r4k_flush_data_cache_page;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;
...
@@ -528,6 +528,7 @@ void sb1_cache_init(void)
	flush_cache_page = sb1_flush_cache_page;
	flush_cache_sigtramp = sb1_flush_cache_sigtramp;
	local_flush_data_cache_page = (void *) sb1_nop;
	flush_data_cache_page = (void *) sb1_nop;
	/* Full flush */
...
@@ -216,6 +216,11 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
	tx39_blast_icache_page_indexed(page);
}
/*
 * Flush the D-cache lines for the single page at @addr on the local
 * CPU via the TX39 page-blast primitive.  Installed as the
 * local_flush_data_cache_page hook in tx39_cache_init(); "local" means
 * it never cross-calls other CPUs, so it is usable from contexts that
 * must not send IPIs (see the IDE PIO helpers in this patch).
 * NOTE(review): whether this writes back, invalidates, or both lives
 * inside tx39_blast_dcache_page() and is not visible here — confirm.
 */
static void local_tx39_flush_data_cache_page(void * addr)
{
	tx39_blast_dcache_page(addr);
}
static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
@@ -381,6 +386,7 @@ void __init tx39_cache_init(void)
	flush_icache_range = (void *) tx39h_flush_icache_all;
	flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
	local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
	flush_data_cache_page = (void *) tx39h_flush_icache_all;
	_dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
@@ -406,6 +412,7 @@ void __init tx39_cache_init(void)
	flush_icache_range = tx39_flush_icache_range;
	flush_cache_sigtramp = tx39_flush_cache_sigtramp;
	local_flush_data_cache_page = local_tx39_flush_data_cache_page;
	flush_data_cache_page = tx39_flush_data_cache_page;
	_dma_cache_wback_inv = tx39_dma_cache_wback_inv;
...
@@ -30,6 +30,7 @@ void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);
/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);
...
@@ -74,6 +74,7 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
/*
...
@@ -104,65 +104,107 @@ static __inline__ unsigned long ide_default_io_base(int index)
#endif

/* MIPS port and memory-mapped I/O string operations. */
/*
 * Begin an IDE PIO transfer.  On SMP kernels whose CPU has D-cache
 * aliases, disable preemption for the duration of the transfer so that
 * the flush done before __ide_flush_epilogue() runs on the same CPU
 * that moved the data — an IPI-based flush is not usable here because
 * callers may run with interrupts disabled (see the deadlock comment
 * above __ide_insw()).  Must be paired with __ide_flush_epilogue().
 */
static inline void __ide_flush_prologue(void)
{
#ifdef CONFIG_SMP
	if (cpu_has_dc_aliases)
		preempt_disable();
#endif
}
/*
 * End an IDE PIO transfer started with __ide_flush_prologue():
 * re-enable preemption on SMP when the CPU has D-cache aliases.
 * The condition must mirror the prologue exactly so that the
 * preempt_disable()/preempt_enable() pair stays balanced.
 */
static inline void __ide_flush_epilogue(void)
{
#ifdef CONFIG_SMP
	if (cpu_has_dc_aliases)
		preempt_enable();
#endif
}
static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size)
{
	if (cpu_has_dc_aliases) {
		unsigned long end = addr + size;

-		for (; addr < end; addr += PAGE_SIZE)
-			flush_dcache_page(virt_to_page(addr));
+		while (addr < end) {
+			local_flush_data_cache_page((void *)addr);
+			addr += PAGE_SIZE;
+		}
	}
}
/*
* insw() and gang might be called with interrupts disabled, so we can't
* send IPIs for flushing due to the potential of deadlocks, see the comment
* above smp_call_function() in arch/mips/kernel/smp.c. We work around the
* problem by disabling preemption so we know we actually perform the flush
* on the processor that actually has the lines to be flushed which hopefully
* is even better for performance anyway.
*/
static inline void __ide_insw(unsigned long port, void *addr,
	unsigned int count)
{
	__ide_flush_prologue();
	insw(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 2);
	__ide_flush_epilogue();
}
static inline void __ide_insl(unsigned long port, void *addr, unsigned int count)
{
	__ide_flush_prologue();
	insl(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 4);
	__ide_flush_epilogue();
}
static inline void __ide_outsw(unsigned long port, const void *addr,
	unsigned long count)
{
	__ide_flush_prologue();
	outsw(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 2);
	__ide_flush_epilogue();
}
static inline void __ide_outsl(unsigned long port, const void *addr,
	unsigned long count)
{
	__ide_flush_prologue();
	outsl(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 4);
	__ide_flush_epilogue();
}
static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
{
	__ide_flush_prologue();
	readsw(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 2);
	__ide_flush_epilogue();
}
static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
{
	__ide_flush_prologue();
	readsl(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 4);
	__ide_flush_epilogue();
}
static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
{
	__ide_flush_prologue();
	writesw(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 2);
	__ide_flush_epilogue();
}
static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
{
	__ide_flush_prologue();
	writesl(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 4);
	__ide_flush_epilogue();
}
/* ide_insw calls insw, not __ide_insw. Why? */
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.