Commit 22607a28 authored by Michal Simek's avatar Michal Simek

microblaze: Add define for ASM_LOOP

It is the default option, but both options must be measured.
Signed-off-by: default avatarMichal Simek <monstr@monstr.eu>
parent dcbae4be
...@@ -15,25 +15,6 @@ ...@@ -15,25 +15,6 @@
#include <asm/cpuinfo.h> #include <asm/cpuinfo.h>
#include <asm/pvr.h> #include <asm/pvr.h>
/*
 * Invalidate/flush the instruction-cache line containing @addr by issuing
 * the MicroBlaze "wic" (write to instruction cache) instruction with the
 * address in a register and r0 as the second operand.
 * NOTE(review): exact invalidate-vs-flush semantics of "wic" with r0 come
 * from the MicroBlaze ISA reference — confirm against the CPU manual.
 */
static inline void __invalidate_flush_icache(unsigned int addr)
{
/* wic <addr>, r0 — operate on the single cache line holding addr */
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (addr));
}
/*
 * Flush the data-cache line containing @addr using the MicroBlaze
 * "wdc.flush" (write to data cache, flush variant) instruction.
 * Presumably writes back a dirty line before invalidating it — confirm
 * against the MicroBlaze ISA reference.
 */
static inline void __flush_dcache(unsigned int addr)
{
/* wdc.flush <addr>, r0 — per-line dcache flush */
__asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (addr));
}
/*
 * Invalidate a data-cache line via the MicroBlaze "wdc.clear" instruction.
 * Unlike the helpers above, this variant takes two register operands:
 * @baseaddr and @offset are combined by the instruction to select the line.
 * NOTE(review): how baseaddr/offset are combined is defined by the ISA,
 * not visible here — verify against the MicroBlaze reference guide.
 */
static inline void __invalidate_dcache(unsigned int baseaddr,
unsigned int offset)
{
/* wdc.clear <baseaddr>, <offset> — invalidate without write-back */
__asm__ __volatile__ ("wdc.clear %0, %1;" \
: : "r" (baseaddr), "r" (offset));
}
static inline void __enable_icache_msr(void) static inline void __enable_icache_msr(void)
{ {
__asm__ __volatile__ (" msrset r0, %0; \ __asm__ __volatile__ (" msrset r0, %0; \
...@@ -183,10 +164,14 @@ do { \ ...@@ -183,10 +164,14 @@ do { \
"r" (line_length) : "memory"); \ "r" (line_length) : "memory"); \
} while (0); } while (0);
#define ASM_LOOP
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{ {
unsigned long flags; unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__, pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end); (unsigned int)start, (unsigned int) end);
...@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) ...@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
local_irq_save(flags); local_irq_save(flags);
__disable_icache_msr(); __disable_icache_msr();
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_msr(); __enable_icache_msr();
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start, ...@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
unsigned long end) unsigned long end)
{ {
unsigned long flags; unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__, pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end); (unsigned int)start, (unsigned int) end);
...@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start, ...@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
local_irq_save(flags); local_irq_save(flags);
__disable_icache_nomsr(); __disable_icache_nomsr();
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_nomsr(); __enable_icache_nomsr();
local_irq_restore(flags); local_irq_restore(flags);
...@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start, ...@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
static void __flush_icache_range_noirq(unsigned long start, static void __flush_icache_range_noirq(unsigned long start,
unsigned long end) unsigned long end)
{ {
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__, pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end); (unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end, CACHE_LOOP_LIMITS(start, end,
cpuinfo.icache_line_length, cpuinfo.icache_size); cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
} }
static void __flush_icache_all_msr_irq(void) static void __flush_icache_all_msr_irq(void)
{ {
unsigned long flags; unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__); pr_debug("%s\n", __func__);
local_irq_save(flags); local_irq_save(flags);
__disable_icache_msr(); __disable_icache_msr();
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
for (i = 0; i < cpuinfo.icache_size;
i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_msr(); __enable_icache_msr();
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void) ...@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
static void __flush_icache_all_nomsr_irq(void) static void __flush_icache_all_nomsr_irq(void)
{ {
unsigned long flags; unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__); pr_debug("%s\n", __func__);
local_irq_save(flags); local_irq_save(flags);
__disable_icache_nomsr(); __disable_icache_nomsr();
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
for (i = 0; i < cpuinfo.icache_size;
i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_nomsr(); __enable_icache_nomsr();
local_irq_restore(flags); local_irq_restore(flags);
} }
static void __flush_icache_all_noirq(void) static void __flush_icache_all_noirq(void)
{ {
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__); pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
for (i = 0; i < cpuinfo.icache_size;
i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
} }
static void __invalidate_dcache_all_msr_irq(void) static void __invalidate_dcache_all_msr_irq(void)
{ {
unsigned long flags; unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__); pr_debug("%s\n", __func__);
local_irq_save(flags); local_irq_save(flags);
__disable_dcache_msr(); __disable_dcache_msr();
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_msr(); __enable_dcache_msr();
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void) ...@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
static void __invalidate_dcache_all_nomsr_irq(void) static void __invalidate_dcache_all_nomsr_irq(void)
{ {
unsigned long flags; unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__); pr_debug("%s\n", __func__);
local_irq_save(flags); local_irq_save(flags);
__disable_dcache_nomsr(); __disable_dcache_nomsr();
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_nomsr(); __enable_dcache_nomsr();
local_irq_restore(flags); local_irq_restore(flags);
} }
static void __invalidate_dcache_all_noirq_wt(void) static void __invalidate_dcache_all_noirq_wt(void)
{ {
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__); pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
} }
/* FIXME this is weird - should be only wdc but not work /* FIXME this is weird - should be only wdc but not work
* MS: I am getting bus errors and other weird things */ * MS: I am getting bus errors and other weird things */
static void __invalidate_dcache_all_wb(void) static void __invalidate_dcache_all_wb(void)
{ {
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__); pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.clear) wdc.clear)
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc.clear %0, r0;" \
: : "r" (i));
#endif
} }
static void __invalidate_dcache_range_wb(unsigned long start, static void __invalidate_dcache_range_wb(unsigned long start,
unsigned long end) unsigned long end)
{ {
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__, pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end); (unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end, CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size); cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wdc.clear %0, r0;" \
: : "r" (i));
#endif
} }
static void __invalidate_dcache_range_nomsr_wt(unsigned long start, static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
unsigned long end) unsigned long end)
{ {
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__, pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end); (unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end, CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size); cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
} }
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
unsigned long end) unsigned long end)
{ {
unsigned long flags; unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__, pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end); (unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end, CACHE_LOOP_LIMITS(start, end,
...@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, ...@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
local_irq_save(flags); local_irq_save(flags);
__disable_dcache_msr(); __disable_dcache_msr();
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_msr(); __enable_dcache_msr();
local_irq_restore(flags); local_irq_restore(flags);
...@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start, ...@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
unsigned long end) unsigned long end)
{ {
unsigned long flags; unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__, pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end); (unsigned int)start, (unsigned int) end);
...@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start, ...@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
local_irq_save(flags); local_irq_save(flags);
__disable_dcache_nomsr(); __disable_dcache_nomsr();
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_nomsr(); __enable_dcache_nomsr();
local_irq_restore(flags); local_irq_restore(flags);
...@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start, ...@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
static void __flush_dcache_all_wb(void) static void __flush_dcache_all_wb(void)
{ {
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__); pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.flush); wdc.flush);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (i));
#endif
} }
static void __flush_dcache_range_wb(unsigned long start, unsigned long end) static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{ {
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__, pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end); (unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end, CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size); cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (i));
#endif
} }
/* struct for wb caches and for wt caches */ /* struct for wb caches and for wt caches */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment