Commit 94ecd224
Authored Aug 16, 2009 by Paul Mundt
sh: Fix up the SH-5 build with caches enabled.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Parent 1ee4ab09
Showing 6 changed files with 64 additions and 303 deletions (+64, -303)
arch/sh/include/asm/system.h       +1   -13
arch/sh/include/asm/system_32.h    +10  -0
arch/sh/include/asm/system_64.h    +5   -0
arch/sh/kernel/sh_ksyms_64.c       +0   -8
arch/sh/mm/cache-sh5.c             +21  -228
arch/sh/mm/flush-sh4.c             +27  -54
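In short, the commit drops the hard-coded __icbi() block from asm/system.h and instead defines per-ISA cache-maintenance macros (__icbi, __ocbp, __ocbi, __ocbwb) in system_32.h and system_64.h, so the flush loops in flush-sh4.c no longer carry raw SH-4 inline assembly and can also serve SH-5. As a rough illustration (the helper name and the simplified, non-unrolled loop below are illustrative only; the real routine is the unrolled sh4__flush_wback_region() shown in the last diff), the writeback loop reduces to:

/*
 * Illustrative sketch only: write back one cache line at a time over
 * [start, start + size).  __ocbwb() expands to "ocbwb @Rn" on SH-4
 * and to "ocbwb Rn, 0" on SH-5, per the macros added below.
 */
static void wback_region_sketch(unsigned long start, int size)
{
	unsigned long v   = start & ~(L1_CACHE_BYTES - 1);
	unsigned long end = (start + size + L1_CACHE_BYTES - 1) &
			    ~(L1_CACHE_BYTES - 1);

	while (v < end) {
		__ocbwb(v);
		v += L1_CACHE_BYTES;
	}
}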
arch/sh/include/asm/system.h

@@ -14,18 +14,6 @@
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
-#define __icbi()				\
-{						\
-	unsigned long __addr;			\
-	__addr = 0xa8000000;			\
-	__asm__ __volatile__(			\
-		"icbi %0\n\t"			\
-		: /* no output */		\
-		: "m" (__m(__addr)));		\
-}
-#endif
-
 /*
  * A brief note on ctrl_barrier(), the control register write barrier.
  *
@@ -44,7 +32,7 @@
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
 #define wmb()		__asm__ __volatile__ ("synco": : :"memory")
-#define ctrl_barrier()	__icbi()
+#define ctrl_barrier()	__icbi(0xa8000000)
 #define read_barrier_depends()	do { } while(0)
 #else
 #define mb()		__asm__ __volatile__ ("": : :"memory")
arch/sh/include/asm/system_32.h

@@ -63,6 +63,16 @@ do {							\
 #define __restore_dsp(tsk)	do { } while (0)
 #endif
 
+#if defined(CONFIG_CPU_SH4A)
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
+#else
+#define __icbi(addr)	mb()
+#endif
+
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
+
 struct task_struct *__switch_to(struct task_struct *prev,
 				struct task_struct *next);
arch/sh/include/asm/system_64.h

@@ -37,6 +37,11 @@ do {							\
 #define jump_to_uncached()	do { } while (0)
 #define back_to_cached()	do { } while (0)
 
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
+
 static inline reg_size_t register_align(void *val)
 {
 	return (unsigned long long)(signed long long)(signed long)val;
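Note that the operand form differs between the two ISAs: the 32-bit macros use SH-4's register-indirect "@%0" addressing, while the SH-5 variants use the "%0, 0" register-plus-displacement form. With these in place, the ctrl_barrier() definition in asm/system.h (first diff above) stays a one-liner; roughly, and purely as an illustration, it now expands as follows:

/* asm/system.h, shared definition (from the first hunk above): */
#define ctrl_barrier()	__icbi(0xa8000000)

/* On SH-4A (system_32.h) this is roughly: */
__asm__ __volatile__("icbi @%0\n\t" : : "r" (0xa8000000));

/* On SH-5 (system_64.h) it is roughly: */
__asm__ __volatile__("icbi %0, 0\n\t" : : "r" (0xa8000000));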
arch/sh/kernel/sh_ksyms_64.c

@@ -30,14 +30,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(kernel_thread);
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU)
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
 #ifdef CONFIG_VT
 EXPORT_SYMBOL(screen_info);
 #endif
arch/sh/mm/cache-sh5.c

(Diff collapsed in the page view; not shown here.)
arch/sh/mm/flush-sh4.c

@@ -19,28 +19,19 @@ static void sh4__flush_wback_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }

@@ -62,27 +53,18 @@ static void sh4__flush_purge_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 	while (cnt >= 8) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }

@@ -101,28 +83,19 @@ static void sh4__flush_invalidate_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }