linux / linux-davinci / Commits / 35d1bc90

Commit 35d1bc90
authored Jun 08, 2005 by Linus Torvalds
Automatic merge of master.kernel.org:/home/rmk/linux-2.6-arm
parents 1d6757fb f8f98a93

Showing 14 changed files with 241 additions and 220 deletions (inline diff view)
arch/arm/boot/compressed/head-xscale.S    +7    -0
arch/arm/mach-pxa/mainstone.c             +9    -0
arch/arm/mach-pxa/pm.c                    +18   -14
arch/arm/mach-pxa/pxa25x.c                +29   -0
arch/arm/mach-pxa/pxa27x.c                +32   -0
arch/arm/mach-s3c2410/dma.c               +4    -0
arch/arm/mm/Kconfig                       +0    -7
arch/arm/mm/Makefile                      +0    -2
arch/arm/mm/copypage-xscale.S             +0    -113
arch/arm/mm/copypage-xscale.c             +131  -0
arch/arm/mm/minicache.c                   +0    -73
include/asm-arm/arch-ixp2000/io.h         +8    -8
include/asm-arm/elf.h                     +2    -2
include/asm-arm26/elf.h                   +1    -1
arch/arm/boot/compressed/head-xscale.S (+7 -0)

@@ -47,3 +47,10 @@ __XScale_start:
                orr     r7, r7, #(MACH_TYPE_GTWX5715 & 0xff00)
#endif

#ifdef CONFIG_ARCH_IXP2000
                mov     r1, #-1
                mov     r0, #0xd6000000
                str     r1, [r0, #0x14]
                str     r1, [r0, #0x18]
#endif
arch/arm/mach-pxa/mainstone.c (+9 -0)

@@ -304,6 +304,15 @@ static void __init mainstone_map_io(void)
        PWER  = 0xC0000002;
        PRER  = 0x00000002;
        PFER  = 0x00000002;
        /* for use I SRAM as framebuffer.  */
        PSLR |= 0xF04;
        PCFR = 0x66;
        /* For Keypad wakeup.  */
        KPC &= ~KPC_ASACT;
        KPC |= KPC_AS;
        PKWR  = 0x000FD000;
        /* Need read PKWR back after set it.  */
        PKWR;
}

MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
arch/arm/mach-pxa/pm.c (+18 -14)

@@ -29,9 +29,6 @@
 */

#undef DEBUG

extern void pxa_cpu_suspend(void);
extern void pxa_cpu_resume(void);

#define SAVE(x)         sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x)      x = sleep_save[SLEEP_SAVE_##x]
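Note: the SAVE()/RESTORE() helpers above use preprocessor token pasting to pair each register with its slot in sleep_save[]. A minimal stand-alone sketch of the same pattern (the ICMR variable here is a hypothetical stand-in for the real memory-mapped register):

#include <stdio.h>

enum { SLEEP_SAVE_START = 0, SLEEP_SAVE_ICMR, SLEEP_SAVE_CKSUM, SLEEP_SAVE_SIZE };

static unsigned long sleep_save[SLEEP_SAVE_SIZE];
static unsigned long ICMR = 0x12345678;        /* stand-in for the real register */

#define SAVE(x)         sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x)      x = sleep_save[SLEEP_SAVE_##x]

int main(void)
{
        SAVE(ICMR);     /* expands to: sleep_save[SLEEP_SAVE_ICMR] = ICMR */
        ICMR = 0;       /* the suspend path clears the live register */
        RESTORE(ICMR);  /* expands to: ICMR = sleep_save[SLEEP_SAVE_ICMR] */
        printf("ICMR restored to %#lx\n", ICMR);
        return 0;
}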
@@ -63,6 +60,12 @@ enum { SLEEP_SAVE_START = 0,
        SLEEP_SAVE_ICMR,
        SLEEP_SAVE_CKEN,

#ifdef CONFIG_PXA27x
        SLEEP_SAVE_MDREFR,
        SLEEP_SAVE_PWER, SLEEP_SAVE_PCFR, SLEEP_SAVE_PRER,
        SLEEP_SAVE_PFER, SLEEP_SAVE_PKWR,
#endif

        SLEEP_SAVE_CKSUM,
        SLEEP_SAVE_SIZE

@@ -75,9 +78,7 @@ static int pxa_pm_enter(suspend_state_t state)
        unsigned long checksum = 0;
        struct timespec delta, rtc;
        int i;

        if (state != PM_SUSPEND_MEM)
                return -EINVAL;
        extern void pxa_cpu_pm_enter(suspend_state_t state);

#ifdef CONFIG_IWMMXT
        /* force any iWMMXt context to ram **/

@@ -100,16 +101,17 @@ static int pxa_pm_enter(suspend_state_t state)
        SAVE(GAFR2_L); SAVE(GAFR2_U);

#ifdef CONFIG_PXA27x
        SAVE(MDREFR);
        SAVE(GPLR3); SAVE(GPDR3); SAVE(GRER3); SAVE(GFER3); SAVE(PGSR3);
        SAVE(GAFR3_L); SAVE(GAFR3_U);
        SAVE(PWER); SAVE(PCFR); SAVE(PRER);
        SAVE(PFER); SAVE(PKWR);
#endif

        SAVE(ICMR);
        ICMR = 0;

        SAVE(CKEN);
        CKEN = 0;

        SAVE(PSTR);

        /* Note: wake up source are set up in each machine specific files */

@@ -123,16 +125,13 @@ static int pxa_pm_enter(suspend_state_t state)
        /* Clear sleep reset status */
        RCSR = RCSR_SMR;

        /* set resume return address */
        PSPR = virt_to_phys(pxa_cpu_resume);

        /* before sleeping, calculate and save a checksum */
        for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
                checksum += sleep_save[i];
        sleep_save[SLEEP_SAVE_CKSUM] = checksum;

        /* *** go zzz *** */
        pxa_cpu_suspend();
        pxa_cpu_pm_enter(state);

        /* after sleeping, validate the checksum */
        checksum = 0;

@@ -145,7 +144,7 @@ static int pxa_pm_enter(suspend_state_t state)
                LUB_HEXLED = 0xbadbadc5;
#endif
                while (1)
                        pxa_cpu_suspend();
                        pxa_cpu_pm_enter(state);
        }

        /* ensure not to come back here if it wasn't intended */
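Note: before sleeping, pxa_pm_enter() sums every sleep_save[] slot except the checksum slot itself and stores the result in sleep_save[SLEEP_SAVE_CKSUM]; after resume it recomputes the sum and, on mismatch, parks the CPU in the while (1) loop above. A stand-alone sketch of that round trip (the array contents here are made up for the example):

#include <stdio.h>

#define SLEEP_SAVE_SIZE         4
#define SLEEP_SAVE_CKSUM        (SLEEP_SAVE_SIZE - 1)

static unsigned long sleep_save[SLEEP_SAVE_SIZE] = { 0x11, 0x22, 0x33, 0 };

int main(void)
{
        unsigned long checksum = 0;
        int i;

        /* before sleeping: sum everything except the checksum slot */
        for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
                checksum += sleep_save[i];
        sleep_save[SLEEP_SAVE_CKSUM] = checksum;

        /* ... sleep, resume ... */

        /* after resume: recompute and compare */
        checksum = 0;
        for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
                checksum += sleep_save[i];

        if (checksum != sleep_save[SLEEP_SAVE_CKSUM])
                printf("save area corrupted, do not restore\n");
        else
                printf("checksum ok: %#lx\n", checksum);
        return 0;
}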
@@ -162,8 +161,11 @@ static int pxa_pm_enter(suspend_state_t state)
        RESTORE(PGSR0); RESTORE(PGSR1); RESTORE(PGSR2);

#ifdef CONFIG_PXA27x
        RESTORE(MDREFR);
        RESTORE(GAFR3_L); RESTORE(GAFR3_U);
        RESTORE_GPLEVEL(3); RESTORE(GPDR3);
        RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3);
        RESTORE(PWER); RESTORE(PCFR); RESTORE(PRER);
        RESTORE(PFER); RESTORE(PKWR);
#endif

        PSSR = PSSR_RDH | PSSR_PH;

@@ -197,7 +199,9 @@ unsigned long sleep_phys_sp(void *sp)
 */
static int pxa_pm_prepare(suspend_state_t state)
{
        return 0;
        extern int pxa_cpu_pm_prepare(suspend_state_t state);

        return pxa_cpu_pm_prepare(state);
}

/*
arch/arm/mach-pxa/pxa25x.c (+29 -0)

@@ -102,3 +102,32 @@ unsigned int get_lcdclk_frequency_10khz(void)
}

EXPORT_SYMBOL(get_lcdclk_frequency_10khz);

int pxa_cpu_pm_prepare(suspend_state_t state)
{
        switch (state) {
        case PM_SUSPEND_MEM:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

void pxa_cpu_pm_enter(suspend_state_t state)
{
        extern void pxa_cpu_suspend(unsigned int);
        extern void pxa_cpu_resume(void);

        CKEN = 0;

        switch (state) {
        case PM_SUSPEND_MEM:
                /* set resume return address */
                PSPR = virt_to_phys(pxa_cpu_resume);
                pxa_cpu_suspend(3);
                break;
        }
}
arch/arm/mach-pxa/pxa27x.c (+32 -0)

@@ -120,6 +120,38 @@ EXPORT_SYMBOL(get_clk_frequency_khz);
EXPORT_SYMBOL(get_memclk_frequency_10khz);
EXPORT_SYMBOL(get_lcdclk_frequency_10khz);

int pxa_cpu_pm_prepare(suspend_state_t state)
{
        switch (state) {
        case PM_SUSPEND_MEM:
                return 0;
        default:
                return -EINVAL;
        }
}

void pxa_cpu_pm_enter(suspend_state_t state)
{
        extern void pxa_cpu_standby(void);
        extern void pxa_cpu_suspend(unsigned int);
        extern void pxa_cpu_resume(void);

        CKEN = CKEN22_MEMC | CKEN9_OSTIMER;

        /* ensure voltage-change sequencer not initiated, which hangs */
        PCFR &= ~PCFR_FVC;

        /* Clear edge-detect status register. */
        PEDR = 0xDF12FE1B;

        switch (state) {
        case PM_SUSPEND_MEM:
                /* set resume return address */
                PSPR = virt_to_phys(pxa_cpu_resume);
                pxa_cpu_suspend(3);
                break;
        }
}

/*
 * device registration specific to PXA27x.
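Note: taken together with the pm.c hunks above, the suspend path is now split into a generic part and per-variant hooks: pm.c calls pxa_cpu_pm_prepare()/pxa_cpu_pm_enter(), and pxa25x.c or pxa27x.c supplies the implementation. A much-simplified sketch of that call shape (only the two hook names mirror the diff; the scaffolding around them is hypothetical, not kernel code):

#include <stdio.h>
#include <errno.h>

typedef int suspend_state_t;
#define PM_SUSPEND_MEM 3

/* per-SoC hooks; in the kernel these live in pxa25x.c or pxa27x.c */
static int pxa_cpu_pm_prepare(suspend_state_t state)
{
        return state == PM_SUSPEND_MEM ? 0 : -EINVAL;
}

static void pxa_cpu_pm_enter(suspend_state_t state)
{
        printf("SoC-specific entry into state %d\n", state);
}

/* stand-in for the generic pxa_pm_prepare()/pxa_pm_enter() path in pm.c */
static int generic_pm_enter(suspend_state_t state)
{
        int ret = pxa_cpu_pm_prepare(state);
        if (ret)
                return ret;
        /* ... save registers, compute checksum, set resume address ... */
        pxa_cpu_pm_enter(state);
        /* ... validate checksum, restore registers ... */
        return 0;
}

int main(void)
{
        return generic_pm_enter(PM_SUSPEND_MEM);
}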
arch/arm/mach-s3c2410/dma.c (+4 -0)

@@ -785,6 +785,10 @@ int s3c2410_dma_free(dmach_t channel, s3c2410_dma_client_t *client)
        chan->client = NULL;
        chan->in_use = 0;

        if (chan->irq_claimed)
                free_irq(chan->irq, (void *)chan);
        chan->irq_claimed = 0;

        local_irq_restore(flags);

        return 0;
arch/arm/mm/Kconfig (+0 -7)

@@ -228,7 +228,6 @@ config CPU_SA1100
        select CPU_CACHE_V4WB
        select CPU_CACHE_VIVT
        select CPU_TLB_V4WB
        select CPU_MINICACHE

# XScale
config CPU_XSCALE

@@ -239,7 +238,6 @@ config CPU_XSCALE
        select CPU_ABRT_EV5T
        select CPU_CACHE_VIVT
        select CPU_TLB_V4WBI
        select CPU_MINICACHE

# ARMv6
config CPU_V6

@@ -345,11 +343,6 @@ config CPU_TLB_V4WBI
config CPU_TLB_V6
        bool

config CPU_MINICACHE
        bool
        help
          Processor has a minicache.

comment "Processor Features"

config ARM_THUMB
arch/arm/mm/Makefile (+0 -2)

@@ -31,8 +31,6 @@ obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
obj-$(CONFIG_CPU_SA1100)        += copypage-v4mc.o
obj-$(CONFIG_CPU_XSCALE)        += copypage-xscale.o
obj-$(CONFIG_CPU_MINICACHE)     += minicache.o

obj-$(CONFIG_CPU_TLB_V3)        += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT)      += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB)      += tlb-v4wb.o
arch/arm/mm/copypage-xscale.S (deleted, 100644 -> 0; +0 -113, shown at parent 1d6757fb)

/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/constants.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */

        .text
        .align  5
/*
 * XScale optimised copy_user_page
 *  r0 = destination
 *  r1 = source
 *  r2 = virtual user address of ultimate destination page
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 *
 * What we could do is use the mini-cache to buffer reads from the
 * source page.  We rely on the mini-cache being smaller than one page,
 * so we'll cycle through the complete cache anyway.
 */
ENTRY(xscale_mc_copy_user_page)
        stmfd   sp!, {r4, r5, lr}
        mov     r5, r0
        mov     r0, r1
        bl      map_page_minicache
        mov     r1, r5
        mov     lr, #PAGE_SZ/64-1

        /*
         * Strangely enough, best performance is achieved
         * when prefetching destination as well.  (NP)
         */
        pld     [r0, #0]
        pld     [r0, #32]
        pld     [r1, #0]
        pld     [r1, #32]

1:      pld     [r0, #64]
        pld     [r0, #96]
        pld     [r1, #64]
        pld     [r1, #96]

2:      ldrd    r2, [r0], #8
        ldrd    r4, [r0], #8
        mov     ip, r1
        strd    r2, [r1], #8
        ldrd    r2, [r0], #8
        strd    r4, [r1], #8
        ldrd    r4, [r0], #8
        strd    r2, [r1], #8
        strd    r4, [r1], #8
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line
        ldrd    r2, [r0], #8
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line
        ldrd    r4, [r0], #8
        mov     ip, r1
        strd    r2, [r1], #8
        ldrd    r2, [r0], #8
        strd    r4, [r1], #8
        ldrd    r4, [r0], #8
        strd    r2, [r1], #8
        strd    r4, [r1], #8
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line
        subs    lr, lr, #1
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line
        bgt     1b
        beq     2b

        ldmfd   sp!, {r4, r5, pc}

        .align  5
/*
 * XScale optimised clear_user_page
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 */
ENTRY(xscale_mc_clear_user_page)
        mov     r1, #PAGE_SZ/32
        mov     r2, #0
        mov     r3, #0
1:      mov     ip, r0
        strd    r2, [r0], #8
        strd    r2, [r0], #8
        strd    r2, [r0], #8
        strd    r2, [r0], #8
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line
        subs    r1, r1, #1
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line
        bne     1b
        mov     pc, lr

        __INITDATA

        .type   xscale_mc_user_fns, #object
ENTRY(xscale_mc_user_fns)
        .long   xscale_mc_clear_user_page
        .long   xscale_mc_copy_user_page
        .size   xscale_mc_user_fns, . - xscale_mc_user_fns
arch/arm/mm/copypage-xscale.c (new file, 0 -> 100644; +131 -0)

/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define COPYPAGE_MINICACHE      0xffff8000

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
                                  L_PTE_CACHEABLE)

#define TOP_PTE(x)      pte_offset_kernel(top_pmd, x)

static DEFINE_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_page
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
        /*
         * Strangely enough, best performance is achieved
         * when prefetching destination as well.  (NP)
         */
        asm volatile(
        "stmfd  sp!, {r4, r5, lr}               \n\
        mov     lr, %2                          \n\
        pld     [r0, #0]                        \n\
        pld     [r0, #32]                       \n\
        pld     [r1, #0]                        \n\
        pld     [r1, #32]                       \n\
1:      pld     [r0, #64]                       \n\
        pld     [r0, #96]                       \n\
        pld     [r1, #64]                       \n\
        pld     [r1, #96]                       \n\
2:      ldrd    r2, [r0], #8                    \n\
        ldrd    r4, [r0], #8                    \n\
        mov     ip, r1                          \n\
        strd    r2, [r1], #8                    \n\
        ldrd    r2, [r0], #8                    \n\
        strd    r4, [r1], #8                    \n\
        ldrd    r4, [r0], #8                    \n\
        strd    r2, [r1], #8                    \n\
        strd    r4, [r1], #8                    \n\
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
        ldrd    r2, [r0], #8                    \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
        ldrd    r4, [r0], #8                    \n\
        mov     ip, r1                          \n\
        strd    r2, [r1], #8                    \n\
        ldrd    r2, [r0], #8                    \n\
        strd    r4, [r1], #8                    \n\
        ldrd    r4, [r0], #8                    \n\
        strd    r2, [r1], #8                    \n\
        strd    r4, [r1], #8                    \n\
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
        subs    lr, lr, #1                      \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
        bgt     1b                              \n\
        beq     2b                              \n\
        ldmfd   sp!, {r4, r5, pc}               "
        :
        : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
        spin_lock(&minicache_lock);

        set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
        flush_tlb_kernel_page(COPYPAGE_MINICACHE);

        mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

        spin_unlock(&minicache_lock);
}

/*
 * XScale optimised clear_user_page
 */
void __attribute__((naked))
xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
{
        asm volatile(
        "mov    r1, %0                          \n\
        mov     r2, #0                          \n\
        mov     r3, #0                          \n\
1:      mov     ip, r0                          \n\
        strd    r2, [r0], #8                    \n\
        strd    r2, [r0], #8                    \n\
        strd    r2, [r0], #8                    \n\
        strd    r2, [r0], #8                    \n\
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
        subs    r1, r1, #1                      \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
        bne     1b                              \n\
        mov     pc, lr"
        :
        : "I" (PAGE_SIZE / 32));
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
        .cpu_clear_user_page    = xscale_mc_clear_user_page,
        .cpu_copy_user_page     = xscale_mc_copy_user_page,
};
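Note on the loop bounds, carried over from the deleted .S version: each pass of the "2:" block moves 64 bytes (eight ldrd/strd pairs of 8 bytes each) and cleans-then-invalidates the two 32-byte destination lines it just wrote, so a 4 KB page needs PAGE_SIZE/64 = 64 passes. lr is preloaded with PAGE_SIZE/64 - 1 because the final pass is reached through the beq 2b path, which (presumably) avoids issuing prefetches 64-96 bytes past the end of the page. The clear routine writes 32 bytes per pass, hence its PAGE_SIZE/32 = 128 loop count.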
arch/arm/mm/minicache.c (deleted; +0 -73)

/*
 *  linux/arch/arm/mm/minicache.c
 *
 *  Copyright (C) 2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define minicache_address (0xffff8000)
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
                                  L_PTE_CACHEABLE)

static pte_t *minicache_pte;

/*
 * Note that this is intended to be called only from the copy_user_page
 * asm code; anything else will require special locking to prevent the
 * mini-cache space being re-used.  (Note: probably preempt unsafe).
 *
 * We rely on the fact that the minicache is 2K, and we'll be pushing
 * 4K of data through it, so we don't actually have to specifically
 * flush the minicache when we change the mapping.
 *
 * Note also: assert(PAGE_OFFSET <= virt < high_memory).
 * Unsafe: preempt, kmap.
 */
unsigned long map_page_minicache(unsigned long virt)
{
        set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot));
        flush_tlb_kernel_page(minicache_address);

        return minicache_address;
}

static int __init minicache_init(void)
{
        pgd_t *pgd;
        pmd_t *pmd;

        spin_lock(&init_mm.page_table_lock);

        pgd = pgd_offset_k(minicache_address);
        pmd = pmd_alloc(&init_mm, pgd, minicache_address);
        if (!pmd)
                BUG();
        minicache_pte = pte_alloc_kernel(&init_mm, pmd, minicache_address);
        if (!minicache_pte)
                BUG();

        spin_unlock(&init_mm.page_table_lock);

        return 0;
}

core_initcall(minicache_init);
include/asm-arm/arch-ixp2000/io.h (+8 -8)

@@ -75,8 +75,8 @@ static inline void insw(u32 ptr, void *buf, int length)
         * Is this cycle meant for the CS8900?
         */
        if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
            ((port >= IXDP2X01_CS8900_VIRT_BASE) &&
             (port <= IXDP2X01_CS8900_VIRT_END))) {
            (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
             ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
                u8 *buf8 = (u8 *)buf;
                register u32 tmp32;

@@ -100,8 +100,8 @@ static inline void outsw(u32 ptr, void *buf, int length)
         * Is this cycle meant for the CS8900?
         */
        if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
            ((port >= IXDP2X01_CS8900_VIRT_BASE) &&
             (port <= IXDP2X01_CS8900_VIRT_END))) {
            (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
             ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
                register u32 tmp32;
                u8 *buf8 = (u8 *)buf;
                do {

@@ -124,8 +124,8 @@ static inline u16 inw(u32 ptr)
         * Is this cycle meant for the CS8900?
         */
        if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
            ((port >= IXDP2X01_CS8900_VIRT_BASE) &&
             (port <= IXDP2X01_CS8900_VIRT_END))) {
            (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
             ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
                return (u16)(*port);
        }

@@ -137,8 +137,8 @@ static inline void outw(u16 value, u32 ptr)
        register volatile u32 *port = (volatile u32 *)ptr;

        if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
            ((port >= IXDP2X01_CS8900_VIRT_BASE) &&
             (port <= IXDP2X01_CS8900_VIRT_END))) {
            (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
             ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
                *port = value;
                return;
        }
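Note: the only change in each of the four hunks is the comparison style. port is a volatile u32 *, so comparing it directly against the IXDP2X01_CS8900_VIRT_* address constants mixes pointer and integer operands; casting both sides to u32 turns the range check into a plain integer comparison. A hypothetical stand-alone illustration, assuming a 32-bit target as on ARM (the constants and helper below are made up for the example):

#include <stdio.h>

typedef unsigned int u32;

#define CS8900_VIRT_BASE        0xfe000000u     /* made-up addresses */
#define CS8900_VIRT_END         0xfe00000fu

static int is_cs8900_cycle(volatile u32 *port)
{
        /*
         * return (port >= CS8900_VIRT_BASE) && (port <= CS8900_VIRT_END);
         * would compare a pointer with an integer constant and draw a
         * compiler diagnostic; casting keeps it an integer range check.
         */
        return ((u32)port >= (u32)CS8900_VIRT_BASE) &&
               ((u32)port <= (u32)CS8900_VIRT_END);
}

int main(void)
{
        volatile u32 *port = (volatile u32 *)0xfe000008u;
        printf("%d\n", is_cs8900_cycle(port));  /* prints 1 */
        return 0;
}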
include/asm-arm/elf.h (+2 -2)

@@ -38,9 +38,9 @@ typedef struct user_fp elf_fpregset_t;
 */
#define ELF_CLASS       ELFCLASS32
#ifdef __ARMEB__
#define ELF_DATA        ELFDATA2MSB;
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB;
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_ARM
include/asm-arm26/elf.h (+1 -1)

@@ -36,7 +36,7 @@ typedef struct { void *null; } elf_fpregset_t;
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB;
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_ARM

#define USE_ELF_CORE_DUMP
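Note: both elf.h hunks drop a stray trailing semicolon from an object-like macro. The semicolon travels with every expansion of ELF_DATA, which is harmless in some contexts but breaks any use of the macro inside an expression. A hypothetical illustration (ELFDATA2LSB and the check() helper are defined locally just for the example):

#define ELFDATA2LSB     1

#define ELF_DATA_BAD    ELFDATA2LSB;    /* old style: the ';' leaks into every expansion */
#define ELF_DATA        ELFDATA2LSB     /* fixed */

int check(unsigned char ei_data)
{
        /*
         * "return ei_data == ELF_DATA_BAD;" expands to "return ei_data == 1;;",
         * which still compiles, but "if (ei_data != ELF_DATA_BAD)" would expand
         * to "if (ei_data != 1;)" and fail to compile.
         */
        return ei_data == ELF_DATA;
}

int main(void)
{
        return check(ELF_DATA) ? 0 : 1;
}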