Commit fbf2b1f9
Authored Mar 24, 2009 by Russell King; committed by Russell King on Mar 24, 2009.
Merge branch 'highmem' into devel

Parents: 9a38e989, 053a96ca
Showing 22 changed files with 557 additions and 79 deletions (+557, -79).
Changed files:

  Documentation/arm/memory.txt                   +8    -1
  arch/arm/Kconfig                               +17   -0
  arch/arm/common/dmabounce.c                    +7    -0
  arch/arm/include/asm/dma-mapping.h             +13   -1
  arch/arm/include/asm/fixmap.h                  +41   -0
  arch/arm/include/asm/highmem.h                 +31   -0
  arch/arm/include/asm/kmap_types.h              +1    -0
  arch/arm/include/asm/memory.h                  +11   -3
  arch/arm/mach-iop13xx/include/mach/memory.h    +4    -1
  arch/arm/mach-ks8695/include/mach/memory.h     +5    -1
  arch/arm/mm/Makefile                           +1    -0
  arch/arm/mm/cache-feroceon-l2.c                +37   -17
  arch/arm/mm/cache-xsc3l2.c                     +80   -27
  arch/arm/mm/dma-mapping.c                      +83   -9
  arch/arm/mm/flush.c                            +1    -1
  arch/arm/mm/highmem.c                          +116  -0
  arch/arm/mm/init.c                             +19   -4
  arch/arm/mm/mm.h                               +1    -2
  arch/arm/mm/mmap.c                             +1    -1
  arch/arm/mm/mmu.c                              +18   -0
  arch/arm/plat-omap/include/mach/memory.h       +5    -3
  mm/highmem.c                                   +57   -8
Documentation/arm/memory.txt

@@ -29,7 +29,14 @@ ffff0000 ffff0fff CPU vector page.
 				CPU supports vector relocation (control
 				register V bit.)
-ffc00000	fffeffff	DMA memory mapping region.  Memory returned
+fffe0000	fffeffff	XScale cache flush area.  This is used
+				in proc-xscale.S to flush the whole data
+				cache.  Free for other usage on non-XScale.
+
+fff00000	fffdffff	Fixmap mapping region.  Addresses provided
+				by fix_to_virt() will be located here.
+
+ffc00000	ffefffff	DMA memory mapping region.  Memory returned
 				by the dma_alloc_xxx functions will be
 				dynamically mapped here.
arch/arm/Kconfig

@@ -939,6 +939,23 @@ config NODES_SHIFT
 	default "2"
 	depends on NEED_MULTIPLE_NODES

+config HIGHMEM
+	bool "High Memory Support (EXPERIMENTAL)"
+	depends on MMU && EXPERIMENTAL
+	help
+	  The address space of ARM processors is only 4 Gigabytes large
+	  and it has to accommodate user address space, kernel address
+	  space as well as some memory mapped IO. That means that, if you
+	  have a large amount of physical memory and/or IO, not all of the
+	  memory can be "permanently mapped" by the kernel. The physical
+	  memory that is not permanently mapped is called "high memory".
+
+	  Depending on the selected kernel/user memory split, minimum
+	  vmalloc space and actual amount of RAM, you may not need this
+	  option which should result in a slightly faster kernel.
+
+	  If unsure, say n.
+
 source "mm/Kconfig"

 config LEDS
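Editor's note: the help text above describes when highmem pages exist; the kernel can only touch such a page after temporarily mapping it. As an illustration only (not part of this commit), here is a minimal sketch of how driver code typically maps a possibly-highmem page with the sleepable kmap()/kunmap() pair. The helper name fill_page_with_pattern is hypothetical.

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* Hypothetical helper: fill one (possibly highmem) page with a byte pattern. */
	static void fill_page_with_pattern(struct page *page, int pattern)
	{
		/* kmap() may sleep, so call this from process context only. */
		void *vaddr = kmap(page);

		memset(vaddr, pattern, PAGE_SIZE);

		/* Drop the temporary kernel mapping again. */
		kunmap(page);
	}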
arch/arm/common/dmabounce.c

@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/page-flags.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>

@@ -349,6 +350,12 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,

 	BUG_ON(!valid_dma_direction(dir));

+	if (PageHighMem(page)) {
+		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
+			     "is not supported\n");
+		return ~0;
+	}
+
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
 EXPORT_SYMBOL(dma_map_page);
arch/arm/include/asm/dma-mapping.h

@@ -15,10 +15,20 @@
  * must not be used by drivers.
  */
 #ifndef __arch_page_to_dma
+#if !defined(CONFIG_HIGHMEM)
 static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 {
 	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
 }
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+}
+#else
+#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
+#endif

 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {

@@ -57,6 +67,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint_page(struct page *page, unsigned long offset,
+				 size_t size, int rw);

 /*
  * Return whether the given device DMA address mask can be supported

@@ -316,7 +328,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,

 	BUG_ON(!valid_dma_direction(dir));

 	if (!arch_is_coherent())
-		dma_cache_maint(page_address(page) + offset, size, dir);
+		dma_cache_maint_page(page, offset, size, dir);

 	return page_to_dma(dev, page) + offset;
 }
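Editor's note: dma_map_page() above now goes through dma_cache_maint_page(), so a driver can hand it a highmem page directly instead of a lowmem virtual address. A hedged usage sketch (not taken from this commit); my_dev, my_page and send_page_to_device are placeholder names.

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/mm.h>

	/* Hypothetical: stream one page of data to a device and unmap it again. */
	static int send_page_to_device(struct device *my_dev, struct page *my_page,
				       size_t len)
	{
		dma_addr_t handle;

		/* Works for lowmem and highmem pages; no page_address() needed. */
		handle = dma_map_page(my_dev, my_page, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(my_dev, handle))
			return -ENOMEM;

		/* ... program the device with 'handle' and wait for completion ... */

		dma_unmap_page(my_dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}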
arch/arm/include/asm/fixmap.h  (new file, mode 100644)

#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H

/*
 * Nothing too fancy for now.
 *
 * On ARM we already have well known fixed virtual addresses imposed by
 * the architecture such as the vector page which is located at 0xffff0000,
 * therefore a second level page table is already allocated covering
 * 0xfff00000 upwards.
 *
 * The cache flushing code in proc-xscale.S uses the virtual area between
 * 0xfffe0000 and 0xfffeffff.
 */

#define FIXADDR_START		0xfff00000UL
#define FIXADDR_TOP		0xfffe0000UL
#define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)

#define FIX_KMAP_BEGIN		0
#define FIX_KMAP_END		(FIXADDR_SIZE >> PAGE_SHIFT)

#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)

extern void __this_fixmap_does_not_exist(void);

static inline unsigned long fix_to_virt(const unsigned int idx)
{
	if (idx >= FIX_KMAP_END)
		__this_fixmap_does_not_exist();
	return __fix_to_virt(idx);
}

static inline unsigned int virt_to_fix(const unsigned long vaddr)
{
	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
	return __virt_to_fix(vaddr);
}

#endif
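Editor's note: to make the address arithmetic above concrete, here is a small standalone illustration (not kernel code) of __fix_to_virt()/__virt_to_fix() using the constants from this header and an assumed 4 KB page size.

	#include <assert.h>
	#include <stdio.h>

	/* Constants copied from asm/fixmap.h; PAGE_SHIFT assumed to be 12 (4 KB pages). */
	#define PAGE_SHIFT	12
	#define FIXADDR_START	0xfff00000UL
	#define FIXADDR_TOP	0xfffe0000UL
	#define FIXADDR_SIZE	(FIXADDR_TOP - FIXADDR_START)
	#define FIX_KMAP_END	(FIXADDR_SIZE >> PAGE_SHIFT)	/* 224 page-sized slots */

	#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
	#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)

	int main(void)
	{
		unsigned long va = __fix_to_virt(3);		/* 0xfff03000 */

		printf("slots available: %lu\n", (unsigned long)FIX_KMAP_END);
		printf("fixmap slot 3 maps at 0x%lx\n", va);
		assert(__virt_to_fix(va) == 3);			/* round trip */
		return 0;
	}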
arch/arm/include/asm/highmem.h  (new file, mode 100644)

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/kmap_types.h>

#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define kmap_prot		PAGE_KERNEL

#define flush_cache_kmaps()	flush_cache_all()

extern pte_t *pkmap_page_table;

#define ARCH_NEEDS_KMAP_HIGH_GET

extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);

extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page, enum km_type type);
extern void kunmap_atomic(void *kvaddr, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern struct page *kmap_atomic_to_page(const void *ptr);

#endif
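Editor's note: the atomic entry points declared above take an explicit km_type slot. As a hedged illustration only (not part of the patch), here is a sketch of copying into a possibly-highmem destination page from a context that must not sleep; copy_to_page_atomic is a hypothetical helper, and the KM_USER0 slot usage follows the kmap_atomic conventions of this kernel era.

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper: copy a buffer into a page without sleeping. */
	static void copy_to_page_atomic(struct page *page, const void *src, size_t len)
	{
		/* The KM_USER0 slot is reserved for short-lived uses like this. */
		void *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst, src, len);

		/* Tear the temporary fixmap mapping down again. */
		kunmap_atomic(dst, KM_USER0);
	}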
arch/arm/include/asm/kmap_types.h

@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L2_CACHE,
 	KM_TYPE_NR
 };
arch/arm/include/asm/memory.h

@@ -44,13 +44,20 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
-#define MODULES_END		(PAGE_OFFSET)
-#define MODULES_VADDR		(MODULES_END - 16*1048576)
+#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)

 #if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif

+/*
+ * The highmem pkmap virtual space shares the end of the module area.
+ */
+#ifdef CONFIG_HIGHMEM
+#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
+#else
+#define MODULES_END		(PAGE_OFFSET)
+#endif
+
 /*
  * The XIP kernel gets mapped at the bottom of the module vm area.
  * Since we use sections to map it, this macro replaces the physical address

@@ -181,6 +188,7 @@ static inline void *phys_to_virt(unsigned long x)
 #ifndef __virt_to_bus
 #define __virt_to_bus	__virt_to_phys
 #define __bus_to_virt	__phys_to_virt
+#define __pfn_to_bus(x)	((x) << PAGE_SHIFT)
 #endif

 static inline __deprecated unsigned long virt_to_bus(void *x)
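Editor's note: with the usual 3G/1G split this carves the top 2 MB of the module area out for the pkmap space. A small standalone check of the layout arithmetic, using assumed values (PAGE_OFFSET 0xc0000000 and PMD_SIZE 2 MB, matching the PKMAP_BASE definition in asm/highmem.h above); not kernel code.

	#include <stdio.h>

	int main(void)
	{
		/* Assumed configuration: 3G/1G split, 2 MB PMD sections. */
		unsigned long page_offset = 0xc0000000UL;
		unsigned long pmd_size    = 2UL << 20;

		unsigned long modules_vaddr = page_offset - 16UL * 1024 * 1024; /* 0xbf000000 */
		unsigned long modules_end   = page_offset - pmd_size;           /* 0xbfe00000 */
		unsigned long pkmap_base    = page_offset - pmd_size;           /* same 2 MB window */

		printf("modules: 0x%08lx - 0x%08lx\n", modules_vaddr, modules_end);
		printf("pkmap:   0x%08lx - 0x%08lx\n", pkmap_base, page_offset);
		return 0;
	}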
arch/arm/mach-iop13xx/include/mach/memory.h

@@ -59,7 +59,10 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
 	})

 #define __arch_page_to_dma(dev, page)	\
-	__arch_virt_to_dma(dev, page_address(page))
+	({ \
+	/* __is_lbus_virt() can never be true for RAM pages */ \
+	(dma_addr_t)page_to_phys(page); \
+	})

 #endif /* CONFIG_ARCH_IOP13XX */
 #endif /* !ASSEMBLY */
arch/arm/mach-ks8695/include/mach/memory.h

@@ -35,7 +35,11 @@ extern struct bus_type platform_bus_type;
 					__phys_to_virt(x) : __bus_to_virt(x)); })
 #define __arch_virt_to_dma(dev, x)	({ is_lbus_device(dev) ? \
 					(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
-#define __arch_page_to_dma(dev, x)	__arch_virt_to_dma(dev, page_address(x))
+#define __arch_page_to_dma(dev, x)	\
+	({ dma_addr_t __dma = page_to_phys(page); \
+	   if (!is_lbus_device(dev)) \
+		__dma = __dma - PHYS_OFFSET + KS8695_PCIMEM_PA; \
+	   __dma; })

 #endif
arch/arm/mm/Makefile

@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES)		+= proc-syms.o

 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
+obj-$(CONFIG_HIGHMEM)		+= highmem.o

 obj-$(CONFIG_CPU_ABRT_NOMMU)	+= abort-nommu.o
 obj-$(CONFIG_CPU_ABRT_EV4)	+= abort-ev4.o
arch/arm/mm/cache-feroceon-l2.c

@@ -14,8 +14,12 @@
 #include <linux/init.h>
 #include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
+#include "mm.h"

 /*
  * Low-level cache maintenance operations.

@@ -34,14 +38,36 @@
  * The range operations require two successive cp15 writes, in
  * between which we don't want to be preempted.
  */

+static inline unsigned long l2_start_va(unsigned long paddr)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Let's do our own fixmap stuff in a minimal way here.
+	 * Because range ops can't be done on physical addresses,
+	 * we simply install a virtual mapping for it only for the
+	 * TLB lookup to occur, hence no need to flush the untouched
+	 * memory mapping.  This is protected with the disabling of
+	 * interrupts by the caller.
+	 */
+	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
+	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	local_flush_tlb_kernel_page(vaddr);
+	return vaddr + (paddr & ~PAGE_MASK);
+#else
+	return __phys_to_virt(paddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
 }

-static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
+static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
+	unsigned long va_start, va_end, flags;

 	/*
 	 * Make sure 'start' and 'end' reference the same page, as

@@ -51,17 +77,14 @@ static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
 	BUG_ON((start ^ end) >> PAGE_SHIFT);

 	raw_local_irq_save(flags);
+	va_start = l2_start_va(start);
+	va_end = va_start + (end - start);
 	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 		"mcr p15, 1, %1, c15, c9, 5"
-		: : "r" (start), "r" (end));
+		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
 }

-static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
-{
-	l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end));
-}
-
 static inline void l2_clean_inv_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));

@@ -72,9 +95,9 @@ static inline void l2_inv_pa(unsigned long addr)
 	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
 }

-static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
+static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
+	unsigned long va_start, va_end, flags;

 	/*
 	 * Make sure 'start' and 'end' reference the same page, as

@@ -84,17 +107,14 @@ static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
 	BUG_ON((start ^ end) >> PAGE_SHIFT);

 	raw_local_irq_save(flags);
+	va_start = l2_start_va(start);
+	va_end = va_start + (end - start);
 	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 		"mcr p15, 1, %1, c15, c11, 5"
-		: : "r" (start), "r" (end));
+		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
 }

-static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
-{
-	l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end));
-}
-
 /*
  * Linux primitives.
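Editor's note: the BUG_ON((start ^ end) >> PAGE_SHIFT) check above relies on the XOR of two addresses having no bits set at or above PAGE_SHIFT exactly when both fall in the same page, which is what lets a single fixmap mapping cover the whole range. A small standalone illustration (assumed 4 KB pages, not kernel code):

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed 4 KB pages */

	/* Returns non-zero when 'start' and 'end' live in different pages. */
	static unsigned long different_pages(unsigned long start, unsigned long end)
	{
		return (start ^ end) >> PAGE_SHIFT;
	}

	int main(void)
	{
		printf("%lu\n", different_pages(0x1000, 0x1fff));	/* 0: same page */
		printf("%lu\n", different_pages(0x1fff, 0x2000));	/* non-zero: crosses a page */
		return 0;
	}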
arch/arm/mm/cache-xsc3l2.c

@@ -17,12 +17,14 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include "mm.h"

 #define CR_L2	(1 << 26)

@@ -47,21 +49,11 @@ static inline void xsc3_l2_clean_mva(unsigned long addr)
 	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
 }

-static inline void xsc3_l2_clean_pa(unsigned long addr)
-{
-	xsc3_l2_clean_mva(__phys_to_virt(addr));
-}
-
 static inline void xsc3_l2_inv_mva(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
 }

-static inline void xsc3_l2_inv_pa(unsigned long addr)
-{
-	xsc3_l2_inv_mva(__phys_to_virt(addr));
-}
-
 static inline void xsc3_l2_inv_all(void)
 {
 	unsigned long l2ctype, set_way;

@@ -79,50 +71,103 @@ static inline void xsc3_l2_inv_all(void)
 	dsb();
 }

+#ifdef CONFIG_HIGHMEM
+#define l2_map_save_flags(x)		raw_local_save_flags(x)
+#define l2_map_restore_flags(x)		raw_local_irq_restore(x)
+#else
+#define l2_map_save_flags(x)		((x) = 0)
+#define l2_map_restore_flags(x)		((void)(x))
+#endif
+
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
+				      unsigned long flags)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long va = prev_va & PAGE_MASK;
+	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
+	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
+		/*
+		 * Switching to a new page.  Because cache ops are
+		 * using virtual addresses only, we must put a mapping
+		 * in place for it.  We also enable interrupts for a
+		 * short while and disable them again to protect this
+		 * mapping.
+		 */
+		unsigned long idx;
+		raw_local_irq_restore(flags);
+		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
+		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+		raw_local_irq_restore(flags | PSR_I_BIT);
+		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
+		local_flush_tlb_kernel_page(va);
+	}
+	return va + (pa_offset >> (32 - PAGE_SHIFT));
+#else
+	return __phys_to_virt(pa);
+#endif
+}
+
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
+	unsigned long vaddr, flags;
+
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_inv_all();
 		return;
 	}

+	vaddr = -1;		/* to force the first mapping */
+	l2_map_save_flags(flags);
+
 	/*
 	 * Clean and invalidate partial first cache line.
 	 */
 	if (start & (CACHE_LINE_SIZE - 1)) {
-		xsc3_l2_clean_pa(start & ~(CACHE_LINE_SIZE - 1));
-		xsc3_l2_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+		xsc3_l2_clean_mva(vaddr);
+		xsc3_l2_inv_mva(vaddr);
 		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
 	}

 	/*
-	 * Clean and invalidate partial last cache line.
+	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
-	if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
-		xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
-		xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
-		end &= ~(CACHE_LINE_SIZE - 1);
-	}
-
-	/*
-	 * Invalidate all full cache lines between 'start' and 'end'.
-	 */
-	while (start < end) {
-		xsc3_l2_inv_pa(start);
+	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
+		vaddr = l2_map_va(start, vaddr, flags);
+		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}

+	/*
+	 * Clean and invalidate partial last cache line.
+	 */
+	if (start < end) {
+		vaddr = l2_map_va(start, vaddr, flags);
+		xsc3_l2_clean_mva(vaddr);
+		xsc3_l2_inv_mva(vaddr);
+	}
+
+	l2_map_restore_flags(flags);
+
 	dsb();
 }

 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
+	unsigned long vaddr, flags;
+
+	vaddr = -1;		/* to force the first mapping */
+	l2_map_save_flags(flags);
+
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		xsc3_l2_clean_pa(start);
+		vaddr = l2_map_va(start, vaddr, flags);
+		xsc3_l2_clean_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}

+	l2_map_restore_flags(flags);
+
 	dsb();
 }

@@ -148,18 +193,26 @@ static inline void xsc3_l2_flush_all(void)
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
+	unsigned long vaddr, flags;
+
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_flush_all();
 		return;
 	}

+	vaddr = -1;		/* to force the first mapping */
+	l2_map_save_flags(flags);
+
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		xsc3_l2_clean_pa(start);
-		xsc3_l2_inv_pa(start);
+		vaddr = l2_map_va(start, vaddr, flags);
+		xsc3_l2_clean_mva(vaddr);
+		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}

+	l2_map_restore_flags(flags);
+
 	dsb();
 }
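Editor's note: l2_map_va() avoids re-installing the fixmap PTE on every cache line by shifting both the current physical address and the previous virtual address left by (32 - PAGE_SHIFT), which keeps only the in-page offset; the mapping is refreshed whenever that offset wraps back to a smaller value, i.e. on a page change. A standalone sketch of that check (assumed 4 KB pages and 32-bit addresses, as on ARM; not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12	/* assumed 4 KB pages */

	/* Non-zero when 'pa' falls in a new page relative to 'prev_va', i.e. when
	 * the fixmap PTE would have to be rewritten before the next cache op. */
	static int needs_new_mapping(uint32_t pa, uint32_t prev_va)
	{
		uint32_t pa_offset = pa << (32 - PAGE_SHIFT);
		return pa_offset < (uint32_t)(prev_va << (32 - PAGE_SHIFT));
	}

	int main(void)
	{
		/* Advancing within one page keeps the existing mapping: */
		printf("%d\n", needs_new_mapping(0x10040, 0xfff00020));	/* 0 */
		/* Stepping from offset 0xfe0 to offset 0x000 crosses a page: */
		printf("%d\n", needs_new_mapping(0x11000, 0xfff00fe0));	/* 1 */
		return 0;
	}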
arch/arm/mm/dma-mapping.c

@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>

 #include <asm/memory.h>
+#include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>

@@ -490,29 +491,101 @@ core_initcall(consistent_init);
  */
 void dma_cache_maint(const void *start, size_t size, int direction)
 {
-	const void *end = start + size;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);

-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));

 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dmac_inv_range(start, end);
-		outer_inv_range(__pa(start), __pa(end));
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dmac_clean_range(start, end);
-		outer_clean_range(__pa(start), __pa(end));
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dmac_flush_range(start, end);
-		outer_flush_range(__pa(start), __pa(end));
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
 		break;
 	default:
 		BUG();
 	}
+
+	inner_op(start, start + size);
+	outer_op(__pa(start), __pa(start) + size);
 }
 EXPORT_SYMBOL(dma_cache_maint);

+static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
+				       size_t size, int direction)
+{
+	void *vaddr;
+	unsigned long paddr;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:		/* invalidate only */
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
+		break;
+	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
+		break;
+	default:
+		BUG();
+	}
+
+	if (!PageHighMem(page)) {
+		vaddr = page_address(page) + offset;
+		inner_op(vaddr, vaddr + size);
+	} else {
+		vaddr = kmap_high_get(page);
+		if (vaddr) {
+			vaddr += offset;
+			inner_op(vaddr, vaddr + size);
+			kunmap_high(page);
+		}
+	}
+
+	paddr = page_to_phys(page) + offset;
+	outer_op(paddr, paddr + size);
+}
+
+void dma_cache_maint_page(struct page *page, unsigned long offset,
+			  size_t size, int dir)
+{
+	/*
+	 * A single sg entry may refer to multiple physically contiguous
+	 * pages.  But we still need to process highmem pages individually.
+	 * If highmem is not configured then the bulk of this loop gets
+	 * optimized out.
+	 */
+	size_t left = size;
+	do {
+		size_t len = left;
+		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
+			if (offset >= PAGE_SIZE) {
+				page += offset / PAGE_SIZE;
+				offset %= PAGE_SIZE;
+			}
+			len = PAGE_SIZE - offset;
+		}
+		dma_cache_maint_contiguous(page, offset, len, dir);
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+EXPORT_SYMBOL(dma_cache_maint_page);
+
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices

@@ -610,7 +683,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			continue;

 		if (!arch_is_coherent())
-			dma_cache_maint(sg_virt(s), s->length, dir);
+			dma_cache_maint_page(sg_page(s), s->offset,
+					     s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
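Editor's note: the do/while loop in dma_cache_maint_page() chops one scatterlist entry into per-page chunks only when the page is in highmem. A small standalone trace of the same splitting logic (no kernel dependencies; 4 KB pages assumed, and every page treated as highmem so the split is visible):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed */

	/* Mimics the chunking in dma_cache_maint_page() for a highmem buffer that
	 * starts 'offset' bytes into its first page and is 'size' bytes long. */
	static void trace_chunks(unsigned long offset, unsigned long size)
	{
		unsigned long page = 0, left = size;

		do {
			unsigned long len = left;

			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			printf("maintain page %lu, offset %lu, len %lu\n",
			       page, offset, len);
			offset = 0;
			page++;
			left -= len;
		} while (left);
	}

	int main(void)
	{
		trace_chunks(3000, 10000);	/* splits into four per-page chunks */
		return 0;
	}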
arch/arm/mm/flush.c

@@ -192,7 +192,7 @@ void flush_dcache_page(struct page *page)
 	struct address_space *mapping = page_mapping(page);

 #ifndef CONFIG_SMP
-	if (mapping && !mapping_mapped(mapping))
+	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 #endif
arch/arm/mm/highmem.c  (new file, mode 100644)

/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	september 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		__cpuc_flush_dcache_page((void *)vaddr);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	}
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);

void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}

struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}
arch/arm/mm/init.c

@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
+#include <linux/highmem.h>

 #include <asm/mach-types.h>
 #include <asm/sections.h>

@@ -382,7 +383,7 @@ void __init bootmem_init(void)
 	for_each_node(node)
 		bootmem_free_node(node, mi);

-	high_memory = __va(memend_pfn << PAGE_SHIFT);
+	high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;

 	/*
 	 * This doesn't seem to be used by the Linux memory manager any

@@ -485,7 +486,7 @@ void __init mem_init(void)
 	int i, node;

 #ifndef CONFIG_DISCONTIGMEM
-	max_mapnr   = virt_to_page(high_memory) - mem_map;
+	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 #endif

 	/* this will put all unused low memory onto the freelists */

@@ -504,6 +505,19 @@ void __init mem_init(void)
 			__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 #endif

+#ifdef CONFIG_HIGHMEM
+	/* set highmem page free */
+	for_each_online_node(node) {
+		for_each_nodebank (i, &meminfo, node) {
+			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
+			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
+			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
+				totalhigh_pages += free_area(start, end, NULL);
+		}
+	}
+	totalram_pages += totalhigh_pages;
+#endif
+
 	/*
 	 * Since our memory may not be contiguous, calculate the
 	 * real number of pages we have in this system

@@ -521,9 +535,10 @@ void __init mem_init(void)
 	initsize = __init_end - __init_begin;

 	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
-		"%dK data, %dK init)\n",
+		"%dK data, %dK init, %luK highmem)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		codesize >> 10, datasize >> 10, initsize >> 10);
+		codesize >> 10, datasize >> 10, initsize >> 10,
+		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

 	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
 		extern int sysctl_overcommit_memory;
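Editor's note: the rewritten high_memory expression appears aimed at RAM that ends exactly at the 4 GB physical boundary, where the one-past-the-end address (memend_pfn << PAGE_SHIFT) no longer fits in a 32-bit unsigned long; translating the last valid byte and adding one avoids ever forming that out-of-range address. A standalone illustration with assumed values (PHYS_OFFSET 0xf0000000, PAGE_OFFSET 0xc0000000, 256 MB of RAM ending at 4 GB); not kernel code.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PHYS_OFFSET	0xf0000000u	/* assumed platform values */
	#define PAGE_OFFSET	0xc0000000u

	/* 32-bit linear phys-to-virt, as on ARM: __va(p) = p - PHYS_OFFSET + PAGE_OFFSET */
	static uint32_t va(uint32_t phys)
	{
		return phys - PHYS_OFFSET + PAGE_OFFSET;
	}

	int main(void)
	{
		uint32_t memend_pfn = 0x100000;	/* RAM ends exactly at 4 GB */

		/* The one-past-the-end physical address wraps to 0 in 32 bits: */
		printf("end phys    = 0x%08x\n", memend_pfn << PAGE_SHIFT);

		/* Translating the last valid byte and adding 1 stays well defined: */
		printf("high_memory = 0x%08x\n", va((memend_pfn << PAGE_SHIFT) - 1) + 1);
		return 0;
	}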
arch/arm/mm/mm.h

-/* the upper-most page table pointer */
-
 #ifdef CONFIG_MMU

+/* the upper-most page table pointer */
 extern pmd_t *top_pmd;

 #define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
arch/arm/mm/mmap.c

@@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size)
 {
 	if (addr < PHYS_OFFSET)
 		return 0;
-	if (addr + size > __pa(high_memory))
+	if (addr + size >= __pa(high_memory - 1))
 		return 0;

 	return 1;
arch/arm/mm/mmu.c

@@ -18,9 +18,11 @@
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
 #include <asm/sections.h>
+#include <asm/cachetype.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/highmem.h>

 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>

@@ -700,6 +702,10 @@ static void __init sanity_check_meminfo(void)
 		if (meminfo.nr_banks >= NR_BANKS) {
 			printk(KERN_CRIT "NR_BANKS too low, "
 					 "ignoring high memory\n");
+		} else if (cache_is_vipt_aliasing()) {
+			printk(KERN_CRIT "HIGHMEM is not yet supported "
+					 "with VIPT aliasing cache, "
+					 "ignoring high memory\n");
 		} else {
 			memmove(bank + 1, bank,
 				(meminfo.nr_banks - i) * sizeof(*bank));

@@ -918,6 +924,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	flush_cache_all();
 }

+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
+	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+	BUG_ON(!pmd_none(*pmd) || !pte);
+	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
+	pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.

@@ -931,6 +948,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	prepare_page_table();
 	bootmem_init();
 	devicemaps_init(mdesc);
+	kmap_init();

 	top_pmd = pmd_off_k(0xffff0000);
arch/arm/plat-omap/include/mach/memory.h

@@ -61,9 +61,11 @@
 #define lbus_to_virt(x)		((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET)
 #define is_lbus_device(dev)	(cpu_is_omap15xx() && dev && (strncmp(dev_name(dev), "ohci", 4) == 0))

-#define __arch_page_to_dma(dev, page)	({is_lbus_device(dev) ? \
-					(dma_addr_t)virt_to_lbus(page_address(page)) : \
-					(dma_addr_t)__virt_to_phys(page_address(page));})
+#define __arch_page_to_dma(dev, page)	\
+	({ dma_addr_t __dma = page_to_phys(page); \
+	   if (is_lbus_device(dev)) \
+		__dma = __dma - PHYS_OFFSET + OMAP1510_LB_OFFSET; \
+	   __dma; })

 #define __arch_dma_to_virt(dev, addr)	({ (void *) (is_lbus_device(dev) ? \
 					lbus_to_virt(addr) : \
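Editor's note: the machine-specific __arch_page_to_dma() variants in this commit switch from page_address() (undefined for an unmapped highmem page) to page_to_phys() plus a fixed bus offset. A standalone sketch of that style of translation; the constants standing in for PHYS_OFFSET and a local-bus base are assumed for illustration, not taken from any real board file.

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed example constants, standing in for PHYS_OFFSET and a local-bus base. */
	#define PHYS_OFFSET	0x10000000u
	#define LB_OFFSET	0x30000000u

	/* Translate a page's physical address to what the bus master sees,
	 * mirroring the page_to_phys()-based __arch_page_to_dma() rewrites. */
	static uint32_t page_phys_to_dma(uint32_t page_phys, int is_lbus_device)
	{
		uint32_t dma = page_phys;

		if (is_lbus_device)
			dma = dma - PHYS_OFFSET + LB_OFFSET;
		return dma;
	}

	int main(void)
	{
		printf("cpu view:  0x%08x\n", page_phys_to_dma(0x10200000, 0));
		printf("lbus view: 0x%08x\n", page_phys_to_dma(0x10200000, 1));
		return 0;
	}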
mm/highmem.c

@@ -67,6 +67,25 @@ pte_t * pkmap_page_table;

 static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQ out of the locking in that case to save on a
+ * potential useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap()             spin_lock_irq(&kmap_lock)
+#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap()             spin_lock(&kmap_lock)
+#define unlock_kmap()           spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags)    \
+		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags)  \
+		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
 static void flush_all_zero_pkmaps(void)
 {
 	int i;

@@ -113,9 +132,9 @@ static void flush_all_zero_pkmaps(void)
  */
 void kmap_flush_unused(void)
 {
-	spin_lock(&kmap_lock);
+	lock_kmap();
 	flush_all_zero_pkmaps();
-	spin_unlock(&kmap_lock);
+	unlock_kmap();
 }

 static inline unsigned long map_new_virtual(struct page *page)

@@ -145,10 +164,10 @@ start:
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&pkmap_map_wait, &wait);
-		spin_unlock(&kmap_lock);
+		unlock_kmap();
 		schedule();
 		remove_wait_queue(&pkmap_map_wait, &wait);
-		spin_lock(&kmap_lock);
+		lock_kmap();

 		/* Somebody else might have mapped it while we slept */
 		if (page_address(page))

@@ -184,29 +203,59 @@ void *kmap_high(struct page *page)
 	 * For highmem pages, we can't trust "virtual" until
 	 * after we have the lock.
 	 */
-	spin_lock(&kmap_lock);
+	lock_kmap();
 	vaddr = (unsigned long)page_address(page);
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
 	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
-	spin_unlock(&kmap_lock);
+	unlock_kmap();
 	return (void*) vaddr;
 }

 EXPORT_SYMBOL(kmap_high);

+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+/**
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no mapping
+ * exists.  When and only when a non null address is returned then a
+ * matching call to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+	unsigned long vaddr, flags;
+
+	lock_kmap_any(flags);
+	vaddr = (unsigned long)page_address(page);
+	if (vaddr) {
+		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+		pkmap_count[PKMAP_NR(vaddr)]++;
+	}
+	unlock_kmap_any(flags);
+	return (void*) vaddr;
+}
+#endif
+
 /**
  * kunmap_high - map a highmem page into memory
  * @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
  */
 void kunmap_high(struct page *page)
 {
 	unsigned long vaddr;
 	unsigned long nr;
+	unsigned long flags;
 	int need_wakeup;

-	spin_lock(&kmap_lock);
+	lock_kmap_any(flags);
 	vaddr = (unsigned long)page_address(page);
 	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);

@@ -232,7 +281,7 @@ void kunmap_high(struct page *page)
 		 */
 		need_wakeup = waitqueue_active(&pkmap_map_wait);
 	}
-	spin_unlock(&kmap_lock);
+	unlock_kmap_any(flags);

 	/* do wake-up, if needed, race-free outside of the spin lock */
 	if (need_wakeup)
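Editor's note: kmap_high_get() only returns a non-NULL address when a pkmap mapping already exists, and in that case it takes an extra reference that must be dropped with kunmap_high(); it never creates a mapping, which is why it is safe from any context. A hedged sketch of the pattern on ARM (the helper name maint_existing_mapping is hypothetical; it mirrors how dma_cache_maint_contiguous() in this commit uses the interface):

	#include <linux/highmem.h>
	#include <asm/cacheflush.h>

	/* Hypothetical helper: clean the D-cache for a highmem page, but only if
	 * the kernel already happens to have it mapped via kmap(). */
	static void maint_existing_mapping(struct page *page, size_t size)
	{
		void *vaddr = kmap_high_get(page);	/* pins the mapping, or NULL */

		if (vaddr) {
			dmac_clean_range(vaddr, vaddr + size);
			kunmap_high(page);		/* drop the extra reference */
		}
	}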