linux-davinci · Commit c6baa196
authored Dec 04, 2009 by Russell King

Merge branch 'pending-dma-coherent' into devel

parents 5cb2faa6 26a26d32
Showing 6 changed files with 366 additions and 326 deletions.
arch/arm/include/asm/pgtable.h   +12  −2
arch/arm/include/asm/system.h    +12  −7
arch/arm/mm/Makefile             +1   −1
arch/arm/mm/dma-mapping.c        +181 −316
arch/arm/mm/vmregion.c           +131 −0
arch/arm/mm/vmregion.h           +29  −0
arch/arm/include/asm/pgtable.h

@@ -304,13 +304,23 @@ PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
 
 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
+#define __pgprot_modify(prot,mask,bits)		\
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) \
-	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 #define pgprot_writecombine(prot) \
-	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+#if __LINUX_ARM_ARCH__ >= 7
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#else
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+#endif
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
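The hunk above factors the clear-then-set idiom into __pgprot_modify() and uses it to define pgprot_dmacoherent(), which selects bufferable (normal, non-cacheable) memory on ARMv7 and strongly uncached memory on earlier architectures. A minimal, standalone sketch of the same bit-manipulation pattern follows; the constants are invented stand-ins for the real L_PTE_* values, which are architecture-specific:

#include <stdio.h>

/* Invented stand-ins for L_PTE_MT_MASK / L_PTE_MT_UNCACHED. */
#define MT_MASK		0x3c
#define MT_UNCACHED	0x04

/* Same shape as __pgprot_modify: clear the masked field, then set the new bits. */
#define prot_modify(prot, mask, bits)	(((prot) & ~(mask)) | (bits))

int main(void)
{
	unsigned long prot = 0xffUL;
	/* 0xff & ~0x3c = 0xc3; 0xc3 | 0x04 = 0xc7 */
	printf("%#lx\n", prot_modify(prot, MT_MASK, MT_UNCACHED));
	return 0;
}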
arch/arm/include/asm/system.h

@@ -138,21 +138,26 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#ifndef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		dmb()
+#define rmb()		dmb()
+#define wmb()		dmb()
+#else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#endif
+
+#ifndef CONFIG_SMP
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define mb()		dmb()
-#define rmb()		dmb()
-#define wmb()		dmb()
-#define smp_mb()	dmb()
-#define smp_rmb()	dmb()
-#define smp_wmb()	dmb()
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
 #endif
 
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
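After this change, mb()/rmb()/wmb() compile to a real dmb() whenever the hardware may reorder memory accesses (ARMv7, or any SMP build), and to a plain compiler barrier otherwise, while the smp_*() variants are defined in terms of the full barriers on SMP. A minimal userspace sketch of the classic pairing these barriers enable is below; GCC's __sync_synchronize() stands in for the kernel's dmb()-backed barriers, and all names here are invented for illustration:

#include <pthread.h>
#include <stdio.h>

static int data;
static volatile int ready;

static void *producer(void *arg)
{
	data = 42;
	__sync_synchronize();	/* plays the role of wmb(): publish data before the flag */
	ready = 1;
	return NULL;
}

static void *consumer(void *arg)
{
	while (!ready)
		;		/* spin until the flag is observed */
	__sync_synchronize();	/* plays the role of rmb(): order the flag read before the data read */
	printf("%d\n", data);	/* prints 42: the data write is ordered before the flag write */
	return NULL;
}

int main(void)
{
	pthread_t p, c;
	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}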
arch/arm/mm/Makefile

@@ -6,7 +6,7 @@ obj-y	:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
-				   pgd.o mmu.o
+				   pgd.o mmu.o vmregion.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
arch/arm/mm/dma-mapping.c

This diff is collapsed.
arch/arm/mm/vmregion.c (new file, mode 0 → 100644)
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "vmregion.h"
/*
* VM region handling support.
*
* This should become something generic, handling VM region allocations for
* vmalloc and similar (ioremap, module space, etc).
*
* I envisage vmalloc()'s supporting vm_struct becoming:
*
* struct vm_struct {
* struct vmregion region;
* unsigned long flags;
* struct page **pages;
* unsigned int nr_pages;
* unsigned long phys_addr;
* };
*
* get_vm_area() would then call vmregion_alloc with an appropriate
* struct vmregion head (eg):
*
* struct vmregion vmalloc_head = {
* .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
* .vm_start = VMALLOC_START,
* .vm_end = VMALLOC_END,
* };
*
* However, vmalloc_head.vm_start is variable (typically, it is dependent on
* the amount of RAM found at boot time.) I would imagine that get_vm_area()
* would have to initialise this each time prior to calling vmregion_alloc().
*/
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct arm_vmregion *c, *new;

	if (head->vm_end - head->vm_start < size) {
		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
			__func__, size);
		goto out;
	}

	new = kmalloc(sizeof(struct arm_vmregion), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&head->vm_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&head->vm_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	if (c)
		c->vm_active = 0;
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}
arch/arm/mm/vmregion.h (new file, mode 0 → 100644)
#ifndef VMREGION_H
#define VMREGION_H
#include <linux/spinlock.h>
#include <linux/list.h>
struct page;

struct arm_vmregion_head {
	spinlock_t		vm_lock;
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

struct arm_vmregion {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t);
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
#endif
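A hedged sketch of how a consumer, such as the rewritten dma-mapping.c, might drive this API. The window bounds, the head's name, and the example function below are all invented for illustration; only the arm_vmregion_* calls and the struct initialisers come from the files above:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "vmregion.h"

/* Hypothetical virtual address window to carve regions from. */
#define EXAMPLE_VM_START	0xffc00000UL
#define EXAMPLE_VM_END		0xffe00000UL

static struct arm_vmregion_head example_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(example_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(example_head.vm_list),
	.vm_start	= EXAMPLE_VM_START,
	.vm_end		= EXAMPLE_VM_END,
};

static void example(void)
{
	/* Reserve a 64KiB slice of the window; NULL means no space left. */
	struct arm_vmregion *c = arm_vmregion_alloc(&example_head, 0x10000, GFP_KERNEL);
	if (!c)
		return;

	/* The caller now owns [c->vm_start, c->vm_end) and may map pages there. */

	arm_vmregion_free(&example_head, c);	/* unlink and kfree the descriptor */
}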