Commit e150e7f2 authored Nov 21, 2007 by Paul Mundt

sh: Kill off arch/sh64/mm.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

parent df0fb256

Showing 4 changed files with 0 additions and 391 deletions
arch/sh64/mm/Makefile       +0 -44
arch/sh64/mm/consistent.c   +0 -53
arch/sh64/mm/hugetlbpage.c  +0 -105
arch/sh64/mm/init.c         +0 -189
arch/sh64/mm/Makefile (deleted, mode 100644 → 0)
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2000, 2001 Paolo Alberelli
# Copyright (C) 2003, 2004 Paul Mundt
#
# Makefile for the sh64-specific parts of the Linux memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
obj-y	:= cache.o consistent.o extable.o fault.o init.o ioremap.o \
	   tlbmiss.o tlb.o

obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
# Special flags for tlbmiss.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.
# This is required because the code is called from a context in entry.S where
# very few registers have been saved in the exception handler (for speed
# reasons).
# The caller save registers that have been saved and which can be used are
# r2,r3,r4,r5 : argument passing
# r15, r18 : SP and LINK
# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make
# use of them, so it's probably beneficial to performance to save them
# and have them available for it.
#
# The resources not listed below are callee save, i.e. the compiler is free to
# use any of them and will spill them to the stack itself.
CFLAGS_tlbmiss.o += -ffixed-r7 \
	-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
	-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
	-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
	-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
	-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
	-ffixed-r41 -ffixed-r42 -ffixed-r43 \
	-ffixed-r60 -ffixed-r61 -ffixed-r62 \
	-fomit-frame-pointer
arch/sh64/mm/consistent.c (deleted, mode 100644 → 0)
/*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003 Paul Mundt (lethal@linux-sh.org)
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Dynamic DMA mapping support.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/io.h>
void *consistent_alloc(struct pci_dev *hwdev, size_t size,
		       dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC;
	void *vp;

	if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	/* now call our friend ioremap_nocache to give us an uncached area */
	vp = ioremap_nocache(virt_to_phys(ret), size);

	if (vp != NULL) {
		memset(vp, 0, size);
		*dma_handle = virt_to_phys(ret);
		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
	}

	return vp;
}
EXPORT_SYMBOL(consistent_alloc);

void consistent_free(struct pci_dev *hwdev, size_t size,
		     void *vaddr, dma_addr_t dma_handle)
{
	void *alloc;

	alloc = phys_to_virt((unsigned long)dma_handle);
	free_pages((unsigned long)alloc, get_order(size));

	iounmap(vaddr);
}
EXPORT_SYMBOL(consistent_free);
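For context, a minimal sketch of how a driver of that era might have called this pair. The device setup/teardown functions, the `my_` names, and the buffer size are hypothetical, and the snippet assumes the surrounding 2007 sh64 kernel tree rather than building standalone:

/* Hypothetical caller, for illustration only; not part of this commit. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define MY_BUF_SIZE 4096		/* hypothetical buffer size */

static void *my_buf;			/* uncached CPU-side mapping */
static dma_addr_t my_buf_dma;		/* bus address handed to the device */

static int my_driver_setup(struct pci_dev *pdev)
{
	/* consistent_alloc() returns the uncached virtual address and
	 * fills in the DMA handle for the device's side of the buffer. */
	my_buf = consistent_alloc(pdev, MY_BUF_SIZE, &my_buf_dma);
	if (my_buf == NULL)
		return -ENOMEM;
	return 0;
}

static void my_driver_teardown(struct pci_dev *pdev)
{
	/* consistent_free() wants both addresses back: the virtual one
	 * for iounmap(), the DMA handle to locate and free the pages. */
	consistent_free(pdev, MY_BUF_SIZE, my_buf, my_buf_dma);
}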
arch/sh64/mm/hugetlbpage.c (deleted, mode 100644 → 0)
/*
* arch/sh64/mm/hugetlbpage.c
*
* SuperH HugeTLB page support.
*
* Cloned from sparc64 by Paul Mundt.
*
* Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
*/
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_alloc(mm, pgd, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_offset(pgd, addr);
		if (pmd)
			pte = pte_offset_map(pmd, addr);
	}
	return pte;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
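Note how set_huge_pte_at() and huge_ptep_get_and_clear() above emulate one huge mapping with (1 << HUGETLB_PAGE_ORDER) consecutive base PTEs, stepping both the virtual address and the PTE value by PAGE_SIZE each iteration. A standalone sketch of that arithmetic, using assumed example constants (the real values live in the sh64 headers):

/* User-space illustration only; constants are assumed examples. */
#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumed 4 KiB base pages */
#define HUGETLB_PAGE_ORDER	4	/* assumed: 2^4 base pages per huge page */

int main(void)
{
	unsigned long n = 1UL << HUGETLB_PAGE_ORDER;
	unsigned long addr = 0x400000UL;	/* assumed huge-page VA */
	unsigned long i;

	printf("%lu PTEs per %lu KiB huge page\n", n, n * PAGE_SIZE / 1024);
	for (i = 0; i < n; i++)
		printf("pte[%lu] maps %#lx\n", i, addr + i * PAGE_SIZE);
	return 0;
}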
arch/sh64/mm/init.c (deleted, mode 100644 → 0)
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/mm/init.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
*/
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache;

pgd_t *mmu_pdtp_cache;

int after_bootmem = 0;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
extern unsigned char empty_zero_page[PAGE_SIZE];
extern unsigned char empty_bad_page[PAGE_SIZE];
extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/* It'd be good if these lines were in the standard header file. */
#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)

void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (PageSwapCache(mem_map + i))
			cached++;
		else if (page_count(mem_map + i))
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", quicklist_total_size());
}

/*
 * paging_init() sets up the page tables.
 *
 * head.S already did a lot to set up address translation for the kernel.
 * Here we comes with:
 * . MMU enabled
 * . ASID set (SR)
 * . some 512MB regions being mapped of which the most relevant here is:
 *   . CACHED segment (ASID 0 [irrelevant], shared AND NOT user)
 * . possible variable length regions being mapped as:
 *   . UNCACHED segment (ASID 0 [irrelevant], shared AND NOT user)
 * . All of the memory regions are placed, independently from the platform
 *   on high addresses, above 0x80000000.
 * . swapper_pg_dir is already cleared out by the .space directive
 *   in any case swapper does not require a real page directory since
 *   it's all kernel contained.
 *
 * Those pesky NULL-reference errors in the kernel are then
 * dealt with by not mapping address 0x00000000 at all.
 *
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

	pgd_init((unsigned long)swapper_pg_dir);
	pgd_init((unsigned long)swapper_pg_dir +
		 sizeof(pgd_t) * USER_PTRS_PER_PGD);

	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;

	zones_size[ZONE_NORMAL] = MAX_LOW_PFN - START_PFN;
	NODE_DATA(0)->node_mem_map = NULL;
	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __MEMORY_START >> PAGE_SHIFT, 0);
}

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);

	/*
	 * Clear the zero-page.
	 * This is not required but we might want to re-use
	 * this very page to pass boot parameters, one day.
	 */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem_node(NODE_DATA(0));

	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(mem_map + tmp))
			reservedpages++;

	after_bootmem = 1;

	codesize = (unsigned long)&_etext - (unsigned long)&_text;
	datasize = (unsigned long)&_edata - (unsigned long)&_etext;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;

	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10);
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
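As a closing aside, the printk() in mem_init() converts page counts to kilobytes by shifting left by (PAGE_SHIFT - 10), since each page is 2^(PAGE_SHIFT - 10) KiB, while byte counts such as codesize are simply shifted right by 10. A standalone sketch of those conversions, assuming 4 KiB pages (PAGE_SHIFT = 12) and made-up sizes:

/* User-space illustration of mem_init()'s unit conversions. */
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */

int main(void)
{
	unsigned long pages = 8192;		/* assumed: 32 MiB of RAM */
	unsigned long code_bytes = 123456;	/* assumed codesize */

	/* pages -> KiB: each page is 1 << (PAGE_SHIFT - 10) KiB */
	printf("%luk available\n", pages << (PAGE_SHIFT - 10));

	/* bytes -> KiB: a plain shift by 10 */
	printf("%luk kernel code\n", code_bytes >> 10);
	return 0;
}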