Commit c3df69cd
Authored Nov 04, 2005 by Paul Mackerras

    Merge git://oak/home/sfr/kernels/iseries/work

Parents: 292a6c58, 1970282f
Showing 4 changed files with 182 additions and 114 deletions (+182 −114)
include/asm-powerpc/tlb.h       +36   −23
include/asm-powerpc/tlbflush.h  +146  −0
include/asm-ppc64/tlb.h         +0    −39
include/asm-ppc64/tlbflush.h    +0    −52
include/asm-ppc/tlb.h → include/asm-powerpc/tlb.h
 /*
- *	TLB shootdown specifics for PPC
+ *	TLB shootdown specifics for powerpc
  *
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
  * Copyright (C) 2002 Paul Mackerras, IBM Corp.
  *
  * This program is free software; you can redistribute it and/or
 ...
@@ -8,29 +9,53 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
-#ifndef _PPC_TLB_H
-#define _PPC_TLB_H
+#ifndef _ASM_POWERPC_TLB_H
+#define _ASM_POWERPC_TLB_H
 
 #include <linux/config.h>
+#ifndef __powerpc64__
 #include <asm/pgtable.h>
+#endif
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#ifndef __powerpc64__
 #include <asm/page.h>
 #include <asm/mmu.h>
+#endif
 
-#ifdef CONFIG_PPC_STD_MMU
-/* Classic PPC with hash-table based MMU... */
-
 struct mmu_gather;
-extern void tlb_flush(struct mmu_gather *tlb);
+
+#define tlb_start_vma(tlb, vma)	do { } while (0)
+#define tlb_end_vma(tlb, vma)	do { } while (0)
+
+#if !defined(CONFIG_PPC_STD_MMU)
+
+#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)
+
+#elif defined(__powerpc64__)
+
+extern void pte_free_finish(void);
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	flush_tlb_pending();
+	pte_free_finish();
+}
+
+#else
+
+extern void tlb_flush(struct mmu_gather *tlb);
+
+#endif
 
 /* Get the generic bits... */
 #include <asm-generic/tlb.h>
 
-/* Nothing needed here in fact... */
-#define tlb_start_vma(tlb, vma)	do { } while (0)
-#define tlb_end_vma(tlb, vma)	do { } while (0)
-
+#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
+
+#else
 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
 			     unsigned long address);
 ...
@@ -41,17 +66,5 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 		flush_hash_entry(tlb->mm, ptep, address);
 }
 
-#else
-/* Embedded PPC with software-loaded TLB, very simple... */
-
-#define tlb_start_vma(tlb, vma)		do { } while (0)
-#define tlb_end_vma(tlb, vma)		do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
-#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)
-
-/* Get the generic bits... */
-#include <asm-generic/tlb.h>
-
-#endif /* CONFIG_PPC_STD_MMU */
-
-#endif /* __PPC_TLB_H */
+#endif
+#endif /* __ASM_POWERPC_TLB_H */
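Aside: the hooks this header defines (tlb_start_vma, __tlb_remove_tlb_entry, tlb_end_vma, tlb_flush) are invoked by the generic mmu_gather code pulled in from asm-generic/tlb.h when mappings are torn down. As a rough illustration of that call ordering only — this is a hedged user-space sketch, not kernel code, and every *_model name below is invented for the example:

/* Hypothetical user-space model of the mmu_gather hook ordering.
 * It only mirrors the shape of the generic teardown path; nothing
 * here is real kernel API. */
#include <stdio.h>

struct mmu_gather_model { int mm_id; int pending; };

/* Stand-ins for the per-arch hooks defined in this header. */
static void tlb_start_vma_model(struct mmu_gather_model *tlb) { (void)tlb; }
static void tlb_end_vma_model(struct mmu_gather_model *tlb)   { (void)tlb; }

static void __tlb_remove_tlb_entry_model(struct mmu_gather_model *tlb,
                                         unsigned long address)
{
    /* On 32-bit hash-MMU, this is where flush_hash_entry() would drop
     * the stale hash-table entry for the PTE being cleared. */
    printf("  remove entry for address 0x%lx\n", address);
    tlb->pending++;
}

static void tlb_flush_model(struct mmu_gather_model *tlb)
{
    /* On 64-bit this corresponds to flush_tlb_pending() + pte_free_finish(). */
    printf("flush %d pending invalidations for mm %d\n",
           tlb->pending, tlb->mm_id);
    tlb->pending = 0;
}

int main(void)
{
    struct mmu_gather_model tlb = { .mm_id = 1, .pending = 0 };

    tlb_start_vma_model(&tlb);                     /* per-VMA prologue (no-op here) */
    for (unsigned long addr = 0x1000; addr < 0x4000; addr += 0x1000)
        __tlb_remove_tlb_entry_model(&tlb, addr);  /* once per PTE cleared */
    tlb_end_vma_model(&tlb);                       /* per-VMA epilogue (no-op here) */
    tlb_flush_model(&tlb);                         /* final flush for the batch */
    return 0;
}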
include/asm-ppc/tlbflush.h → include/asm-powerpc/tlbflush.h
+#ifndef _ASM_POWERPC_TLBFLUSH_H
+#define _ASM_POWERPC_TLBFLUSH_H
 /*
- * include/asm-ppc/tlbflush.h
+ * TLB flushing:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
 ...
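Aside, before the next hunk: the comment above spells out the contract every implementation in this file has to satisfy — update the page tables first, then issue the matching flush for the affected page or range. A minimal user-space sketch of that calling pattern, assuming toy stand-ins (set_pte_model, flush_tlb_page_model, flush_tlb_range_model are invented for illustration and are not the kernel's API):

/* Illustrative model of the flush_tlb_* contract described above.
 * All names are stand-ins, not kernel API. */
#include <stdio.h>

static unsigned long page_table[4];          /* toy "page table" */

static void flush_tlb_page_model(unsigned long vmaddr)
{
    printf("invalidate translation for 0x%lx\n", vmaddr);
}

static void flush_tlb_range_model(unsigned long start, unsigned long end)
{
    printf("invalidate translations for [0x%lx, 0x%lx)\n", start, end);
}

static void set_pte_model(int idx, unsigned long pfn)
{
    page_table[idx] = pfn;                   /* 1. update the page table ... */
}

int main(void)
{
    set_pte_model(0, 0x1234);
    flush_tlb_page_model(0x1000);            /* ... 2. then flush the stale entry */

    for (int i = 0; i < 4; i++)
        set_pte_model(i, 0);
    flush_tlb_range_model(0x1000, 0x5000);   /* one flush covers a whole range */
    return 0;
}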
@@ -7,87 +16,120 @@
  * 2 of the License, or (at your option) any later version.
  */
 #ifdef __KERNEL__
-#ifndef _PPC_TLBFLUSH_H
-#define _PPC_TLBFLUSH_H
 
 #include <linux/config.h>
+
+struct mm_struct;
+
+#ifdef CONFIG_PPC64
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+	unsigned long index;
+	struct mm_struct *mm;
+	pte_t pte[PPC64_TLB_BATCH_NR];
+	unsigned long vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int large;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+static inline void flush_tlb_pending(void)
+{
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+
+	if (batch->index)
+		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
+}
+
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
+
+#else /* CONFIG_PPC64 */
+
 #include <linux/mm.h>
 
 extern void _tlbie(unsigned long address);
 extern void _tlbia(void);
 
-#if defined(CONFIG_4xx)
+/*
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
 
-#ifndef CONFIG_44x
-#define __tlbia()	asm volatile ("sync; tlbia; isync" : : : "memory")
-#else
-#define __tlbia	_tlbia
+#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
+#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
+#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
+#define flush_tlb_pending()	_tlbia()
 #endif
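Aside on the 64-bit path above: invalidations are not issued one by one; they are queued in the per-CPU ppc64_tlb_batch (up to PPC64_TLB_BATCH_NR entries) and pushed out by __flush_tlb_pending(), either when the batch fills or when flush_tlb_pending() is called. A small user-space sketch of that accumulate-then-flush pattern — the names below are modelled on the header but nothing here is kernel code, and the batch size is shrunk for the demo:

/* User-space model of the batch-and-flush idea behind ppc64_tlb_batch. */
#include <stdio.h>

#define BATCH_NR 4                      /* the real header uses 192 */

struct tlb_batch_model {
    unsigned long index;
    unsigned long vaddr[BATCH_NR];
};

static void flush_pending_model(struct tlb_batch_model *b)
{
    for (unsigned long i = 0; i < b->index; i++)
        printf("invalidate 0x%lx\n", b->vaddr[i]);
    b->index = 0;                       /* batch is now empty */
}

static void queue_invalidate_model(struct tlb_batch_model *b, unsigned long va)
{
    b->vaddr[b->index++] = va;          /* defer the invalidation */
    if (b->index == BATCH_NR)           /* flush once the batch fills up */
        flush_pending_model(b);
}

int main(void)
{
    struct tlb_batch_model batch = { 0 };

    for (unsigned long va = 0x1000; va <= 0x6000; va += 0x1000)
        queue_invalidate_model(&batch, va);
    flush_pending_model(&batch);        /* flush whatever is still queued */
    return 0;
}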
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
-
-#elif defined(CONFIG_FSL_BOOKE)
-
-/* TODO: determine if flush_tlb_range & flush_tlb_kernel_range
- * are best implemented as tlbia vs specific tlbie's */
-
-#define __tlbia()	_tlbia()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
-
-#elif defined(CONFIG_8xx)
-#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+#endif /* CONFIG_PPC64 */
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
+	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	flush_tlb_pending();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				unsigned long vmaddr)
+{
+#ifdef CONFIG_PPC64
+	flush_tlb_pending();
+#else
+	_tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+#ifndef CONFIG_PPC64
+	_tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
+{
+	flush_tlb_pending();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+		unsigned long end)
+{
+	flush_tlb_pending();
+}
 
 #else	/* 6xx, 7xx, 7xxx cpus */
 
 struct mm_struct;
 struct vm_area_struct;
 
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #endif
 /*
 ...
@@ -96,20 +138,9 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  * about our page-table pages.  -- paulus
  */
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				unsigned long start, unsigned long end)
+				      unsigned long start, unsigned long end)
 {
 }
 
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* _PPC_TLBFLUSH_H */
 #endif /*__KERNEL__ */
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
include/asm-ppc64/tlb.h — deleted (100644 → 0)
/*
 *	TLB shootdown specifics for PPC64
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _PPC64_TLB_H
#define _PPC64_TLB_H

#include <asm/tlbflush.h>

struct mmu_gather;

extern void pte_free_finish(void);

static inline void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_pending();
	pte_free_finish();
}

/* Avoid pulling in another include just for this */
#define check_pgt_cache()	do { } while (0)

/* Get the generic bits... */
#include <asm-generic/tlb.h>

/* Nothing needed here in fact... */
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)

#endif /* _PPC64_TLB_H */
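Aside: note the ordering inside tlb_flush() above, which the new asm-powerpc/tlb.h keeps for the 64-bit case — pending TLB invalidations are pushed out first, and only then does pte_free_finish() actually release the page-table pages queued for deferred freeing, so no CPU can still hold a stale translation into a page that has been handed back. A hedged user-space sketch of that "flush first, then really free" ordering (queue_free_model and friends are invented for illustration, not kernel code):

/* User-space model of the flush-then-free pairing used by tlb_flush(). */
#include <stdio.h>
#include <stdlib.h>

#define MAX_DEFERRED 8

static void *free_list[MAX_DEFERRED];   /* pages queued for deferred freeing */
static int nr_deferred;

static void queue_free_model(void *page)
{
    free_list[nr_deferred++] = page;     /* don't free yet: may still be referenced */
}

static void flush_pending_model(void)
{
    printf("flush pending TLB invalidations\n");
}

static void pte_free_finish_model(void)
{
    for (int i = 0; i < nr_deferred; i++)
        free(free_list[i]);              /* safe only after the flush above */
    nr_deferred = 0;
}

int main(void)
{
    queue_free_model(malloc(4096));
    queue_free_model(malloc(4096));

    flush_pending_model();               /* step 1: invalidate stale translations */
    pte_free_finish_model();             /* step 2: now the pages can be reused */
    return 0;
}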
include/asm-ppc64/tlbflush.h — deleted (100644 → 0)
#ifndef _PPC64_TLBFLUSH_H
#define _PPC64_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct mm_struct;

struct ppc64_tlb_batch {
	unsigned long index;
	struct mm_struct *mm;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
	unsigned int large;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

static inline void flush_tlb_pending(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

#define flush_tlb_mm(mm)			flush_tlb_pending()
#define flush_tlb_page(vma, addr)		flush_tlb_pending()
#define flush_tlb_page_nohash(vma, addr)	do { } while (0)
#define flush_tlb_range(vma, start, end) \
		do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
#define flush_tlb_pgtables(mm, start, end)	do { } while (0)

extern void flush_hash_page(unsigned long va, pte_t pte, int local);
void flush_hash_range(unsigned long number, int local);

#endif /* _PPC64_TLBFLUSH_H */