Commit 71ff49d7 authored by Jeremy Fitzhardinge, committed by Ingo Molnar

x86: with the last user gone, remove set_pte_present

Impact: cleanup

set_pte_present() is no longer used, directly or indirectly,
so remove it.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Xen-devel <xen-devel@lists.xensource.com>
Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Avi Kivity <avi@redhat.com>
LKML-Reference: <1237406613-2929-2-git-send-email-jeremy@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b40c7579
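For reference, what set_pte_present() actually did on 32-bit PAE explains why a dedicated hook existed at all. The sketch below is lifted from the native_set_pte_present() implementation removed in the pgtable-3level hunk further down; the wrapper name and the dropped mm/addr arguments are illustrative only. The present bit lives in the low word of the PTE, so the low word is cleared first, the high word is written next, and the low word is written last, with write barriers keeping that order so the hardware never sees a present PTE with a stale high word.

static inline void pae_set_pte_present_sketch(pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;		/* clear the present bit first */
	smp_wmb();			/* order the clear before the high-word write */
	ptep->pte_high = pte.pte_high;	/* install the new high word while not-present */
	smp_wmb();			/* order the high word before re-setting present */
	ptep->pte_low = pte.pte_low;	/* publish: the PTE becomes present again */
}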
@@ -317,8 +317,6 @@ struct pv_mmu_ops {
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
-	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
-				pte_t *ptep, pte_t pte);
 	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
 			  pte_t *ptep);
 	void (*pmd_clear)(pmd_t *pmdp);
@@ -1365,13 +1363,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 		    pte.pte, pte.pte >> 32);
 }
 
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
-				   pte_t *ptep, pte_t pte)
-{
-	/* 5 arg words */
-	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
-}
-
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t *ptep)
 {
@@ -1388,12 +1379,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 	set_pte(ptep, pte);
 }
 
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
-				   pte_t *ptep, pte_t pte)
-{
-	set_pte(ptep, pte);
-}
-
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t *ptep)
 {
...
@@ -26,13 +26,6 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 	native_set_pte(ptep, pte);
 }
 
-static inline void native_set_pte_present(struct mm_struct *mm,
-					  unsigned long addr,
-					  pte_t *ptep, pte_t pte)
-{
-	native_set_pte(ptep, pte);
-}
-
 static inline void native_pmd_clear(pmd_t *pmdp)
 {
 	native_set_pmd(pmdp, __pmd(0));
...
@@ -31,23 +31,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
 	ptep->pte_low = pte.pte_low;
 }
 
-/*
- * Since this is only called on user PTEs, and the page fault handler
- * must handle the already racy situation of simultaneous page faults,
- * we are justified in merely clearing the PTE present bit, followed
- * by a set.  The ordering here is important.
- */
-static inline void native_set_pte_present(struct mm_struct *mm,
-					  unsigned long addr,
-					  pte_t *ptep, pte_t pte)
-{
-	ptep->pte_low = 0;
-	smp_wmb();
-	ptep->pte_high = pte.pte_high;
-	smp_wmb();
-	ptep->pte_low = pte.pte_low;
-}
-
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
...
@@ -31,8 +31,6 @@ extern struct list_head pgd_list;
 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
-#define set_pte_present(mm, addr, ptep, pte)				\
-	native_set_pte_present(mm, addr, ptep, pte)
 #define set_pte_atomic(ptep, pte)					\
 	native_set_pte_atomic(ptep, pte)
...
@@ -138,12 +138,6 @@ static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
 	kvm_mmu_write(ptep, pte_val(pte));
 }
 
-static void kvm_set_pte_present(struct mm_struct *mm, unsigned long addr,
-				pte_t *ptep, pte_t pte)
-{
-	kvm_mmu_write(ptep, pte_val(pte));
-}
-
 static void kvm_pte_clear(struct mm_struct *mm,
 			  unsigned long addr, pte_t *ptep)
 {
@@ -220,7 +214,6 @@ static void paravirt_ops_setup(void)
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
-	pv_mmu_ops.set_pte_present = kvm_set_pte_present;
 	pv_mmu_ops.pte_clear = kvm_pte_clear;
 	pv_mmu_ops.pmd_clear = kvm_pmd_clear;
 #endif
...
@@ -470,7 +470,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	.set_pte_atomic = native_set_pte_atomic,
-	.set_pte_present = native_set_pte_present,
 	.pte_clear = native_pte_clear,
 	.pmd_clear = native_pmd_clear,
 #endif
...
@@ -395,11 +395,6 @@ static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
 	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
 }
 
-static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
-{
-	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
-}
-
 static void vmi_set_pud(pud_t *pudp, pud_t pudval)
 {
 	/* Um, eww */
@@ -750,7 +745,6 @@ static inline int __init activate_vmi(void)
 	pv_mmu_ops.set_pmd = vmi_set_pmd;
 #ifdef CONFIG_X86_PAE
 	pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
-	pv_mmu_ops.set_pte_present = vmi_set_pte_present;
 	pv_mmu_ops.set_pud = vmi_set_pud;
 	pv_mmu_ops.pte_clear = vmi_pte_clear;
 	pv_mmu_ops.pmd_clear = vmi_pmd_clear;
...
@@ -1870,7 +1870,6 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
 #ifdef CONFIG_X86_PAE
 	.set_pte_atomic = xen_set_pte_atomic,
-	.set_pte_present = xen_set_pte_at,
 	.pte_clear = xen_pte_clear,
 	.pmd_clear = xen_pmd_clear,
 #endif /* CONFIG_X86_PAE */
...