Commit 6c738ffa authored by Jeff Dike, committed by Linus Torvalds

uml: fold mmu_context_skas into mm_context

This patch folds mmu_context_skas into struct mm_context, changing all users
of these structures as needed.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fab95c55
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#ifndef __SKAS_MMU_H
#define __SKAS_MMU_H
#include "mm_id.h"
#include "asm/ldt.h"
/*
 * Per-address-space MMU state for UML's SKAS mode.  This commit folds
 * these fields directly into struct mm_context; until then, it is
 * embedded in the arch mm_context as context.skas.
 */
struct mmu_context_skas {
/* Identifies the host process/fd backing this address space
 * (holds u.pid / u.mm_fd and a stub stack address, per users below). */
struct mm_id id;
/* Kernel virtual address of the last-level page table page; set from
 * pmd_page_vaddr(*pmd) in init_stub_pte(). */
unsigned long last_page_table;
#ifdef CONFIG_3_LEVEL_PGTABLES
/* With 3-level page tables, also record the pmd page:
 * __va(pud_val(*pud)) in init_stub_pte(). */
unsigned long last_pmd;
#endif
/* Per-mm LDT state, consumed by read_ldt()/write_ldt(). */
uml_ldt_t ldt;
};
/* Make the host address space identified by mm_idp current.
 * NOTE(review): implementation not visible here — presumably switches
 * the traced host process; confirm against arch/um/os-Linux. */
extern void __switch_mm(struct mm_id * mm_idp);
#endif
...@@ -33,7 +33,7 @@ struct host_vm_op { ...@@ -33,7 +33,7 @@ struct host_vm_op {
extern void force_flush_all(void); extern void force_flush_all(void);
extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr, extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force, unsigned long end_addr, int force,
int (*do_ops)(union mm_context *, int (*do_ops)(struct mm_context *,
struct host_vm_op *, int, int, struct host_vm_op *, int, int,
void **)); void **));
extern int flush_tlb_kernel_range_common(unsigned long start, extern int flush_tlb_kernel_range_common(unsigned long start,
......
...@@ -7,10 +7,22 @@ ...@@ -7,10 +7,22 @@
#define __ARCH_UM_MMU_H #define __ARCH_UM_MMU_H
#include "uml-config.h" #include "uml-config.h"
#include "mmu-skas.h" #include "mm_id.h"
#include "asm/ldt.h"
typedef union mm_context { typedef struct mm_context {
struct mmu_context_skas skas; struct mm_id id;
unsigned long last_page_table;
#ifdef CONFIG_3_LEVEL_PGTABLES
unsigned long last_pmd;
#endif
struct uml_ldt ldt;
} mm_context_t; } mm_context_t;
extern void __switch_mm(struct mm_id * mm_idp);
/* Avoid tangled inclusion with asm/ldt.h */
extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
extern void free_ldt(struct mm_context *mm);
#endif #endif
...@@ -23,14 +23,14 @@ void flush_thread(void) ...@@ -23,14 +23,14 @@ void flush_thread(void)
arch_flush_thread(&current->thread.arch); arch_flush_thread(&current->thread.arch);
ret = unmap(&current->mm->context.skas.id, 0, end, 1, &data); ret = unmap(&current->mm->context.id, 0, end, 1, &data);
if (ret) { if (ret) {
printk(KERN_ERR "flush_thread - clearing address space failed, " printk(KERN_ERR "flush_thread - clearing address space failed, "
"err = %d\n", ret); "err = %d\n", ret);
force_sig(SIGKILL, current); force_sig(SIGKILL, current);
} }
__switch_mm(&current->mm->context.skas.id); __switch_mm(&current->mm->context.id);
} }
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
......
...@@ -25,7 +25,7 @@ static void kill_off_processes(void) ...@@ -25,7 +25,7 @@ static void kill_off_processes(void)
if(p->mm == NULL) if(p->mm == NULL)
continue; continue;
pid = p->mm->context.skas.id.u.pid; pid = p->mm->context.id.u.pid;
os_kill_ptraced_process(pid, 1); os_kill_ptraced_process(pid, 1);
} }
} }
......
...@@ -47,9 +47,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc, ...@@ -47,9 +47,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
* destroy_context_skas. * destroy_context_skas.
*/ */
mm->context.skas.last_page_table = pmd_page_vaddr(*pmd); mm->context.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES #ifdef CONFIG_3_LEVEL_PGTABLES
mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud)); mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif #endif
*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
...@@ -66,8 +66,8 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc, ...@@ -66,8 +66,8 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
int init_new_context(struct task_struct *task, struct mm_struct *mm) int init_new_context(struct task_struct *task, struct mm_struct *mm)
{ {
struct mmu_context_skas *from_mm = NULL; struct mm_context *from_mm = NULL;
struct mmu_context_skas *to_mm = &mm->context.skas; struct mm_context *to_mm = &mm->context;
unsigned long stack = 0; unsigned long stack = 0;
int ret = -ENOMEM; int ret = -ENOMEM;
...@@ -97,7 +97,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm) ...@@ -97,7 +97,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
to_mm->id.stack = stack; to_mm->id.stack = stack;
if (current->mm != NULL && current->mm != &init_mm) if (current->mm != NULL && current->mm != &init_mm)
from_mm = &current->mm->context.skas; from_mm = &current->mm->context;
if (proc_mm) { if (proc_mm) {
ret = new_mm(stack); ret = new_mm(stack);
...@@ -133,7 +133,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm) ...@@ -133,7 +133,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
void destroy_context(struct mm_struct *mm) void destroy_context(struct mm_struct *mm)
{ {
struct mmu_context_skas *mmu = &mm->context.skas; struct mm_context *mmu = &mm->context;
if (proc_mm) if (proc_mm)
os_close_file(mmu->id.u.mm_fd); os_close_file(mmu->id.u.mm_fd);
......
...@@ -65,5 +65,5 @@ unsigned long current_stub_stack(void) ...@@ -65,5 +65,5 @@ unsigned long current_stub_stack(void)
if (current->mm == NULL) if (current->mm == NULL)
return 0; return 0;
return current->mm->context.skas.id.stack; return current->mm->context.id.stack;
} }
...@@ -14,8 +14,8 @@ ...@@ -14,8 +14,8 @@
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
unsigned int prot, struct host_vm_op *ops, int *index, unsigned int prot, struct host_vm_op *ops, int *index,
int last_filled, union mm_context *mmu, void **flush, int last_filled, struct mm_context *mmu, void **flush,
int (*do_ops)(union mm_context *, struct host_vm_op *, int (*do_ops)(struct mm_context *, struct host_vm_op *,
int, int, void **)) int, int, void **))
{ {
__u64 offset; __u64 offset;
...@@ -52,8 +52,8 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, ...@@ -52,8 +52,8 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
static int add_munmap(unsigned long addr, unsigned long len, static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *ops, int *index, int last_filled, struct host_vm_op *ops, int *index, int last_filled,
union mm_context *mmu, void **flush, struct mm_context *mmu, void **flush,
int (*do_ops)(union mm_context *, struct host_vm_op *, int (*do_ops)(struct mm_context *, struct host_vm_op *,
int, int, void **)) int, int, void **))
{ {
struct host_vm_op *last; struct host_vm_op *last;
...@@ -82,8 +82,8 @@ static int add_munmap(unsigned long addr, unsigned long len, ...@@ -82,8 +82,8 @@ static int add_munmap(unsigned long addr, unsigned long len,
static int add_mprotect(unsigned long addr, unsigned long len, static int add_mprotect(unsigned long addr, unsigned long len,
unsigned int prot, struct host_vm_op *ops, int *index, unsigned int prot, struct host_vm_op *ops, int *index,
int last_filled, union mm_context *mmu, void **flush, int last_filled, struct mm_context *mmu, void **flush,
int (*do_ops)(union mm_context *, struct host_vm_op *, int (*do_ops)(struct mm_context *, struct host_vm_op *,
int, int, void **)) int, int, void **))
{ {
struct host_vm_op *last; struct host_vm_op *last;
...@@ -117,8 +117,8 @@ static int add_mprotect(unsigned long addr, unsigned long len, ...@@ -117,8 +117,8 @@ static int add_mprotect(unsigned long addr, unsigned long len,
static inline int update_pte_range(pmd_t *pmd, unsigned long addr, static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct host_vm_op *ops, unsigned long end, struct host_vm_op *ops,
int last_op, int *op_index, int force, int last_op, int *op_index, int force,
union mm_context *mmu, void **flush, struct mm_context *mmu, void **flush,
int (*do_ops)(union mm_context *, int (*do_ops)(struct mm_context *,
struct host_vm_op *, int, int, struct host_vm_op *, int, int,
void **)) void **))
{ {
...@@ -157,8 +157,8 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr, ...@@ -157,8 +157,8 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
static inline int update_pmd_range(pud_t *pud, unsigned long addr, static inline int update_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, struct host_vm_op *ops, unsigned long end, struct host_vm_op *ops,
int last_op, int *op_index, int force, int last_op, int *op_index, int force,
union mm_context *mmu, void **flush, struct mm_context *mmu, void **flush,
int (*do_ops)(union mm_context *, int (*do_ops)(struct mm_context *,
struct host_vm_op *, int, int, struct host_vm_op *, int, int,
void **)) void **))
{ {
...@@ -187,8 +187,8 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr, ...@@ -187,8 +187,8 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
static inline int update_pud_range(pgd_t *pgd, unsigned long addr, static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
unsigned long end, struct host_vm_op *ops, unsigned long end, struct host_vm_op *ops,
int last_op, int *op_index, int force, int last_op, int *op_index, int force,
union mm_context *mmu, void **flush, struct mm_context *mmu, void **flush,
int (*do_ops)(union mm_context *, int (*do_ops)(struct mm_context *,
struct host_vm_op *, int, int, struct host_vm_op *, int, int,
void **)) void **))
{ {
...@@ -216,11 +216,11 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr, ...@@ -216,11 +216,11 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
void fix_range_common(struct mm_struct *mm, unsigned long start_addr, void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force, unsigned long end_addr, int force,
int (*do_ops)(union mm_context *, struct host_vm_op *, int (*do_ops)(struct mm_context *, struct host_vm_op *,
int, int, void **)) int, int, void **))
{ {
pgd_t *pgd; pgd_t *pgd;
union mm_context *mmu = &mm->context; struct mm_context *mmu = &mm->context;
struct host_vm_op ops[1]; struct host_vm_op ops[1];
unsigned long addr = start_addr, next; unsigned long addr = start_addr, next;
int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1; int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
...@@ -375,7 +375,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) ...@@ -375,7 +375,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
w = 0; w = 0;
} }
mm_id = &mm->context.skas.id; mm_id = &mm->context.id;
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) | prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
(x ? UM_PROT_EXEC : 0)); (x ? UM_PROT_EXEC : 0));
if (pte_newpage(*pte)) { if (pte_newpage(*pte)) {
...@@ -453,7 +453,7 @@ void __flush_tlb_one(unsigned long addr) ...@@ -453,7 +453,7 @@ void __flush_tlb_one(unsigned long addr)
flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE); flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
} }
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last, static int do_ops(struct mm_context *mmu, struct host_vm_op *ops, int last,
int finished, void **flush) int finished, void **flush)
{ {
struct host_vm_op *op; struct host_vm_op *op;
...@@ -463,17 +463,16 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last, ...@@ -463,17 +463,16 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
op = &ops[i]; op = &ops[i];
switch(op->type) { switch(op->type) {
case MMAP: case MMAP:
ret = map(&mmu->skas.id, op->u.mmap.addr, ret = map(&mmu->id, op->u.mmap.addr, op->u.mmap.len,
op->u.mmap.len, op->u.mmap.prot, op->u.mmap.prot, op->u.mmap.fd,
op->u.mmap.fd, op->u.mmap.offset, finished, op->u.mmap.offset, finished, flush);
flush);
break; break;
case MUNMAP: case MUNMAP:
ret = unmap(&mmu->skas.id, op->u.munmap.addr, ret = unmap(&mmu->id, op->u.munmap.addr,
op->u.munmap.len, finished, flush); op->u.munmap.len, finished, flush);
break; break;
case MPROTECT: case MPROTECT:
ret = protect(&mmu->skas.id, op->u.mprotect.addr, ret = protect(&mmu->id, op->u.mprotect.addr,
op->u.mprotect.len, op->u.mprotect.prot, op->u.mprotect.len, op->u.mprotect.prot,
finished, flush); finished, flush);
break; break;
......
...@@ -33,7 +33,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, ...@@ -33,7 +33,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
* Note: I'm unsure: should interrupts be disabled here? * Note: I'm unsure: should interrupts be disabled here?
*/ */
if (!current->active_mm || current->active_mm == &init_mm || if (!current->active_mm || current->active_mm == &init_mm ||
mm_idp != &current->active_mm->context.skas.id) mm_idp != &current->active_mm->context.id)
__switch_mm(mm_idp); __switch_mm(mm_idp);
} }
...@@ -79,8 +79,8 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc, ...@@ -79,8 +79,8 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
* PTRACE_LDT possible to implement. * PTRACE_LDT possible to implement.
*/ */
if (current->active_mm && current->active_mm != &init_mm && if (current->active_mm && current->active_mm != &init_mm &&
mm_idp != &current->active_mm->context.skas.id) mm_idp != &current->active_mm->context.id)
__switch_mm(&current->active_mm->context.skas.id); __switch_mm(&current->active_mm->context.id);
} }
return res; return res;
...@@ -135,7 +135,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) ...@@ -135,7 +135,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
{ {
int i, err = 0; int i, err = 0;
unsigned long size; unsigned long size;
uml_ldt_t * ldt = &current->mm->context.skas.ldt; uml_ldt_t * ldt = &current->mm->context.ldt;
if (!ldt->entry_count) if (!ldt->entry_count)
goto out; goto out;
...@@ -203,8 +203,8 @@ static int read_default_ldt(void __user * ptr, unsigned long bytecount) ...@@ -203,8 +203,8 @@ static int read_default_ldt(void __user * ptr, unsigned long bytecount)
static int write_ldt(void __user * ptr, unsigned long bytecount, int func) static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{ {
uml_ldt_t * ldt = &current->mm->context.skas.ldt; uml_ldt_t * ldt = &current->mm->context.ldt;
struct mm_id * mm_idp = &current->mm->context.skas.id; struct mm_id * mm_idp = &current->mm->context.id;
int i, err; int i, err;
struct user_desc ldt_info; struct user_desc ldt_info;
struct ldt_entry entry0, *ldt_p; struct ldt_entry entry0, *ldt_p;
...@@ -384,8 +384,7 @@ out_free: ...@@ -384,8 +384,7 @@ out_free:
free_pages((unsigned long)ldt, order); free_pages((unsigned long)ldt, order);
} }
long init_new_ldt(struct mmu_context_skas * new_mm, long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
struct mmu_context_skas * from_mm)
{ {
struct user_desc desc; struct user_desc desc;
short * num_p; short * num_p;
...@@ -483,7 +482,7 @@ long init_new_ldt(struct mmu_context_skas * new_mm, ...@@ -483,7 +482,7 @@ long init_new_ldt(struct mmu_context_skas * new_mm,
} }
void free_ldt(struct mmu_context_skas * mm) void free_ldt(struct mm_context *mm)
{ {
int i; int i;
......
...@@ -30,7 +30,7 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) ...@@ -30,7 +30,7 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
{ {
unsigned long *ptr = addr, tmp; unsigned long *ptr = addr, tmp;
long ret; long ret;
int pid = task->mm->context.skas.id.u.pid; int pid = task->mm->context.id.u.pid;
/* /*
* With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
......
...@@ -11,11 +11,7 @@ ...@@ -11,11 +11,7 @@
#include "asm/semaphore.h" #include "asm/semaphore.h"
#include "asm/host_ldt.h" #include "asm/host_ldt.h"
struct mmu_context_skas;
extern void ldt_host_info(void); extern void ldt_host_info(void);
extern long init_new_ldt(struct mmu_context_skas * to_mm,
struct mmu_context_skas * from_mm);
extern void free_ldt(struct mmu_context_skas * mm);
#define LDT_PAGES_MAX \ #define LDT_PAGES_MAX \
((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE) ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
......
...@@ -29,7 +29,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new) ...@@ -29,7 +29,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
* possible. * possible.
*/ */
if (old != new && (current->flags & PF_BORROWED_MM)) if (old != new && (current->flags & PF_BORROWED_MM))
__switch_mm(&new->context.skas.id); __switch_mm(&new->context.id);
} }
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
...@@ -41,7 +41,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -41,7 +41,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpu_clear(cpu, prev->cpu_vm_mask); cpu_clear(cpu, prev->cpu_vm_mask);
cpu_set(cpu, next->cpu_vm_mask); cpu_set(cpu, next->cpu_vm_mask);
if(next != &init_mm) if(next != &init_mm)
__switch_mm(&next->context.skas.id); __switch_mm(&next->context.id);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment