Commit ee39b37b authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] freepgt: remove MM_VM_SIZE(mm)

There's only one usage of MM_VM_SIZE(mm) left, and it's a troublesome macro
because mm doesn't contain the (32-bit emulation?) info needed.  But it too is
only needed because we ignore the end from the vma list.

We could make flush_pgtables return that end, or unmap_vmas.  Choose the
latter, since it's a natural fit with unmap_mapping_range_vma needing to know
its restart addr.  This does make a more than minimal change, but if unmap_vmas
had returned the end before, this is how we'd have done it, rather than
storing the break_addr in zap_details.

unmap_vmas used to return a count of the vmas scanned, but that's just debug
which hasn't been useful in a while; and if we want the map_count-is-0-on-exit
check back, it can easily come from the final remove_vm_struct loop.
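
For illustration only, a minimal sketch of the calling convention this change introduces.  The prototypes and the unmap/flush pattern are taken from the hunks below; the wrapper function and its name are hypothetical, not part of the patch:

/* Sketch: both paths now report how far the unmap actually got,
 * instead of a count of vmas scanned. */
unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details);

/* Hypothetical caller, following the exit_mmap hunk: the end address
 * returned by unmap_vmas replaces MM_VM_SIZE(mm) in the final TLB flush. */
static void example_full_unmap(struct mm_struct *mm)
{
	struct mmu_gather *tlb;
	unsigned long nr_accounted = 0;
	unsigned long end;

	tlb = tlb_gather_mmu(mm, 1);
	/* -1 as end_addr ensures every vma in the mm is covered */
	end = unmap_vmas(&tlb, mm, mm->mmap, 0, -1, &nr_accounted, NULL);
	vm_unacct_memory(nr_accounted);
	tlb_finish_mmu(tlb, 0, end);	/* was: tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm)) */
}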
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e0da382c
@@ -42,14 +42,6 @@
  */
 #define TASK_SIZE		(current->thread.task_size)
 
-/*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
- * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm)		DEFAULT_TASK_SIZE
-
 /*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
...
@@ -542,10 +542,6 @@ extern struct task_struct *last_task_used_altivec;
 #define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
 		TASK_SIZE_USER32 : TASK_SIZE_USER64)
 
-/* We can't actually tell the TASK_SIZE given just the mm, but default
- * to the 64-bit case to make sure that enough gets cleaned up. */
-#define MM_VM_SIZE(mm)	TASK_SIZE_USER64
-
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
...
@@ -74,8 +74,6 @@ extern struct task_struct *last_task_used_math;
 
 #endif /* __s390x__ */
 
-#define MM_VM_SIZE(mm)	DEFAULT_TASK_SIZE
-
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
 typedef struct {
...
@@ -37,10 +37,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-#ifndef MM_VM_SIZE
-#define MM_VM_SIZE(mm)	((TASK_SIZE + PGDIR_SIZE - 1) & PGDIR_MASK)
-#endif
-
 #define nth_page(page,n)	pfn_to_page(page_to_pfn((page)) + (n))
 
 /*
@@ -582,13 +578,12 @@ struct zap_details {
 	pgoff_t first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
 	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
-	unsigned long break_addr;		/* Where unmap_vmas stopped */
 	unsigned long truncate_count;		/* Compare vm_truncate_count */
 };
 
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
...
@@ -645,7 +645,7 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the number of vma's which were covered by the unmapping.
+ * Returns the end address of the unmapping (restart addr if interrupted).
  *
  * Unmap all pages in the vma list.  Called under page_table_lock.
  *
@@ -662,7 +662,7 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
@@ -670,12 +670,11 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
 	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
-	int ret = 0;
+	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
 	int fullmm = tlb_is_full_mm(*tlbp);
 
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-		unsigned long start;
 		unsigned long end;
 
 		start = max(vma->vm_start, start_addr);
@@ -688,7 +687,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
-		ret++;
 		while (start != end) {
 			unsigned long block;
 
@@ -719,7 +717,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 			if (i_mmap_lock) {
 				/* must reset count of rss freed */
 				*tlbp = tlb_gather_mmu(mm, fullmm);
-				details->break_addr = start;
 				goto out;
 			}
 			spin_unlock(&mm->page_table_lock);
@@ -733,7 +730,7 @@
 		}
 	}
 out:
-	return ret;
+	return start;	/* which is now the end (or restart) address */
 }
 
 /**
@@ -743,7 +740,7 @@ out:
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -753,15 +750,16 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 
 	if (is_vm_hugetlb_page(vma)) {
 		zap_hugepage_range(vma, address, size);
-		return;
+		return end;
 	}
 
 	lru_add_drain();
 	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
-	unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
+	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
 	tlb_finish_mmu(tlb, address, end);
 	spin_unlock(&mm->page_table_lock);
+	return end;
 }
 
 /*
@@ -1348,7 +1346,7 @@ no_new_page:
  * i_mmap_lock.
  *
  * In order to make forward progress despite repeatedly restarting some
- * large vma, note the break_addr set by unmap_vmas when it breaks out:
+ * large vma, note the restart_addr from unmap_vmas when it breaks out:
  * and restart from that address when we reach that vma again.  It might
  * have been split or merged, shrunk or extended, but never shifted: so
 * restart_addr remains valid so long as it remains in the vma's range.
@@ -1386,8 +1384,8 @@ again:
 		}
 	}
 
-	details->break_addr = end_addr;
-	zap_page_range(vma, start_addr, end_addr - start_addr, details);
+	restart_addr = zap_page_range(vma, start_addr,
+					end_addr - start_addr, details);
 
 	/*
 	 * We cannot rely on the break test in unmap_vmas:
@@ -1398,14 +1396,14 @@ again:
 	need_break = need_resched() ||
 			need_lockbreak(details->i_mmap_lock);
 
-	if (details->break_addr >= end_addr) {
+	if (restart_addr >= end_addr) {
 		/* We have now completed this vma: mark it so */
 		vma->vm_truncate_count = details->truncate_count;
 		if (!need_break)
 			return 0;
 	} else {
 		/* Note restart_addr in vma's truncate_count field */
-		vma->vm_truncate_count = details->break_addr;
+		vma->vm_truncate_count = restart_addr;
 		if (!need_break)
 			goto again;
 	}
...
@@ -1900,6 +1900,7 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather *tlb;
 	struct vm_area_struct *vma = mm->mmap;
 	unsigned long nr_accounted = 0;
+	unsigned long end;
 
 	lru_add_drain();
@@ -1908,10 +1909,10 @@ void exit_mmap(struct mm_struct *mm)
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	mm->map_count -= unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, 0, 0);
-	tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
+	tlb_finish_mmu(tlb, 0, end);
 
 	mm->mmap = mm->mmap_cache = NULL;
 	mm->mm_rb = RB_ROOT;
@@ -1931,7 +1932,6 @@ void exit_mmap(struct mm_struct *mm)
 		vma = next;
 	}
 
-	BUG_ON(mm->map_count);	/* This is just debugging */
 	BUG_ON(mm->nr_ptes);	/* This is just debugging */
 }
 
...