Commit f7d0b926 authored by Jeremy Fitzhardinge, committed by Ingo Molnar

mm: define USE_SPLIT_PTLOCKS rather than repeating expression

Define USE_SPLIT_PTLOCKS as a constant expression rather than repeating
"NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS" all over the place.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent adee14b2
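The change is mechanical: the comparison "NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS" is evaluated once, in a macro named USE_SPLIT_PTLOCKS, and every #if that used to repeat the comparison now tests the macro instead. Below is a minimal, self-contained sketch of the pattern, not kernel code; NR_CPUS_DEMO and SPLIT_PTLOCK_CPUS_DEMO are made-up stand-ins for the kernel's NR_CPUS and CONFIG_SPLIT_PTLOCK_CPUS.

/*
 * Stand-alone sketch of the commit's pattern: define the condition once
 * as a constant expression, then test the macro everywhere it is needed.
 */
#include <stdio.h>

#define NR_CPUS_DEMO		8
#define SPLIT_PTLOCK_CPUS_DEMO	4

/* one definition of the condition, usable by the preprocessor (and by C code) */
#define USE_SPLIT_PTLOCKS	(NR_CPUS_DEMO >= SPLIT_PTLOCK_CPUS_DEMO)

int main(void)
{
#if USE_SPLIT_PTLOCKS
	puts("split ptlocks: one spinlock per page-table page");
#else
	puts("single mm->page_table_lock guards all page tables");
#endif
	return 0;
}

Because the macro expands to a constant expression rather than a bare 0 or 1, it could also be used in ordinary C conditionals, although this commit only uses it in preprocessor conditionals.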
@@ -646,7 +646,7 @@ static spinlock_t *lock_pte(struct page *page)
 {
 	spinlock_t *ptl = NULL;
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 	ptl = __pte_lockptr(page);
 	spin_lock(ptl);
 #endif
...
@@ -919,7 +919,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -932,14 +932,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else	/* !USE_SPLIT_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)	do {} while (0)
 #define pte_lock_deinit(page)	do {} while (0)
 #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
...
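For context only (the helper below lives elsewhere in mm.h and is not touched by this commit): pte_lockptr() is normally consumed through pte_offset_map_lock()/pte_unmap_unlock(), so page-table walkers never need to know whether they were handed a per-page ptl or mm->page_table_lock. A rough sketch of such a caller follows; touch_one_pte() is a hypothetical function invented for illustration.

/* Hypothetical walker, for illustration only: pte_offset_map_lock() returns
 * the mapped PTE and, via ptl, whichever lock pte_lockptr() selected above. */
static int touch_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_present(*pte)) {
		pte_unmap_unlock(pte, ptl);
		return -EFAULT;
	}
	/* ... inspect or modify *pte while holding the right lock ... */
	pte_unmap_unlock(pte, ptl);
	return 0;
}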
@@ -21,11 +21,13 @@
 
 struct address_space;
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+
+#if USE_SPLIT_PTLOCKS
 typedef atomic_long_t mm_counter_t;
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 typedef unsigned long mm_counter_t;
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -65,7 +67,7 @@ struct page {
 					 * see PAGE_MAPPING_ANON below.
 					 */
 	};
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 	spinlock_t ptl;
 #endif
 	struct kmem_cache *slab;	/* SLUB: Pointer to slab */
...
@@ -352,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -363,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
@@ -374,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
 
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 #define get_mm_rss(mm)					\
 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
...
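A closing note on why the counters come in two flavours (context, not part of the patch): with split page-table locks, concurrent faults on different page-table pages no longer serialize on mm->page_table_lock, so RSS updates must be atomic; without split locks a plain increment is already safe. A hypothetical caller, invented for illustration, looks the same either way:

/* Hypothetical accounting helper, for illustration only: the same source
 * compiles to atomic_long_inc() under USE_SPLIT_PTLOCKS and to a plain ++
 * otherwise, because inc_mm_counter() above hides the difference. */
static void account_new_anon_page(struct mm_struct *mm)
{
	inc_mm_counter(mm, anon_rss);
}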