Commit 1fd73c6b authored by Ravikiran G Thirumalai's avatar Ravikiran G Thirumalai Committed by Linus Torvalds

[PATCH] Kill L1_CACHE_SHIFT_MAX

Kill L1_CACHE_SHIFT_MAX from all arches.  Since L1_CACHE_SHIFT_MAX is not used
anymore with the introduction of INTERNODE_CACHE, kill L1_CACHE_SHIFT_MAX.
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 22fc6ecc
...@@ -20,6 +20,5 @@ ...@@ -20,6 +20,5 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES #define SMP_CACHE_BYTES L1_CACHE_BYTES
#define L1_CACHE_SHIFT_MAX L1_CACHE_SHIFT
#endif #endif
...@@ -7,9 +7,4 @@ ...@@ -7,9 +7,4 @@
#define L1_CACHE_SHIFT 5 #define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
* largest L1 which this arch supports
*/
#define L1_CACHE_SHIFT_MAX 5
#endif #endif
...@@ -4,6 +4,5 @@ ...@@ -4,6 +4,5 @@
/* Etrax 100LX have 32-byte cache-lines. */ /* Etrax 100LX have 32-byte cache-lines. */
#define L1_CACHE_BYTES 32 #define L1_CACHE_BYTES 32
#define L1_CACHE_SHIFT 5 #define L1_CACHE_SHIFT 5
#define L1_CACHE_SHIFT_MAX 5
#endif /* _ASM_ARCH_CACHE_H */ #endif /* _ASM_ARCH_CACHE_H */
...@@ -4,6 +4,5 @@ ...@@ -4,6 +4,5 @@
/* A cache-line is 32 bytes. */ /* A cache-line is 32 bytes. */
#define L1_CACHE_BYTES 32 #define L1_CACHE_BYTES 32
#define L1_CACHE_SHIFT 5 #define L1_CACHE_SHIFT 5
#define L1_CACHE_SHIFT_MAX 5
#endif /* _ASM_CRIS_ARCH_CACHE_H */ #endif /* _ASM_CRIS_ARCH_CACHE_H */
...@@ -153,7 +153,7 @@ dma_set_mask(struct device *dev, u64 mask) ...@@ -153,7 +153,7 @@ dma_set_mask(struct device *dev, u64 mask)
static inline int static inline int
dma_get_cache_alignment(void) dma_get_cache_alignment(void)
{ {
return (1 << L1_CACHE_SHIFT_MAX); return (1 << INTERNODE_CACHE_SHIFT);
} }
#define dma_is_consistent(d) (1) #define dma_is_consistent(d) (1)
......
...@@ -274,7 +274,7 @@ dma_get_cache_alignment(void) ...@@ -274,7 +274,7 @@ dma_get_cache_alignment(void)
{ {
/* no easy way to get cache size on all processors, so return /* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */ * the maximum possible, to be safe */
return (1 << L1_CACHE_SHIFT_MAX); return (1 << INTERNODE_CACHE_SHIFT);
} }
static inline void static inline void
......
...@@ -10,6 +10,4 @@ ...@@ -10,6 +10,4 @@
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#endif #endif
...@@ -150,7 +150,7 @@ dma_get_cache_alignment(void) ...@@ -150,7 +150,7 @@ dma_get_cache_alignment(void)
{ {
/* no easy way to get cache size on all x86, so return the /* no easy way to get cache size on all x86, so return the
* maximum possible, to be safe */ * maximum possible, to be safe */
return (1 << L1_CACHE_SHIFT_MAX); return (1 << INTERNODE_CACHE_SHIFT);
} }
#define dma_is_consistent(d) (1) #define dma_is_consistent(d) (1)
......
...@@ -12,8 +12,6 @@ ...@@ -12,8 +12,6 @@
#define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
# define SMP_CACHE_SHIFT L1_CACHE_SHIFT # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
# define SMP_CACHE_BYTES L1_CACHE_BYTES # define SMP_CACHE_BYTES L1_CACHE_BYTES
......
...@@ -7,6 +7,4 @@ ...@@ -7,6 +7,4 @@
#define L1_CACHE_SHIFT 4 #define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 4
#endif /* _ASM_M32R_CACHE_H */ #endif /* _ASM_M32R_CACHE_H */
...@@ -8,6 +8,4 @@ ...@@ -8,6 +8,4 @@
#define L1_CACHE_SHIFT 4 #define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 4 /* largest L1 which this arch supports */
#endif #endif
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 6
#define SMP_CACHE_SHIFT L1_CACHE_SHIFT #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
#define SMP_CACHE_BYTES L1_CACHE_BYTES #define SMP_CACHE_BYTES L1_CACHE_BYTES
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES #define SMP_CACHE_BYTES L1_CACHE_BYTES
#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
extern void flush_data_cache_local(void); /* flushes local data-cache only */ extern void flush_data_cache_local(void); /* flushes local data-cache only */
extern void flush_instruction_cache_local(void); /* flushes local code-cache only */ extern void flush_instruction_cache_local(void); /* flushes local code-cache only */
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES #define SMP_CACHE_BYTES L1_CACHE_BYTES
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#if defined(__powerpc64__) && !defined(__ASSEMBLY__) #if defined(__powerpc64__) && !defined(__ASSEMBLY__)
struct ppc64_caches { struct ppc64_caches {
......
...@@ -229,7 +229,7 @@ static inline int dma_get_cache_alignment(void) ...@@ -229,7 +229,7 @@ static inline int dma_get_cache_alignment(void)
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
/* no easy way to get cache size on all processors, so return /* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */ * the maximum possible, to be safe */
return (1 << L1_CACHE_SHIFT_MAX); return (1 << INTERNODE_CACHE_SHIFT);
#else #else
/* /*
* Each processor family will define its own L1_CACHE_SHIFT, * Each processor family will define its own L1_CACHE_SHIFT,
......
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#define L1_CACHE_BYTES 256 #define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8 #define L1_CACHE_SHIFT 8
#define L1_CACHE_SHIFT_MAX 8 /* largest L1 which this arch supports */
#define ARCH_KMALLOC_MINALIGN 8 #define ARCH_KMALLOC_MINALIGN 8
......
...@@ -22,8 +22,6 @@ ...@@ -22,8 +22,6 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
struct cache_info { struct cache_info {
unsigned int ways; unsigned int ways;
unsigned int sets; unsigned int sets;
......
...@@ -20,8 +20,6 @@ ...@@ -20,8 +20,6 @@
#define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1)) #define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1))
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK) #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK)
#define L1_CACHE_SIZE_BYTES (L1_CACHE_BYTES << 10) #define L1_CACHE_SIZE_BYTES (L1_CACHE_BYTES << 10)
/* Largest L1 which this arch supports */
#define L1_CACHE_SHIFT_MAX 5
#ifdef MODULE #ifdef MODULE
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES))) #define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
......
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#define L1_CACHE_SHIFT 5 #define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES 32 #define L1_CACHE_BYTES 32
#define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))) #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
#define SMP_CACHE_BYTES 32 #define SMP_CACHE_BYTES 32
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#define L1_CACHE_BYTES 32 /* Two 16-byte sub-blocks per line. */ #define L1_CACHE_BYTES 32 /* Two 16-byte sub-blocks per line. */
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
#define SMP_CACHE_BYTES_SHIFT 6 #define SMP_CACHE_BYTES_SHIFT 6
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */ #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
......
...@@ -13,9 +13,6 @@ ...@@ -13,9 +13,6 @@
# define L1_CACHE_SHIFT 5 # define L1_CACHE_SHIFT 5
#endif #endif
/* XXX: this is valid for x86 and x86_64. */
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif #endif
...@@ -23,6 +23,4 @@ ...@@ -23,6 +23,4 @@
#define L1_CACHE_SHIFT 4 #define L1_CACHE_SHIFT 4
#endif #endif
#define L1_CACHE_SHIFT_MAX L1_CACHE_SHIFT
#endif /* __V850_CACHE_H__ */ #endif /* __V850_CACHE_H__ */
...@@ -9,6 +9,5 @@ ...@@ -9,6 +9,5 @@
/* L1 cache line size */ /* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment