Commit fd8e43fd authored by Nick Piggin, committed by Pekka Enberg

SLQB: fix allocation size checking

SLQB would return ZERO_SIZE_PTR rather than NULL if the requested size is too
large. Debugged by Heiko Carstens. Fix this by checking size edge cases up
front rather than in the slab index calculation.
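
For illustration only (this snippet is not part of the patch): ZERO_SIZE_PTR is the non-NULL sentinel ((void *)16) that kmalloc(0) returns, so a caller's usual NULL check does not catch it and the bogus pointer can be dereferenced:

	/* hypothetical caller, sketching the failure mode before this fix */
	void *p = kmalloc(1UL << 30, GFP_KERNEL);	/* far beyond the largest cache */
	if (!p)
		return -ENOMEM;			/* not taken: ZERO_SIZE_PTR != NULL */
	memset(p, 0, 1UL << 30);		/* writes through ZERO_SIZE_PTR: corruption */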

Additionally, if the size parameter was non-constant and too large, the checks
may not have been performed at all, which could cause corruption.

Next, ARCH_KMALLOC_MINALIGN may not be obeyed if size is non-constant. So
test for KMALLOC_MIN_SIZE in that case.
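
As an illustrative scenario (with assumed values, not taken from the patch): on
an architecture where ARCH_KMALLOC_MINALIGN is 64 for DMA safety, KMALLOC_MIN_SIZE
is 64 as well, so even a tiny runtime-sized request must be served from the
64-byte cache to keep the alignment guarantee:

	size_t n = 3;				/* not a compile-time constant */
	void *p = kmalloc(n, GFP_KERNEL);	/* must be 64-byte aligned here; indexing
						 * by size alone would pick a smaller,
						 * less-aligned cache */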

Finally, if KMALLOC_SHIFT_SLQB_HIGH is larger than 2MB, then kmalloc_index
could silently run off the end of its precomputed table and return a -1
index into the kmalloc slab array, which could result in corruption. Extend
this to allow up to 32MB (to match SLAB), and add a compile-time error in
the case that the table is exceeded (also like SLAB).
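
The compile-time error relies on a common kernel trick: an extern function that
is declared but never defined. kmalloc_index() is __always_inline and reached
only with constant sizes, so for any size the table covers, constant folding
eliminates the call; a constant size beyond the table leaves the call in place
and the build fails at link time with an undefined reference. A minimal sketch
of the pattern (the names here are hypothetical; the patch's real symbol is
____kmalloc_too_large):

	extern int __index_check_failed(void);	/* declared, intentionally never defined */

	static __always_inline int index_for(size_t size)
	{
		if (size <= 4096)
			return 12;
		/* for valid constant sizes this is dead code and is folded away;
		 * otherwise the link fails with an undefined reference */
		return __index_check_failed();
	}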
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 03a875ed
@@ -184,10 +184,7 @@ extern struct kmem_cache kmalloc_caches_dma[KMALLOC_SHIFT_SLQB_HIGH + 1];
  */
 static __always_inline int kmalloc_index(size_t size)
 {
-	if (unlikely(!size))
-		return 0;
-	if (unlikely(size > 1UL << KMALLOC_SHIFT_SLQB_HIGH))
-		return 0;
+	extern int ____kmalloc_too_large(void);
+
 	if (unlikely(size <= KMALLOC_MIN_SIZE))
 		return KMALLOC_SHIFT_LOW;
 
@@ -219,7 +216,11 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
 	if (size <= 2 * 1024 * 1024) return 21;
-	return -1;
+	if (size <= 4 * 1024 * 1024) return 22;
+	if (size <= 8 * 1024 * 1024) return 23;
+	if (size <= 16 * 1024 * 1024) return 24;
+	if (size <= 32 * 1024 * 1024) return 25;
+	return ____kmalloc_too_large();
 }
 
 #ifdef CONFIG_ZONE_DMA
@@ -238,10 +239,12 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 {
 	int index;
 
-	index = kmalloc_index(size);
-	if (unlikely(index == 0))
+	if (unlikely(size > 1UL << KMALLOC_SHIFT_SLQB_HIGH))
+		return NULL;
+	if (unlikely(!size))
 		return ZERO_SIZE_PTR;
 
+	index = kmalloc_index(size);
 	if (likely(!(flags & SLQB_DMA)))
 		return &kmalloc_caches[index];
 	else
@@ -2514,18 +2514,28 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index;
 
+	if (unlikely(size <= KMALLOC_MIN_SIZE)) {
+		if (unlikely(!size))
+			return ZERO_SIZE_PTR;
+
+		index = KMALLOC_SHIFT_LOW;
+		goto got_index;
+	}
+
 #if L1_CACHE_BYTES >= 128
 	if (size <= 128) {
 #else
 	if (size <= 192) {
 #endif
-		if (unlikely(!size))
-			return ZERO_SIZE_PTR;
-
 		index = size_index[(size - 1) / 8];
-	} else
+	} else {
+		if (unlikely(size > 1UL << KMALLOC_SHIFT_SLQB_HIGH))
+			return NULL;
+
 		index = fls(size - 1);
+	}
 
+got_index:
 	if (unlikely((flags & SLQB_DMA)))
 		return &kmalloc_caches_dma[index];
 	else