Commit 87a927c7 authored by David Woodhouse, committed by Linus Torvalds

Fix slab redzone alignment

Commit b46b8f19 fixed a couple of bugs
by switching the redzone to 64 bits. Unfortunately, it neglected to
ensure that the _second_ redzone, after the slab object, is aligned
correctly. This caused illegal instruction faults on sparc32, which for
some reason not entirely clear to me are not trapped and fixed up.

Two things need to be done to fix this:
  - increase the object size, rounding up to alignof(long long) so
    that the second redzone can be aligned correctly.
  - If SLAB_STORE_USER is set but alignof(long long)==8, allow a
    full 64 bits of space for the user word at the end of the buffer,
    even though we may not _use_ the whole 64 bits.

This patch should be a no-op on any 64-bit architecture or any 32-bit
architecture where alignof(long long) == 4. Of the others, it's tested
on ppc32 by myself and a very similar patch was tested on sparc32 by
Mark Fortescue, who reported the new problem.

Also, fix the conditions for FORCED_DEBUG, which hadn't been adjusted to
the new sizes. Again noticed by Mark.
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2bcb1b7d
@@ -137,6 +137,7 @@
/* Shouldn't this be in a header file somewhere? */ /* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD sizeof(void *) #define BYTES_PER_WORD sizeof(void *)
#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
#ifndef cache_line_size #ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES #define cache_line_size() L1_CACHE_BYTES
@@ -547,7 +548,7 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
if (cachep->flags & SLAB_STORE_USER) if (cachep->flags & SLAB_STORE_USER)
return (unsigned long long *)(objp + cachep->buffer_size - return (unsigned long long *)(objp + cachep->buffer_size -
sizeof(unsigned long long) - sizeof(unsigned long long) -
BYTES_PER_WORD); REDZONE_ALIGN);
return (unsigned long long *) (objp + cachep->buffer_size - return (unsigned long long *) (objp + cachep->buffer_size -
sizeof(unsigned long long)); sizeof(unsigned long long));
} }
@@ -2178,7 +2179,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* above the next power of two: caches with object sizes just above a * above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation. * power of two have a significant amount of internal fragmentation.
*/ */
if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD)) if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2 * sizeof(unsigned long long)))
flags |= SLAB_RED_ZONE | SLAB_STORE_USER; flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU)) if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON; flags |= SLAB_POISON;
@@ -2219,12 +2221,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
} }
/* /*
* Redzoning and user store require word alignment. Note this will be * Redzoning and user store require word alignment or possibly larger.
* overridden by architecture or caller mandated alignment if either * Note this will be overridden by architecture or caller mandated
* is greater than BYTES_PER_WORD. * alignment if either is greater than BYTES_PER_WORD.
*/ */
if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) if (flags & SLAB_STORE_USER)
ralign = __alignof__(unsigned long long); ralign = BYTES_PER_WORD;
if (flags & SLAB_RED_ZONE) {
ralign = REDZONE_ALIGN;
/* If redzoning, ensure that the second redzone is suitably
* aligned, by adjusting the object size accordingly. */
size += REDZONE_ALIGN - 1;
size &= ~(REDZONE_ALIGN - 1);
}
/* 2) arch mandated alignment */ /* 2) arch mandated alignment */
if (ralign < ARCH_SLAB_MINALIGN) { if (ralign < ARCH_SLAB_MINALIGN) {
@@ -2261,9 +2271,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
} }
if (flags & SLAB_STORE_USER) { if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of /* user store requires one word storage behind the end of
* the real object. * the real object. But if the second red zone needs to be
* aligned to 64 bits, we must allow that much space.
*/ */
size += BYTES_PER_WORD; if (flags & SLAB_RED_ZONE)
size += REDZONE_ALIGN;
else
size += BYTES_PER_WORD;
} }
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment