Commit 842bbaaa authored by Rusty Russell's avatar Rusty Russell Committed by Linus Torvalds

[PATCH] Module per-cpu alignment cannot always be met

The module code assumes no one will ever ask for a per-cpu area more than
SMP_CACHE_BYTES aligned.  However, as these cases show, gcc sometimes asks
for 32-byte alignment for the per-cpu section on a module, and if
CONFIG_X86_L1_CACHE_SHIFT is 4, we hit that BUG_ON().  This is obviously an
unusual combination, as there have been few reports, but better to warn
than die.

See:
	http://www.ussg.iu.edu/hypermail/linux/kernel/0409.0/0768.html

And more recently:
	http://bugs.gentoo.org/show_bug.cgi?id=97006

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 561fb765
...@@ -250,13 +250,18 @@ static inline unsigned int block_size(int val) ...@@ -250,13 +250,18 @@ static inline unsigned int block_size(int val)
/* Created by linker magic */ /* Created by linker magic */
extern char __per_cpu_start[], __per_cpu_end[]; extern char __per_cpu_start[], __per_cpu_end[];
static void *percpu_modalloc(unsigned long size, unsigned long align) static void *percpu_modalloc(unsigned long size, unsigned long align,
const char *name)
{ {
unsigned long extra; unsigned long extra;
unsigned int i; unsigned int i;
void *ptr; void *ptr;
BUG_ON(align > SMP_CACHE_BYTES); if (align > SMP_CACHE_BYTES) {
printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
name, align, SMP_CACHE_BYTES);
align = SMP_CACHE_BYTES;
}
ptr = __per_cpu_start; ptr = __per_cpu_start;
for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
...@@ -348,7 +353,8 @@ static int percpu_modinit(void) ...@@ -348,7 +353,8 @@ static int percpu_modinit(void)
} }
__initcall(percpu_modinit); __initcall(percpu_modinit);
#else /* ... !CONFIG_SMP */ #else /* ... !CONFIG_SMP */
/*
 * !CONFIG_SMP stub of percpu_modalloc().
 *
 * With SMP disabled there is no per-cpu area to carve module per-cpu
 * data out of, so always return NULL; the caller in load_module()
 * treats a NULL result as an allocation failure (-ENOMEM).  All three
 * parameters are accepted only to keep the signature identical to the
 * SMP implementation (which uses @name in its alignment warning).
 */
static inline void *percpu_modalloc(unsigned long size, unsigned long align,
				    const char *name)
{
	return NULL;
}
...@@ -1644,7 +1650,8 @@ static struct module *load_module(void __user *umod, ...@@ -1644,7 +1650,8 @@ static struct module *load_module(void __user *umod,
if (pcpuindex) { if (pcpuindex) {
/* We have a special allocation for this section. */ /* We have a special allocation for this section. */
percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
sechdrs[pcpuindex].sh_addralign); sechdrs[pcpuindex].sh_addralign,
mod->name);
if (!percpu) { if (!percpu) {
err = -ENOMEM; err = -ENOMEM;
goto free_mod; goto free_mod;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment