Commit 6e19df96 authored by Nobuhiro Iwamatsu, committed by Pekka Enberg

SLQB: Fix UP + NUMA build

This patch fixes the following build breakage which happens when CONFIG_NUMA is
enabled but CONFIG_SMP is disabled:

    CC      mm/slqb.o
  mm/slqb.c: In function '__slab_free':
  mm/slqb.c:1735: error: implicit declaration of function 'slab_free_to_remote'
  mm/slqb.c: In function 'kmem_cache_open':
  mm/slqb.c:2274: error: implicit declaration of function 'kmem_cache_dyn_array_free'
  mm/slqb.c:2275: warning: label 'error_cpu_array' defined but not used
  mm/slqb.c: In function 'kmem_cache_destroy':
  mm/slqb.c:2395: error: implicit declaration of function 'claim_remote_free_list'
  mm/slqb.c: In function 'kmem_cache_init':
  mm/slqb.c:2885: error: 'per_cpu__kmem_cpu_nodes' undeclared (first use in this function)
  mm/slqb.c:2885: error: (Each undeclared identifier is reported only once
  mm/slqb.c:2885: error: for each function it appears in.)
  mm/slqb.c:2886: error: 'kmem_cpu_cache' undeclared (first use in this function)
  make[1]: *** [mm/slqb.o] Error 1
  make: *** [mm] Error 2

As x86 Kconfig doesn't even allow this combination, one is tempted to
think it's an architecture Kconfig bug. But as it turns out, it's a
perfectly valid configuration. Tony Luck explains:

  UP + NUMA is a special case of memory-only nodes.  There are some (crazy?)
  customers with problems that require very large amounts of memory, but not very
  much cpu horse power.  They buy large multi-node systems and populate all the
  nodes with as much memory as they can afford, but most nodes get zero cpus.

So let's fix that up.
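The approach boils down to the usual kernel stub pattern: keep the real
implementation under CONFIG_SMP and provide an empty inline version for the
!SMP case, so the call sites need no #ifdef guards. Below is a minimal,
standalone sketch of that pattern only (the struct cache / claim_remote names
are hypothetical, not the actual SLQB code); it builds and runs both with and
without -DCONFIG_SMP:

  #include <stdio.h>

  struct cache {
  	int nr_remote;		/* objects queued by "remote" CPUs */
  };

  #ifdef CONFIG_SMP
  /* Real work only makes sense when other CPUs can queue objects. */
  static void claim_remote(struct cache *c)
  {
  	printf("claimed %d remote objects\n", c->nr_remote);
  	c->nr_remote = 0;
  }
  #else
  /* Empty stub: on UP there is nothing to claim, callers stay #ifdef-free. */
  static inline void claim_remote(struct cache *c)
  {
  	(void)c;
  }
  #endif

  int main(void)
  {
  	struct cache c = { .nr_remote = 3 };

  	claim_remote(&c);	/* compiles and runs in both configurations */
  	return 0;
  }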

[ tony.luck@intel.com: #ifdef cleanups ]
Signed-off-by: Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 52f4d010
--- a/mm/slqb.c
+++ b/mm/slqb.c
@@ -1224,6 +1224,11 @@ static void claim_remote_free_list(struct kmem_cache *s,
 	slqb_stat_inc(l, CLAIM_REMOTE_LIST);
 	slqb_stat_add(l, CLAIM_REMOTE_LIST_OBJECTS, nr);
 }
+#else
+static inline void claim_remote_free_list(struct kmem_cache *s,
+				struct kmem_cache_list *l)
+{
+}
 #endif
 
 /*
@@ -1728,7 +1733,7 @@ static __always_inline void __slab_free(struct kmem_cache *s,
 			flush_free_list(s, l);
 
 	} else {
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_SMP
 		/*
 		 * Freeing an object that was allocated on a remote node.
 		 */
@@ -1937,7 +1942,9 @@ static DEFINE_PER_CPU(struct kmem_cache_node, kmem_cpu_nodes); /* XXX per-nid */
 
 #ifdef CONFIG_NUMA
 static struct kmem_cache kmem_node_cache;
+#ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct kmem_cache_cpu, kmem_node_cpus);
+#endif
 static DEFINE_PER_CPU(struct kmem_cache_node, kmem_node_nodes); /*XXX per-nid */
 #endif
 
@@ -2270,7 +2277,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 error_nodes:
 	free_kmem_cache_nodes(s);
 error_node_array:
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) && defined(CONFIG_SMP)
 	kmem_cache_dyn_array_free(s->node_slab);
 error_cpu_array:
 #endif
@@ -2370,9 +2377,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 		struct kmem_cache_list *l = &c->list;
 
-#ifdef CONFIG_SMP
 		claim_remote_free_list(s, l);
-#endif
 		flush_free_list_all(s, l);
 
 		WARN_ON(l->freelist.nr);
@@ -2595,9 +2600,7 @@ static void kmem_cache_trim_percpu(void *arg)
 	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 	struct kmem_cache_list *l = &c->list;
 
-#ifdef CONFIG_SMP
 	claim_remote_free_list(s, l);
-#endif
 	flush_free_list(s, l);
 #ifdef CONFIG_SMP
 	flush_remote_free_cache(s, c);
@@ -2881,11 +2884,11 @@ void __init kmem_cache_init(void)
 		n = &per_cpu(kmem_cache_nodes, i);
 		init_kmem_cache_node(&kmem_cache_cache, n);
 		kmem_cache_cache.node_slab[i] = n;
-
+#ifdef CONFIG_SMP
 		n = &per_cpu(kmem_cpu_nodes, i);
 		init_kmem_cache_node(&kmem_cpu_cache, n);
 		kmem_cpu_cache.node_slab[i] = n;
-
+#endif
 		n = &per_cpu(kmem_node_nodes, i);
 		init_kmem_cache_node(&kmem_node_cache, n);
 		kmem_node_cache.node_slab[i] = n;