Commit e97e386b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: pack objects denser
  slub: Calculate min_objects based on number of processors.
  slub: Drop DEFAULT_MAX_ORDER / DEFAULT_MIN_OBJECTS
  slub: Simplify any_slab_object checks
  slub: Make the order configurable for each slab cache
  slub: Drop fallback to page allocator method
  slub: Fallback to minimal order during slab page allocation
  slub: Update statistics handling for variable order slabs
  slub: Add kmem_cache_order_objects struct
  slub: for_each_object must be passed the number of objects in a slab
  slub: Store max number of objects in the page struct.
  slub: Dump list of objects not freed on kmem_cache_close()
  slub: free_list() cleanup
  slub: improve kmem_cache_destroy() error message
  slob: fix bug - when slob allocates "struct kmem_cache", it does not force alignment.
parents d9dedc13 c124f5b5
@@ -31,7 +31,7 @@ struct slabinfo {
int hwcache_align, object_size, objs_per_slab;
int sanity_checks, slab_size, store_user, trace;
int order, poison, reclaim_account, red_zone;
-unsigned long partial, objects, slabs;
+unsigned long partial, objects, slabs, objects_partial, objects_total;
unsigned long alloc_fastpath, alloc_slowpath;
unsigned long free_fastpath, free_slowpath;
unsigned long free_frozen, free_add_partial, free_remove_partial;
@@ -540,7 +540,8 @@ void slabcache(struct slabinfo *s)
return;
store_size(size_str, slab_size(s));
-snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs, s->partial, s->cpu_slabs);
+snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs - s->cpu_slabs,
+	s->partial, s->cpu_slabs);
if (!line++)
first_line();
@@ -776,7 +777,6 @@ void totals(void)
unsigned long used;
unsigned long long wasted;
unsigned long long objwaste;
-long long objects_in_partial_slabs;
unsigned long percentage_partial_slabs;
unsigned long percentage_partial_objs;
@@ -790,18 +790,11 @@ void totals(void)
wasted = size - used;
objwaste = s->slab_size - s->object_size;
-objects_in_partial_slabs = s->objects -
-	(s->slabs - s->partial - s->cpu_slabs) *
-	s->objs_per_slab;
-if (objects_in_partial_slabs < 0)
-	objects_in_partial_slabs = 0;
percentage_partial_slabs = s->partial * 100 / s->slabs;
if (percentage_partial_slabs > 100)
percentage_partial_slabs = 100;
-percentage_partial_objs = objects_in_partial_slabs * 100
+percentage_partial_objs = s->objects_partial * 100
/ s->objects;
if (percentage_partial_objs > 100)
@@ -823,8 +816,8 @@ void totals(void)
min_objects = s->objects;
if (used < min_used)
min_used = used;
-if (objects_in_partial_slabs < min_partobj)
-	min_partobj = objects_in_partial_slabs;
+if (s->objects_partial < min_partobj)
+	min_partobj = s->objects_partial;
if (percentage_partial_slabs < min_ppart)
min_ppart = percentage_partial_slabs;
if (percentage_partial_objs < min_ppartobj)
@@ -848,8 +841,8 @@ void totals(void)
max_objects = s->objects;
if (used > max_used)
max_used = used;
-if (objects_in_partial_slabs > max_partobj)
-	max_partobj = objects_in_partial_slabs;
+if (s->objects_partial > max_partobj)
+	max_partobj = s->objects_partial;
if (percentage_partial_slabs > max_ppart)
max_ppart = percentage_partial_slabs;
if (percentage_partial_objs > max_ppartobj)
@@ -864,7 +857,7 @@ void totals(void)
total_objects += s->objects;
total_used += used;
-total_partobj += objects_in_partial_slabs;
+total_partobj += s->objects_partial;
total_ppart += percentage_partial_slabs;
total_ppartobj += percentage_partial_objs;
@@ -1160,6 +1153,8 @@ void read_slab_dir(void)
slab->hwcache_align = get_obj("hwcache_align");
slab->object_size = get_obj("object_size");
slab->objects = get_obj("objects");
+slab->objects_partial = get_obj("objects_partial");
+slab->objects_total = get_obj("objects_total");
slab->objs_per_slab = get_obj("objs_per_slab");
slab->order = get_obj("order");
slab->partial = get_obj("partial");
......
@@ -42,7 +42,10 @@ struct page {
* to show when page is mapped
* & limit reverse map searches.
*/
-unsigned int inuse;		/* SLUB: Nr of objects */
+struct {			/* SLUB */
+	u16 inuse;
+	u16 objects;
+};
};
union {
struct {
......
@@ -29,6 +29,7 @@ enum stat_item {
DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+ORDER_FALLBACK,	/* Number of times fallback was necessary */
NR_SLUB_STAT_ITEMS };
struct kmem_cache_cpu {
@@ -48,10 +49,20 @@ struct kmem_cache_node {
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
+atomic_long_t total_objects;
struct list_head full;
#endif
};
+/*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+	unsigned long x;
+};
/*
* Slab cache management.
*/
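As an aside on the packed word introduced above: a minimal sketch of how the order and the object count can be folded into a single unsigned long with a 16-bit split. The helper names and the shift below follow the style of the SLUB accessors, but treat them as an illustration under assumed constants, not as the exact code of this series:

static inline struct kmem_cache_order_objects oo_make(int order, unsigned long size)
{
	struct kmem_cache_order_objects x = {
		/* high bits hold the page order, low 16 bits the objects per slab */
		(order << 16) + (PAGE_SIZE << order) / size
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> 16;		/* recover the allocation order */
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & ((1 << 16) - 1);	/* recover the number of objects */
}

Because order and object count share one word, readers such as the sysfs reporting code can sample both together without extra locking.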
@@ -61,7 +72,7 @@ struct kmem_cache {
int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
-int order;			/* Current preferred allocation order */
+struct kmem_cache_order_objects oo;
/*
* Avoid an extra cache line for UP, SMP and for the node local to
@@ -70,7 +81,8 @@ struct kmem_cache {
struct kmem_cache_node local_node;
/* Allocation and freeing of slabs */
-int objects;			/* Number of objects in slab */
+struct kmem_cache_order_objects max;
+struct kmem_cache_order_objects min;
gfp_t allocflags; /* gfp flags to use on each alloc */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(struct kmem_cache *, void *);
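The max/min pair above is what the order-fallback patches in this series rely on: slab page allocation first tries the preferred order packed in oo and, when that fails due to fragmentation, retries with the minimal order and counts the event as ORDER_FALLBACK. A rough sketch of that flow, assuming hypothetical helpers alloc_slab_page(), get_cpu_slab() and stat(); the real allocate_slab() in mm/slub.c differs in detail:

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct kmem_cache_order_objects oo = s->oo;
	struct page *page;

	/* Try the preferred (possibly higher) order, but do not retry hard. */
	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node, oo);
	if (unlikely(!page)) {
		/* Memory is probably fragmented; retry with the minimal order. */
		oo = s->min;
		page = alloc_slab_page(flags, node, oo);
		if (!page)
			return NULL;
		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
	}

	/* Each slab now remembers how many objects it can hold. */
	page->objects = oo_objects(oo);
	return page;
}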
......
@@ -533,7 +533,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *c;
-c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
+c = slob_alloc(sizeof(struct kmem_cache),
+	flags, ARCH_KMALLOC_MINALIGN, -1);
if (c) {
c->name = name;
......
This diff is collapsed.