Commit b0a3054f authored by Stephen Rothwell's avatar Stephen Rothwell

Merge commit 'slab/for-next'

parents 5d00b860 70916654
......@@ -41,6 +41,8 @@ Possible debug options are
P Poisoning (object and padding)
U User tracking (free and alloc)
T Trace (please only use on single slabs)
O Switch debugging off for caches that would have
caused higher minimum slab orders
- Switch all debugging off (useful if the kernel is
configured with CONFIG_SLUB_DEBUG_ON)
......@@ -59,6 +61,14 @@ to the dentry cache with
slub_debug=F,dentry
Debugging options may require the minimum possible slab order to increase as
a result of storing the metadata (for example, caches with PAGE_SIZE object
sizes). This has a higher likelihood of resulting in slab allocation errors
in low memory situations or when memory is highly fragmented. To
switch off debugging for such caches by default, use
slub_debug=O
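To see why the minimum order can grow, here is a rough userspace sketch
(assuming 4 KiB pages and a hypothetical 64-byte per-object debug overhead;
the real overhead depends on which debug options are active). Storing the
metadata next to a PAGE_SIZE-sized object pushes the slab from an order-0 to
an order-1 allocation:

#include <stdio.h>

/* Stand-in for the kernel's get_order(): smallest order such that
   (4096 << order) covers size. */
static int get_order(unsigned long size)
{
        int order = 0;
        unsigned long span = 4096;

        while (span < size) {
                span <<= 1;
                order++;
        }
        return order;
}

int main(void)
{
        unsigned long objsize = 4096;   /* PAGE_SIZE-sized object             */
        unsigned long debug_meta = 64;  /* assumed red-zone/tracking overhead */

        printf("min order without debugging: %d\n", get_order(objsize));
        printf("min order with debugging:    %d\n", get_order(objsize + debug_meta));
        return 0;
}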
In case you forgot to enable debugging on the kernel command line: It is
possible to enable debugging manually when the kernel is up. Look at the
contents of:
......
#ifndef __LINUX_RCU_TYPES_H
#define __LINUX_RCU_TYPES_H
#ifdef __KERNEL__
/**
* struct rcu_head - callback structure for use with RCU
* @next: next update requests in a list
* @func: actual update function to call after the grace period.
*/
struct rcu_head {
struct rcu_head *next;
void (*func)(struct rcu_head *head);
};
#endif
#endif
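For context on how this structure is used (a hedged kernel-style sketch, not
part of this diff; struct foo and both function names are illustrative): the
rcu_head is embedded in the object it protects and handed to call_rcu(), whose
callback runs after a grace period and recovers the object with container_of():

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {                            /* illustrative structure */
        int data;
        struct rcu_head rcu;            /* embedded callback structure */
};

static void foo_free_rcu(struct rcu_head *head)
{
        struct foo *f = container_of(head, struct foo, rcu);

        kfree(f);
}

static void foo_release(struct foo *f)
{
        /* Defer the actual free until a grace period has elapsed. */
        call_rcu(&f->rcu, foo_free_rcu);
}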
......@@ -33,6 +33,7 @@
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
#include <linux/rcu_types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
......@@ -41,16 +42,6 @@
#include <linux/lockdep.h>
#include <linux/completion.h>
-/**
-* struct rcu_head - callback structure for use with RCU
-* @next: next update requests in a list
-* @func: actual update function to call after the grace period.
-*/
-struct rcu_head {
-struct rcu_head *next;
-void (*func)(struct rcu_head *head);
-};
/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void synchronize_rcu_bh(void);
......
......@@ -74,6 +74,10 @@
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/* Following flags should only be used by allocator specific flags */
#define SLAB_ALLOC_PRIVATE 0x000000ffUL
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
......@@ -160,6 +164,8 @@ size_t ksize(const void *);
*/
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLQB)
#include <linux/slqb_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
......@@ -262,7 +268,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || defined(CONFIG_SLQB_DEBUG)
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
......@@ -280,7 +286,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
* standard allocator where we care about the real place the memory
* allocation request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || defined(CONFIG_SLQB_DEBUG)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
......
......@@ -34,9 +34,4 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
return kmalloc(size, flags);
}
-static inline void kmem_cache_init_late(void)
-{
-/* Nothing to do */
-}
#endif /* __LINUX_SLOB_DEF_H */
#ifndef _LINUX_SLQB_DEF_H
#define _LINUX_SLQB_DEF_H
/*
* SLQB : A slab allocator with object queues.
*
* (C) 2008 Nick Piggin <npiggin@suse.de>
*/
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/rcu_types.h>
#include <linux/mm_types.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#define SLAB_NUMA 0x00000001UL /* shortcut */
enum stat_item {
ALLOC, /* Allocation count */
ALLOC_SLAB_FILL, /* Fill freelist from page list */
ALLOC_SLAB_NEW, /* New slab acquired from page allocator */
FREE, /* Free count */
FREE_REMOTE, /* NUMA: freeing to remote list */
FLUSH_FREE_LIST, /* Freelist flushed */
FLUSH_FREE_LIST_OBJECTS, /* Objects flushed from freelist */
FLUSH_FREE_LIST_REMOTE, /* Objects flushed from freelist to remote */
FLUSH_SLAB_PARTIAL, /* Freeing moves slab to partial list */
FLUSH_SLAB_FREE, /* Slab freed to the page allocator */
FLUSH_RFREE_LIST, /* Rfree list flushed */
FLUSH_RFREE_LIST_OBJECTS, /* Rfree objects flushed */
CLAIM_REMOTE_LIST, /* Remote freed list claimed */
CLAIM_REMOTE_LIST_OBJECTS, /* Remote freed objects claimed */
NR_SLQB_STAT_ITEMS
};
/*
* Singly-linked list with head, tail, and nr
*/
struct kmlist {
unsigned long nr;
void **head;
void **tail;
};
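A minimal userspace sketch of how such a list can serve as a LIFO freelist,
assuming the link pointer lives inside the free object itself (SLQB keeps it
at the cache's free-pointer offset; the helpers below are illustrative, not
SLQB's actual functions):

#include <stddef.h>
#include <assert.h>

struct kmlist {
        unsigned long nr;
        void **head;
        void **tail;
};

/* Push a free object: the object's first word becomes the link pointer. */
static void kmlist_push(struct kmlist *l, void *object)
{
        void **obj = object;

        *obj = l->head;                 /* link to the previous head     */
        l->head = obj;
        if (!l->tail)
                l->tail = obj;          /* first object is also the tail */
        l->nr++;
}

/* Pop the most recently freed object (LIFO order). */
static void *kmlist_pop(struct kmlist *l)
{
        void **obj = l->head;

        if (!obj)
                return NULL;
        l->head = *obj;
        if (!l->head)
                l->tail = NULL;
        l->nr--;
        return obj;
}

int main(void)
{
        struct kmlist list = { 0, NULL, NULL };
        long a, b;

        kmlist_push(&list, &a);
        kmlist_push(&list, &b);
        assert(kmlist_pop(&list) == (void *)&b);        /* LIFO */
        assert(kmlist_pop(&list) == (void *)&a);
        assert(list.nr == 0);
        return 0;
}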
/*
* Every kmem_cache_list has a kmem_cache_remote_free structure, by which
* objects can be returned to the kmem_cache_list from remote CPUs.
*/
struct kmem_cache_remote_free {
spinlock_t lock;
struct kmlist list;
} ____cacheline_aligned;
/*
* A kmem_cache_list manages all the slabs and objects allocated from a given
* source. Per-cpu kmem_cache_lists allow node-local allocations. Per-node
* kmem_cache_lists allow off-node allocations (but require locking).
*/
struct kmem_cache_list {
/* Fastpath LIFO freelist of objects */
struct kmlist freelist;
#ifdef CONFIG_SMP
/* remote_free has reached a watermark */
int remote_free_check;
#endif
/* kmem_cache corresponding to this list */
struct kmem_cache *cache;
/* Number of partial slabs (pages) */
unsigned long nr_partial;
/* Slabs which have some free objects */
struct list_head partial;
/* Total number of slabs allocated */
unsigned long nr_slabs;
/* Protects nr_partial, nr_slabs, and partial */
spinlock_t page_lock;
#ifdef CONFIG_SMP
/*
* In the case of per-cpu lists, remote_free is for objects freed by a
* non-owner CPU back to the object's home list. For per-node lists,
* remote_free is always used to free objects.
*/
struct kmem_cache_remote_free remote_free;
#endif
#ifdef CONFIG_SLQB_STATS
unsigned long stats[NR_SLQB_STAT_ITEMS];
#endif
} ____cacheline_aligned;
/*
* Primary per-cpu, per-kmem_cache structure.
*/
struct kmem_cache_cpu {
struct kmem_cache_list list; /* List for node-local slabs */
unsigned int colour_next; /* Next colour offset to use */
#ifdef CONFIG_SMP
/*
* rlist is a list of objects that don't fit on list.freelist (i.e.
* wrong node). The objects all correspond to a given kmem_cache_list,
* remote_cache_list. To free objects to another list, we must first
* flush the existing objects, then switch remote_cache_list.
*
* An NR_CPUS or MAX_NUMNODES array would be nice here, but then we
* would end up with O(NR_CPUS^2) memory consumption.
*/
struct kmlist rlist;
struct kmem_cache_list *remote_cache_list;
#endif
} ____cacheline_aligned_in_smp;
/*
* Per-node, per-kmem_cache structure. Used for node-specific allocations.
*/
struct kmem_cache_node {
struct kmem_cache_list list;
spinlock_t list_lock; /* protects access to list */
} ____cacheline_aligned;
/*
* Management object for a slab cache.
*/
struct kmem_cache {
unsigned long flags;
int hiwater; /* LIFO list high watermark */
int freebatch; /* LIFO freelist batch flush size */
#ifdef CONFIG_SMP
struct kmem_cache_cpu **cpu_slab; /* dynamic per-cpu structures */
#else
struct kmem_cache_cpu cpu_slab;
#endif
int objsize; /* Size of object without meta data */
int offset; /* Free pointer offset. */
int objects; /* Number of objects in slab */
#ifdef CONFIG_NUMA
struct kmem_cache_node **node_slab; /* dynamic per-node structures */
#endif
int size; /* Size of object including meta data */
int order; /* Allocation order */
gfp_t allocflags; /* gfp flags to use on allocation */
unsigned int colour_range; /* range of colour counter */
unsigned int colour_off; /* offset per colour */
void (*ctor)(void *);
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
int align; /* Alignment */
int inuse; /* Offset to metadata */
#ifdef CONFIG_SLQB_SYSFS
struct kobject kobj; /* For sysfs */
#endif
} ____cacheline_aligned;
/*
* Kmalloc subsystem.
*/
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#define KMALLOC_SHIFT_SLQB_HIGH (PAGE_SHIFT + \
((9 <= (MAX_ORDER - 1)) ? 9 : (MAX_ORDER - 1)))
extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_SLQB_HIGH + 1];
extern struct kmem_cache kmalloc_caches_dma[KMALLOC_SHIFT_SLQB_HIGH + 1];
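For a sense of scale: assuming 4 KiB pages (PAGE_SHIFT = 12) and the common
MAX_ORDER of 11, the min(9, MAX_ORDER - 1) term is 9, so
KMALLOC_SHIFT_SLQB_HIGH is 21 and the arrays above cover kmalloc sizes up to
2 MiB. A small userspace check of that arithmetic:

#include <assert.h>

#define PAGE_SHIFT 12   /* assumed: 4 KiB pages     */
#define MAX_ORDER  11   /* assumed: default setting */

#define KMALLOC_SHIFT_SLQB_HIGH (PAGE_SHIFT + \
                ((9 <= (MAX_ORDER - 1)) ? 9 : (MAX_ORDER - 1)))

int main(void)
{
        assert(KMALLOC_SHIFT_SLQB_HIGH == 21);
        assert((1UL << KMALLOC_SHIFT_SLQB_HIGH) == 2UL * 1024 * 1024);
        return 0;
}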
/*
* Constant-size allocations use this path to find the index into the kmalloc
* cache arrays. The get_slab() function is used for non-constant sizes.
*/
static __always_inline int kmalloc_index(size_t size)
{
extern int ____kmalloc_too_large(void);
if (unlikely(size <= KMALLOC_MIN_SIZE))
return KMALLOC_SHIFT_LOW;
#if L1_CACHE_BYTES < 64
if (size > 64 && size <= 96)
return 1;
#endif
#if L1_CACHE_BYTES < 128
if (size > 128 && size <= 192)
return 2;
#endif
if (size <= 8) return 3;
if (size <= 16) return 4;
if (size <= 32) return 5;
if (size <= 64) return 6;
if (size <= 128) return 7;
if (size <= 256) return 8;
if (size <= 512) return 9;
if (size <= 1024) return 10;
if (size <= 2 * 1024) return 11;
if (size <= 4 * 1024) return 12;
if (size <= 8 * 1024) return 13;
if (size <= 16 * 1024) return 14;
if (size <= 32 * 1024) return 15;
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
if (size <= 4 * 1024 * 1024) return 22;
if (size <= 8 * 1024 * 1024) return 23;
if (size <= 16 * 1024 * 1024) return 24;
if (size <= 32 * 1024 * 1024) return 25;
return ____kmalloc_too_large();
}
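As a rough illustration of the resulting mapping (ignoring the 96- and
192-byte special cases at indices 1 and 2), here is a simplified userspace
stand-in, not the kernel function:

#include <assert.h>

/* Simplified stand-in for kmalloc_index(): round up to the next power of
   two, with index 3 corresponding to the smallest (8-byte) cache. */
static int simple_kmalloc_index(unsigned long size)
{
        int index = 3;

        while ((1UL << index) < size)
                index++;
        return index;
}

int main(void)
{
        assert(simple_kmalloc_index(8)    == 3);        /* kmalloc-8    */
        assert(simple_kmalloc_index(100)  == 7);        /* kmalloc-128  */
        assert(simple_kmalloc_index(4096) == 12);       /* kmalloc-4096 */
        return 0;
}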
#ifdef CONFIG_ZONE_DMA
#define SLQB_DMA __GFP_DMA
#else
/* Disable "DMA slabs" */
#define SLQB_DMA (__force gfp_t)0
#endif
/*
* Find the kmalloc slab cache for a given combination of allocation flags and
* size. Should really only be used for constant 'size' arguments, due to
* bloat.
*/
static __always_inline struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
int index;
if (unlikely(size > 1UL << KMALLOC_SHIFT_SLQB_HIGH))
return NULL;
if (unlikely(!size))
return ZERO_SIZE_PTR;
index = kmalloc_index(size);
if (likely(!(flags & SLQB_DMA)))
return &kmalloc_caches[index];
else
return &kmalloc_caches_dma[index];
}
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
#define KMALLOC_HEADER (ARCH_KMALLOC_MINALIGN < sizeof(void *) ? \
sizeof(void *) : ARCH_KMALLOC_MINALIGN)
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
struct kmem_cache *s;
s = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return kmem_cache_alloc(s, flags);
}
return __kmalloc(size, flags);
}
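A hedged sketch of how a caller sees the two paths (kernel-context code, not
compiled here; the function name is illustrative): a compile-time-constant
size lets kmalloc_slab() and kmalloc_index() fold away into a direct
kmem_cache_alloc() on the matching kmalloc cache, while a runtime size falls
back to __kmalloc():

#include <linux/slab.h>

static void *example_alloc(size_t runtime_len)
{
        void *a, *b;

        /* Constant size: resolved at compile time to the kmalloc-128 cache. */
        a = kmalloc(100, GFP_KERNEL);

        /* Non-constant size: goes through __kmalloc(), which looks the
         * cache up at run time. */
        b = kmalloc(runtime_len, GFP_KERNEL);

        kfree(a);
        return b;
}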
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size)) {
struct kmem_cache *s;
s = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return kmem_cache_alloc_node(s, flags, node);
}
return __kmalloc_node(size, flags, node);
}
#endif
#endif /* _LINUX_SLQB_DEF_H */
......@@ -153,12 +153,10 @@ static __always_inline int kmalloc_index(size_t size)
if (size <= KMALLOC_MIN_SIZE)
return KMALLOC_SHIFT_LOW;
-#if KMALLOC_MIN_SIZE <= 64
-if (size > 64 && size <= 96)
+if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
return 1;
-if (size > 128 && size <= 192)
+if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
return 2;
-#endif
if (size <= 8) return 3;
if (size <= 16) return 4;
if (size <= 32) return 5;
......@@ -304,6 +302,4 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
}
#endif
void __init kmem_cache_init_late(void);
#endif /* _LINUX_SLUB_DEF_H */
......@@ -1010,7 +1010,7 @@ config COMPAT_BRK
choice
prompt "Choose SLAB allocator"
-default SLUB
+default SLQB
help
This option allows you to select a slab allocator.
......@@ -1031,6 +1031,11 @@ config SLUB
and has enhanced diagnostics. SLUB is the default choice for
a slab allocator.
config SLQB
bool "SLQB (Queued allocator)"
help
SLQB is a proposed new slab allocator.
config SLOB
depends on EMBEDDED
bool "SLOB (Simple Allocator)"
......@@ -1086,7 +1091,7 @@ config HAVE_GENERIC_DMA_COHERENT
config SLABINFO
bool
depends on PROC_FS
-depends on SLAB || SLUB_DEBUG
+depends on SLAB || SLUB_DEBUG || SLQB
default y
config RT_MUTEXES
......
......@@ -336,6 +336,26 @@ config SLUB_STATS
out which slabs are relevant to a particular load.
Try running: slabinfo -DA
config SLQB_DEBUG
default y
bool "Enable SLQB debugging support"
depends on SLQB
config SLQB_DEBUG_ON
default n
bool "SLQB debugging on by default"
depends on SLQB_DEBUG
config SLQB_SYSFS
bool "Create SYSFS entries for slab caches"
default n
depends on SLQB
config SLQB_STATS
bool "Enable SLQB performance statistics"
default n
depends on SLQB_SYSFS
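For reference, a .config fragment that selects SLQB together with the
debugging and statistics support introduced above might look as follows
(which symbols appear is, as usual, subject to the Kconfig dependencies):

CONFIG_SLQB=y
CONFIG_SLQB_DEBUG=y
# CONFIG_SLQB_DEBUG_ON is not set
CONFIG_SLQB_SYSFS=y
CONFIG_SLQB_STATS=y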
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM || PPC) && \
......
......@@ -28,6 +28,7 @@ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_SLQB) += slqb.o
obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
......
......@@ -692,3 +692,8 @@ void __init kmem_cache_init(void)
{
slob_ready = 1;
}
void __init kmem_cache_init_late(void)
{
/* Nothing to do */
}
......@@ -140,6 +140,13 @@
#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_STORE_USER)
/*
* Debugging flags that require metadata to be stored in the slab. These get
* disabled when slub_debug=O is used and a cache's minimum order would
* otherwise increase because of the metadata.
*/
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
/*
* Set of flags that will prevent slab merging
*/
......@@ -325,6 +332,7 @@ static int slub_debug;
#endif
static char *slub_debug_slabs;
static int disable_higher_order_debug;
/*
* Object debugging
......@@ -646,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
print_section("Padding", end - remainder, remainder);
restore_bytes(s, "slab padding", POISON_INUSE, start, end);
restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
return 0;
}
......@@ -976,6 +984,15 @@ static int __init setup_slub_debug(char *str)
*/
goto check_slabs;
if (tolower(*str) == 'o') {
/*
* Avoid enabling debugging on caches whose minimum order
* would increase as a result.
*/
disable_higher_order_debug = 1;
goto out;
}
slub_debug = 0;
if (*str == '-')
/*
......@@ -1026,8 +1043,8 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
* Enable debugging if selected on the kernel commandline.
*/
if (slub_debug && (!slub_debug_slabs ||
-strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
-flags |= slub_debug;
+!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+flags |= slub_debug;
return flags;
}
......@@ -1109,8 +1126,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
}
if (kmemcheck_enabled
-&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
-{
+&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);
kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
......@@ -1560,6 +1576,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
"default order: %d, min order: %d\n", s->name, s->objsize,
s->size, oo_order(s->oo), oo_order(s->min));
if (oo_order(s->min) > get_order(s->objsize))
printk(KERN_WARNING " %s debugging increased min order, use "
"slub_debug=O to disable.\n", s->name);
for_each_online_node(node) {
struct kmem_cache_node *n = get_node(s, node);
unsigned long nr_slabs;
......@@ -2001,7 +2021,7 @@ static inline int calculate_order(int size)
return order;
fraction /= 2;
}
-min_objects --;
+min_objects--;
}
/*
......@@ -2400,6 +2420,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* on bootup.
*/
align = calculate_alignment(flags, align, s->objsize);
s->align = align;
/*
* SLUB stores one object immediately after another beginning from
......@@ -2452,6 +2473,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
if (!calculate_sizes(s, -1))
goto error;
if (disable_higher_order_debug) {
/*
* Disable debugging flags that store metadata if the min slab
* order increased.
*/
if (get_order(s->size) > get_order(s->objsize)) {
s->flags &= ~DEBUG_METADATA_FLAGS;
s->offset = 0;
if (!calculate_sizes(s, -1))
goto error;
}
}
/*
* The larger the object size is, the more pages we want on the partial
......@@ -2790,6 +2823,11 @@ static s8 size_index[24] = {
2 /* 192 */
};
static inline int size_index_elem(size_t bytes)
{
return (bytes - 1) / 8;
}
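A quick userspace check of the mapping, restating the helper locally: each
size_index[] slot covers one 8-byte range, so the 24-entry table spans request
sizes 1..192:

#include <assert.h>
#include <stddef.h>

static int size_index_elem(size_t bytes)
{
        return (bytes - 1) / 8;
}

int main(void)
{
        assert(size_index_elem(1)   == 0);      /* sizes   1..8   -> slot 0  */
        assert(size_index_elem(96)  == 11);     /* sizes  89..96  -> slot 11 */
        assert(size_index_elem(192) == 23);     /* sizes 185..192 -> slot 23 */
        return 0;
}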
static struct kmem_cache *get_slab(size_t size, gfp_t flags)
{
int index;
......@@ -2798,7 +2836,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
if (!size)
return ZERO_SIZE_PTR;
-index = size_index[(size - 1) / 8];
+index = size_index[size_index_elem(size)];
} else
index = fls(size - 1);
......@@ -3156,10 +3194,12 @@ void __init kmem_cache_init(void)
slab_state = PARTIAL;
/* Caches that are not of the two-to-the-power-of size */
-if (KMALLOC_MIN_SIZE <= 64) {
+if (KMALLOC_MIN_SIZE <= 32) {
create_kmalloc_cache(&kmalloc_caches[1],
"kmalloc-96", 96, GFP_NOWAIT);
caches++;
}
if (KMALLOC_MIN_SIZE <= 64) {
create_kmalloc_cache(&kmalloc_caches[2],
"kmalloc-192", 192, GFP_NOWAIT);
caches++;
......@@ -3186,17 +3226,28 @@ void __init kmem_cache_init(void)
BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
-for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
-size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+int elem = size_index_elem(i);
+if (elem >= ARRAY_SIZE(size_index))
+break;
+size_index[elem] = KMALLOC_SHIFT_LOW;
+}
-if (KMALLOC_MIN_SIZE == 128) {
+if (KMALLOC_MIN_SIZE == 64) {
/*
* The 96 byte size cache is not used if the alignment
* is 64 byte.
*/
for (i = 64 + 8; i <= 96; i += 8)
size_index[size_index_elem(i)] = 7;
} else if (KMALLOC_MIN_SIZE == 128) {
/*
* The 192 byte sized cache is not used if the alignment
* is 128 byte. Redirect kmalloc to use the 256 byte cache
* instead.
*/
for (i = 128 + 8; i <= 192; i += 8)
-size_index[(i - 1) / 8] = 8;
+size_index[size_index_elem(i)] = 8;
}
slab_state = UP;
......@@ -4543,8 +4594,11 @@ static int sysfs_slab_add(struct kmem_cache *s)
}
err = sysfs_create_group(&s->kobj, &slab_attr_group);
-if (err)
+if (err) {
kobject_del(&s->kobj);
kobject_put(&s->kobj);
return err;
}
kobject_uevent(&s->kobj, KOBJ_ADD);
if (!unmergeable) {
/* Setup first alias */
......@@ -4726,7 +4780,7 @@ static const struct file_operations proc_slabinfo_operations = {
static int __init slab_proc_init(void)
{
proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
return 0;
}
module_init(slab_proc_init);
......