Project: linux / linux-davinci

Commit e2b093f3
Authored Mar 04, 2010 by Pekka Enberg

Merge branches 'slab/cleanups', 'slab/failslab', 'slab/fixes' and 'slub/percpu' into slab-for-linus

Parents: eaa5eec7 f3186a9c 4c13dd3b 44b57f1c 91efd773
Showing 7 changed files with 146 additions and 260 deletions (+146 -260)
Documentation/vm/slub.txt        +1    -0
include/linux/fault-inject.h     +3    -2
include/linux/slab.h             +5    -0
include/linux/slub_def.h         +12   -15
mm/failslab.c                    +15   -3
mm/slab.c                        +6    -7
mm/slub.c                        +104  -233
Documentation/vm/slub.txt
...
@@ -41,6 +41,7 @@ Possible debug options are
 	P		Poisoning (object and padding)
 	U		User tracking (free and alloc)
 	T		Trace (please only use on single slabs)
+	A		Toggle failslab filter mark for the cache
 	O		Switch debugging off for caches that would have
 			caused higher minimum slab orders
 	-		Switch all debugging off (useful if the kernel is
...
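In practice the new A mark is applied like the other per-cache debug options, for example through the slub_debug boot parameter (slub_debug=A,<cache name>, where <cache name> is whichever cache you want to target, e.g. a hypothetical kmalloc-64). Once a cache carries the mark, the failslab cache-filter knob added in mm/failslab.c below restricts injected allocation failures to that cache.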
include/linux/fault-inject.h
...
@@ -82,9 +82,10 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 #endif /* CONFIG_FAULT_INJECTION */
 
 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags);
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
 #else
-static inline bool should_failslab(size_t size, gfp_t gfpflags)
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+				unsigned long flags)
 {
 	return false;
 }
...
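The extra parameter carries the allocating cache's flag word so the filter can match per-cache marks. A minimal, illustrative sketch of an allocator-side call site (the wrapper name is made up; the real callers are in mm/slab.c below and in the collapsed mm/slub.c diff):

/* Illustrative only: a hypothetical wrapper showing how a slab allocator
 * forwards its cache flags into the widened hook. */
static inline bool example_should_fail_alloc(struct kmem_cache *s,
					     size_t size, gfp_t gfpflags)
{
	return should_failslab(size, gfpflags, s->flags);
}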
include/linux/slab.h
...
@@ -70,6 +70,11 @@
 #else
 # define SLAB_NOTRACK		0x00000000UL
 #endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+#else
+# define SLAB_FAILSLAB		0x00000000UL
+#endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
...
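The fallback definition keeps the filter free when fault injection is compiled out: with CONFIG_FAILSLAB unset, SLAB_FAILSLAB is 0, so a test such as (cache_flags & SLAB_FAILSLAB) folds to a compile-time constant. In this series a cache acquires the mark through the SLUB 'A' debug option documented above.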
include/linux/slub_def.h
...
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
-	unsigned int offset;	/* Freepointer offset (in word units) */
-	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
...
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
+	struct kmem_cache_cpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
...
@@ -104,11 +103,6 @@ struct kmem_cache {
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-	struct kmem_cache_cpu cpu_slab;
-#endif
 };
 
 /*
...
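The removed NR_CPUS-sized array is replaced by a single dynamically allocated percpu area; the mm/slub.c side of that change is in the collapsed diff below. A minimal, illustrative sketch of the pattern, with made-up helper names rather than the real mm/slub.c functions:

/* Illustrative sketch of the dynamic percpu pattern, not the literal
 * mm/slub.c code: one struct kmem_cache_cpu per possible CPU, reached
 * through the percpu pointer stored in the cache. */
static int example_alloc_cpu_slabs(struct kmem_cache *s)
{
	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
	if (!s->cpu_slab)
		return -ENOMEM;
	return 0;
}

static struct kmem_cache_cpu *example_cpu_slab(struct kmem_cache *s)
{
	/* CPU-local instance; caller is expected to have preemption disabled. */
	return this_cpu_ptr(s->cpu_slab);
}

static void example_free_cpu_slabs(struct kmem_cache *s)
{
	free_percpu(s->cpu_slab);
}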
@@ -135,11 +129,21 @@
 #define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)
 
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA	(__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
...
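For a 4 KiB page size (PAGE_SHIFT = 12), SLUB_PAGE_SHIFT is 14, so KMALLOC_CACHES works out to 2 * 14 - 6 = 22 entries with CONFIG_ZONE_DMA and 14 without; the extra slots reserve static kmem_cache structures for the DMA kmalloc variants, matching the "Reserve extra caches for potential DMA use" comment.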
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA	(__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
...
mm/failslab.c
 #include <linux/fault-inject.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 static struct {
 	struct fault_attr attr;
 	u32 ignore_gfp_wait;
+	int cache_filter;
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 	struct dentry *ignore_gfp_wait_file;
+	struct dentry *cache_filter_file;
 #endif
 } failslab = {
 	.attr = FAULT_ATTR_INITIALIZER,
 	.ignore_gfp_wait = 1,
+	.cache_filter = 0,
 };
 
-bool should_failslab(size_t size, gfp_t gfpflags)
+bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 {
 	if (gfpflags & __GFP_NOFAIL)
 		return false;
...
@@ -20,6 +24,9 @@ bool should_failslab(size_t size, gfp_t gfpflags)
 	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
 		return false;
 
+	if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
+		return false;
+
 	return should_fail(&failslab.attr, size);
 }
...
@@ -30,7 +37,6 @@ static int __init setup_failslab(char *str)
 __setup("failslab=", setup_failslab);
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 static int __init failslab_debugfs_init(void)
 {
 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
...
@@ -46,8 +52,14 @@ static int __init failslab_debugfs_init(void)
 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
 				      &failslab.ignore_gfp_wait);
-	if (!failslab.ignore_gfp_wait_file) {
+
+	failslab.cache_filter_file =
+		debugfs_create_bool("cache-filter", mode, dir,
+				      &failslab.cache_filter);
+
+	if (!failslab.ignore_gfp_wait_file ||
+	    !failslab.cache_filter_file) {
 		err = -ENOMEM;
+		debugfs_remove(failslab.cache_filter_file);
 		debugfs_remove(failslab.ignore_gfp_wait_file);
 		cleanup_fault_attr_dentries(&failslab.attr);
 	}
...
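With debugfs mounted (conventionally at /sys/kernel/debug), the new cache-filter boolean appears alongside ignore-gfp-wait under the failslab fault-attribute directory; when set, should_failslab() only injects failures into allocations from caches whose flags include SLAB_FAILSLAB, and every other cache is left alone.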
mm/slab.c
...
@@ -935,7 +935,6 @@ static int transfer_objects(struct array_cache *to,
 	from->avail -= nr;
 	to->avail += nr;
-	to->touched = 1;
 	return nr;
 }
...
@@ -983,13 +982,11 @@ static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, gfp, node);
+	ac_ptr = kzalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
-			if (i == node || !node_online(i)) {
-				ac_ptr[i] = NULL;
+			if (i == node || !node_online(i))
 				continue;
-			}
 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
...
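Allocating the pointer array with kzalloc_node() means the entries the loop now skips (the local node and offline nodes) are already NULL, so the explicit ac_ptr[i] = NULL store, and the braces it required, can be dropped.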
@@ -2963,8 +2960,10 @@ retry:
 	spin_lock(&l3->list_lock);
 
 	/* See if we can refill from the shared array */
-	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
+		l3->shared->touched = 1;
 		goto alloc_done;
+	}
 
 	while (batchcount > 0) {
 		struct list_head *entry;
...
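Together with the transfer_objects() hunk above, this moves the touched update to the array that was actually used: refilling from the shared array marks l3->shared as touched in the refill path, instead of transfer_objects() unconditionally marking its destination.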
@@ -3101,7 +3100,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 	if (cachep == &cache_cache)
 		return false;
 
-	return should_failslab(obj_size(cachep), flags);
+	return should_failslab(obj_size(cachep), flags, cachep->flags);
 }
 
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
...
mm/slub.c
(diff collapsed; +104 -233, not shown)