Commit 1578a2b7 authored by Peter Zijlstra, committed by Thomas Gleixner

mm: quicklist: Convert to percpu locked

Use per-cpu locked variables for the quicklists as well, to make the
code preemptible.

[ tglx: folded Ingo's "release before free page fix" ]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 46167aec
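
For readers who don't track the -rt tree: the plain get_cpu_var()/put_cpu_var()
pair pins the task by disabling preemption, which is exactly what this series is
removing. The _locked variants used below instead guard each CPU's instance with
its own lock, so the section stays preemptible and correctness comes from holding
that lock rather than from staying on the CPU. A minimal userspace sketch of the
pattern, with hypothetical names and pthread locking standing in for the kernel
primitives (this is not the kernel API):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>		/* sched_getcpu() */

#define NR_CPUS		4
#define NR_QUICK	2

struct quicklist {
	void *page;
	int nr_pages;
};

/* One lock per CPU instance instead of preempt_disable(): the holder
 * may be preempted (on PREEMPT_RT the lock itself can sleep) while
 * every other CPU keeps using its own instance unhindered. */
static struct {
	pthread_mutex_t lock;
	struct quicklist q[NR_QUICK];
} per_cpu_ql[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = { .lock = PTHREAD_MUTEX_INITIALIZER },
};

/* Analogue of get_cpu_var_locked(): the task may migrate right after
 * sched_getcpu(); that is fine, because we lock the instance we
 * picked instead of relying on staying on that CPU. */
static struct quicklist *get_quicklist_locked(int nr, int *cpu)
{
	*cpu = sched_getcpu() % NR_CPUS;
	pthread_mutex_lock(&per_cpu_ql[*cpu].lock);
	return &per_cpu_ql[*cpu].q[nr];
}

/* Analogue of put_cpu_var_locked(). */
static void put_quicklist_locked(int cpu)
{
	pthread_mutex_unlock(&per_cpu_ql[cpu].lock);
}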
--- a/include/linux/quicklist.h
+++ b/include/linux/quicklist.h
@@ -18,7 +18,7 @@ struct quicklist {
 	int nr_pages;
 };
 
-DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+DECLARE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK];
 
 /*
  * The two key functions quicklist_alloc and quicklist_free are inline so
@@ -30,19 +30,27 @@ DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
  * The fast path in quicklist_alloc touches only a per cpu cacheline and
  * the first cacheline of the page itself. There is minimal overhead involved.
  */
-static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+static inline void *__quicklist_alloc(struct quicklist *q)
 {
-	struct quicklist *q;
-	void **p = NULL;
+	void **p = q->page;
 
-	q =&get_cpu_var(quicklist)[nr];
-	p = q->page;
 	if (likely(p)) {
 		q->page = p[0];
 		p[0] = NULL;
 		q->nr_pages--;
 	}
-	put_cpu_var(quicklist);
+	return p;
+}
+
+static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+{
+	struct quicklist *q;
+	void **p;
+	int cpu;
+
+	q = &get_cpu_var_locked(quicklist, &cpu)[nr];
+	p = __quicklist_alloc(q);
+	put_cpu_var_locked(quicklist, cpu);
 	if (likely(p))
 		return p;
@@ -56,12 +64,13 @@ static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
 	struct page *page)
 {
 	struct quicklist *q;
+	int cpu;
 
-	q = &get_cpu_var(quicklist)[nr];
+	q = &get_cpu_var_locked(quicklist, &cpu)[nr];
 	*(void **)p = q->page;
 	q->page = p;
 	q->nr_pages++;
-	put_cpu_var(quicklist);
+	put_cpu_var_locked(quicklist, cpu);
 }
 
 static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
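
One subtlety in the new quicklist_alloc() above, and the reason for the folded
"release before free page fix": put_cpu_var_locked() is issued before the slow
path falls back to the page allocator, so a possibly-sleeping allocation never
runs with the per-cpu lock held. The remainder of the function is elided by the
hunk; per the quicklist.h of that era it looks approximately like this (shown
only as context, it is not touched by this patch):

	/* Slow path: runs after the per-cpu lock has been dropped, so
	 * __get_free_page(), which may sleep, is called lock-free. */
	p = (void *)__get_free_page(flags | __GFP_ZERO);
	if (ctor && p)
		ctor(p);
	return p;
}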
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -19,7 +19,7 @@
 #include <linux/module.h>
 #include <linux/quicklist.h>
 
-DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+DEFINE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK];
 
 #define FRACTION_OF_NODE_MEM	16
@@ -66,17 +66,14 @@ void quicklist_trim(int nr, void (*dtor)(void *),
 {
 	long pages_to_free;
 	struct quicklist *q;
+	int cpu;
 
-	q = &get_cpu_var(quicklist)[nr];
+	q = &get_cpu_var_locked(quicklist, &cpu)[nr];
 	if (q->nr_pages > min_pages) {
 		pages_to_free = min_pages_to_free(q, min_pages, max_free);
 
 		while (pages_to_free > 0) {
-			/*
-			 * We pass a gfp_t of 0 to quicklist_alloc here
-			 * because we will never call into the page allocator.
-			 */
-			void *p = quicklist_alloc(nr, 0, NULL);
+			void *p = __quicklist_alloc(q);
 
 			if (dtor)
 				dtor(p);
@@ -84,7 +81,7 @@ void quicklist_trim(int nr, void (*dtor)(void *),
 			pages_to_free--;
 		}
 	}
-	put_cpu_var(quicklist);
+	put_cpu_var_locked(quicklist, cpu);
 }
 
 unsigned long quicklist_total_size(void)
@@ -94,7 +91,7 @@ unsigned long quicklist_total_size(void)
 	struct quicklist *ql, *q;
 
 	for_each_online_cpu(cpu) {
-		ql = per_cpu(quicklist, cpu);
+		ql = per_cpu_var_locked(quicklist, cpu);
 		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
 			count += q->nr_pages;
 	}
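
A closing note on quicklist_trim(): it now calls __quicklist_alloc() directly
because it already holds the per-cpu locked variable, so going through the
public quicklist_alloc() would attempt to take the same lock twice. Continuing
the hypothetical userspace sketch from above, the resulting locking discipline
looks like this (illustrative only, with free() standing in for the dtor and
page free):

#include <stdlib.h>

/* Inner helper, mirrors __quicklist_alloc(): pops one page off a
 * quicklist whose per-cpu lock the caller already holds; it takes no
 * lock of its own. */
static void *quicklist_pop(struct quicklist *q)
{
	void **p = q->page;

	if (p) {
		q->page = p[0];
		p[0] = NULL;
		q->nr_pages--;
	}
	return p;
}

/* Outer function owns the lock for the whole drain, exactly like
 * quicklist_trim() after this patch. */
static void trim_all(int nr)
{
	int cpu;
	struct quicklist *q = get_quicklist_locked(nr, &cpu);

	while (q->nr_pages > 0)
		free(quicklist_pop(q));		/* userspace stand-in */
	put_quicklist_locked(cpu);
}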