Commit addf2c73 authored by Paul Menage, committed by Linus Torvalds

Cpuset hardwall flag: switch cpusets to use the bulk cgroup_add_files() API

Currently the cpusets mem_exclusive flag is overloaded to mean both
"no-overlapping" and "no GFP_KERNEL allocations outside this cpuset".

These patches add a new mem_hardwall flag with just the allocation restriction
part of the mem_exclusive semantics, without breaking backwards-compatibility
for those who continue to use just mem_exclusive.  Additionally, the cgroup
control file registration for cpusets is cleaned up to reduce boilerplate.

This patch:

This change tidies up the cpusets control file definitions, and reduces the
amount of boilerplate required to add/change control files in the future.
Signed-off-by: Paul Menage <menage@google.com>
Reviewed-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9e0c914c
@@ -1445,113 +1445,97 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
 * for the common functions, 'private' gives the type of file
 */
static struct cftype cft_cpus = { static struct cftype files[] = {
{
.name = "cpus", .name = "cpus",
.read = cpuset_common_file_read, .read = cpuset_common_file_read,
.write = cpuset_common_file_write, .write = cpuset_common_file_write,
.private = FILE_CPULIST, .private = FILE_CPULIST,
}; },
static struct cftype cft_mems = { {
.name = "mems", .name = "mems",
.read = cpuset_common_file_read, .read = cpuset_common_file_read,
.write = cpuset_common_file_write, .write = cpuset_common_file_write,
.private = FILE_MEMLIST, .private = FILE_MEMLIST,
}; },
static struct cftype cft_cpu_exclusive = { {
.name = "cpu_exclusive", .name = "cpu_exclusive",
.read_u64 = cpuset_read_u64, .read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64, .write_u64 = cpuset_write_u64,
.private = FILE_CPU_EXCLUSIVE, .private = FILE_CPU_EXCLUSIVE,
}; },
static struct cftype cft_mem_exclusive = { {
.name = "mem_exclusive", .name = "mem_exclusive",
.read_u64 = cpuset_read_u64, .read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64, .write_u64 = cpuset_write_u64,
.private = FILE_MEM_EXCLUSIVE, .private = FILE_MEM_EXCLUSIVE,
}; },
static struct cftype cft_sched_load_balance = { {
.name = "sched_load_balance", .name = "sched_load_balance",
.read_u64 = cpuset_read_u64, .read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64, .write_u64 = cpuset_write_u64,
.private = FILE_SCHED_LOAD_BALANCE, .private = FILE_SCHED_LOAD_BALANCE,
}; },
static struct cftype cft_sched_relax_domain_level = { {
.name = "sched_relax_domain_level", .name = "sched_relax_domain_level",
.read = cpuset_common_file_read, .read_u64 = cpuset_read_u64,
.write = cpuset_common_file_write, .write_u64 = cpuset_write_u64,
.private = FILE_SCHED_RELAX_DOMAIN_LEVEL, .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
}; },
static struct cftype cft_memory_migrate = { {
.name = "memory_migrate", .name = "memory_migrate",
.read_u64 = cpuset_read_u64, .read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64, .write_u64 = cpuset_write_u64,
.private = FILE_MEMORY_MIGRATE, .private = FILE_MEMORY_MIGRATE,
}; },
static struct cftype cft_memory_pressure_enabled = { {
.name = "memory_pressure_enabled",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_MEMORY_PRESSURE_ENABLED,
};
static struct cftype cft_memory_pressure = {
.name = "memory_pressure", .name = "memory_pressure",
.read_u64 = cpuset_read_u64, .read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64, .write_u64 = cpuset_write_u64,
.private = FILE_MEMORY_PRESSURE, .private = FILE_MEMORY_PRESSURE,
}; },
static struct cftype cft_spread_page = { {
.name = "memory_spread_page", .name = "memory_spread_page",
.read_u64 = cpuset_read_u64, .read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64, .write_u64 = cpuset_write_u64,
.private = FILE_SPREAD_PAGE, .private = FILE_SPREAD_PAGE,
}; },
static struct cftype cft_spread_slab = { {
.name = "memory_spread_slab", .name = "memory_spread_slab",
.read_u64 = cpuset_read_u64, .read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64, .write_u64 = cpuset_write_u64,
.private = FILE_SPREAD_SLAB, .private = FILE_SPREAD_SLAB,
},
};
/*
 * Kept separate from files[]: this file is registered only in the
 * root cpuset (see cpuset_populate()), not in every cpuset directory.
 */
static struct cftype cft_memory_pressure_enabled = {
	.name = "memory_pressure_enabled",
	.read_u64 = cpuset_read_u64,
	.write_u64 = cpuset_write_u64,
	.private = FILE_MEMORY_PRESSURE_ENABLED,
};
static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{ {
int err; int err;
if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0) err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
return err; if (err)
if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss,
&cft_sched_relax_domain_level)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
return err; return err;
/* memory_pressure_enabled is in root cpuset only */ /* memory_pressure_enabled is in root cpuset only */
if (err == 0 && !cont->parent) if (!cont->parent)
err = cgroup_add_file(cont, ss, err = cgroup_add_file(cont, ss,
&cft_memory_pressure_enabled); &cft_memory_pressure_enabled);
return 0; return err;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment