Commit 63add2f2 authored by Linus Torvalds

Merge branch 'cpus4096-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'cpus4096-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  cpu masks: optimize and clean up cpumask_of_cpu()
  cpumask: export cpumask_of_cpu_map
  cpumask: change cpumask_of_cpu_ptr to use new cpumask_of_cpu
  cpumask: put cpumask_of_cpu_map in the initdata section
  cpumask: make cpumask_of_cpu_map generic
parents 3684a601 9e3ee1c3
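
The whole series reduces to one call-site pattern: the cpumask_of_cpu_ptr*() helper macros go away, and callers take the address of the new constant cpumask_of_cpu(cpu) instead. A minimal before/after sketch of that pattern (pin_self_old()/pin_self_new() are made-up names for illustration; the real call sites follow in the diff below):

/* Before: helper macros declare a pointer to a one-bit mask, pointing
 * either into the bootmem-allocated cpumask_of_cpu_map[] or at a
 * cpumask_t copied onto the stack. */
static int pin_self_old(unsigned int cpu)
{
	cpumask_of_cpu_ptr(new_mask, cpu);

	return set_cpus_allowed_ptr(current, new_mask);
}

/* After: cpumask_of_cpu() expands to *get_cpu_mask(cpu), a constant
 * single-bit mask in read-only data, so its address can be taken
 * directly; no helpers, no stack copy. */
static int pin_self_new(unsigned int cpu)
{
	return set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
}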
@@ -73,7 +73,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(new_mask, cpu);
 	int retval;
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int edx_part;
@@ -92,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 	/* Make sure we are running on right CPU */
 	saved_mask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, new_mask);
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		return -1;
...
@@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
 	cpumask_t saved_mask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_declare(cpu_mask);
 	unsigned int i;
 	for_each_cpu_mask_nr(i, cmd->mask) {
-		cpumask_of_cpu_ptr_next(cpu_mask, i);
-		set_cpus_allowed_ptr(current, cpu_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
@@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	} aperf_cur, mperf_cur;
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	unsigned int perf_percent;
 	unsigned int retval;
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (get_cpu() != cpu) {
 		/* We were not able to run on requested processor */
 		put_cpu();
@@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu)
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
 	unsigned int freq;
 	unsigned int cached_freq;
@@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	}
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(cpu_mask), data);
+	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
...
@@ -479,12 +479,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	u32 eax, ebx, ecx, edx;
 	unsigned int rc = 0;
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -1017,7 +1016,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
@@ -1032,7 +1030,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1107,7 +1105,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr_declare(newmask);
 	int rc;
 	if (!cpu_online(pol->cpu))
@@ -1159,8 +1156,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_next(newmask, pol->cpu);
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1182,7 +1178,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	set_cpus_allowed_ptr(current, &oldmask);
 	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = *newmask;
+		pol->cpus = cpumask_of_cpu(pol->cpu);
 	else
 		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
 	data->available_cores = &(pol->cpus);
@@ -1248,7 +1244,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask = current->cpus_allowed;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	unsigned int khz = 0;
 	unsigned int first;
@@ -1258,7 +1253,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (!data)
 		return -EINVAL;
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX
 		       "limiting to CPU %d failed in powernowk8_get\n", cpu);
...
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
 	unsigned l, h;
 	unsigned clock_freq;
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(new_mask, cpu);
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu)
 		return 0;
@@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy,
 	 * Best effort undo..
 	 */
-	if (!cpus_empty(*covered_cpus)) {
-		cpumask_of_cpu_ptr_declare(new_mask);
+	if (!cpus_empty(*covered_cpus))
 		for_each_cpu_mask_nr(j, *covered_cpus) {
-			cpumask_of_cpu_ptr_next(new_mask, j);
-			set_cpus_allowed_ptr(current, new_mask);
+			set_cpus_allowed_ptr(current,
+					     &cpumask_of_cpu(j));
 			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 		}
-	}
 	tmp = freqs.new;
 	freqs.new = freqs.old;
...
@@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	cpumask_of_cpu_ptr(newmask, cpu);
-	return _speedstep_get(newmask);
+	return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 /**
...
@@ -516,7 +516,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	unsigned long j;
 	int retval;
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	if (num_cache_leaves == 0)
 		return -ENOENT;
@@ -527,7 +526,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		return -ENOMEM;
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, newmask);
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		goto out;
...
@@ -62,12 +62,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 	if (reload) {
 #ifdef CONFIG_SMP
-		cpumask_of_cpu_ptr_declare(mask);
 		preempt_disable();
 		load_LDT(pc);
-		cpumask_of_cpu_ptr_next(mask, smp_processor_id());
-		if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
+		if (!cpus_equal(current->mm->cpu_vm_mask,
+				cpumask_of_cpu(smp_processor_id())))
 			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
...
@@ -388,7 +388,6 @@ static int do_microcode_update (void)
 	void *new_mc = NULL;
 	int cpu;
 	cpumask_t old;
-	cpumask_of_cpu_ptr_declare(newmask);
 	old = current->cpus_allowed;
@@ -405,8 +404,7 @@ static int do_microcode_update (void)
 		if (!uci->valid)
 			continue;
-		cpumask_of_cpu_ptr_next(newmask, cpu);
-		set_cpus_allowed_ptr(current, newmask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		error = get_maching_microcode(new_mc, cpu);
 		if (error < 0)
 			goto out;
@@ -576,7 +574,6 @@ static int apply_microcode_check_cpu(int cpu)
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	cpumask_t old;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	unsigned int val[2];
 	int err = 0;
@@ -585,7 +582,7 @@ static int apply_microcode_check_cpu(int cpu)
 		return 0;
 	old = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	/* Check if the microcode we have in memory matches the CPU */
 	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -623,12 +620,11 @@ static int apply_microcode_check_cpu(int cpu)
 static void microcode_init_cpu(int cpu, int resume)
 {
 	cpumask_t old;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	old = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
 	if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -661,13 +657,10 @@ static ssize_t reload_store(struct sys_device *dev,
 	if (end == buf)
 		return -EINVAL;
 	if (val == 1) {
-		cpumask_t old;
-		cpumask_of_cpu_ptr(newmask, cpu);
-		old = current->cpus_allowed;
+		cpumask_t old = current->cpus_allowed;
 		get_online_cpus();
-		set_cpus_allowed_ptr(current, newmask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		mutex_lock(&microcode_mutex);
 		if (uci->valid)
...
@@ -414,25 +414,20 @@ void native_machine_shutdown(void)
 	/* The boot cpu is always logical cpu 0 */
 	int reboot_cpu_id = 0;
-	cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
 	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-		cpu_online(reboot_cpu)) {
+		cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
-		cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-	}
 #endif
 	/* Make certain the cpu I'm about to reboot on is online */
-	if (!cpu_online(reboot_cpu_id)) {
+	if (!cpu_online(reboot_cpu_id))
 		reboot_cpu_id = smp_processor_id();
-		cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-	}
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
...
@@ -80,24 +80,6 @@ static void __init setup_per_cpu_maps(void)
 #endif
 }
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-cpumask_t *cpumask_of_cpu_map __read_mostly;
-EXPORT_SYMBOL(cpumask_of_cpu_map);
-
-/* requires nr_cpu_ids to be initialized */
-static void __init setup_cpumask_of_cpu(void)
-{
-	int i;
-
-	/* alloc_bootmem zeroes memory */
-	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
-	for (i = 0; i < nr_cpu_ids; i++)
-		cpu_set(i, cpumask_of_cpu_map[i]);
-}
-#else
-static inline void setup_cpumask_of_cpu(void) { }
-#endif
 #ifdef CONFIG_X86_32
 /*
  * Great future not-so-futuristic plan: make i386 and x86_64 do it
@@ -197,9 +179,6 @@ void __init setup_per_cpu_areas(void)
 	/* Setup node to cpumask map */
 	setup_node_to_cpumask_map();
-
-	/* Setup cpumask_of_cpu map */
-	setup_cpumask_of_cpu();
 }
 #endif
...
@@ -827,7 +827,6 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret;
 	if (!pr)
@@ -839,8 +838,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 	 * Migrate task to the cpu pointed by pr.
 	 */
 	saved_mask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_next(new_mask, pr->id);
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
 	set_cpus_allowed_ptr(current, &saved_mask);
@@ -989,7 +987,6 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
@@ -1028,8 +1025,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		cpumask_of_cpu_ptr_next(new_mask, pr->id);
-		set_cpus_allowed_ptr(current, new_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 						t_state.target_state);
 	} else {
@@ -1060,8 +1056,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 			continue;
 		}
 		t_state.cpu = i;
-		cpumask_of_cpu_ptr_next(new_mask, i);
-		set_cpus_allowed_ptr(current, new_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		ret = match_pr->throttling.
 			acpi_processor_set_throttling(
 			match_pr, t_state.target_state);
...
@@ -245,7 +245,6 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
 static int smi_request(struct smi_cmd *smi_cmd)
 {
 	cpumask_t old_mask;
-	cpumask_of_cpu_ptr(new_mask, 0);
 	int ret = 0;
 	if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -256,7 +255,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
 	/* SMI requires CPU 0 */
 	old_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
 	if (smp_processor_id() != 0) {
 		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
 			__func__);
...
@@ -229,11 +229,10 @@ xpc_hb_checker(void *ignore)
 	int last_IRQ_count = 0;
 	int new_IRQ_count;
 	int force_IRQ = 0;
-	cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
 	/* this thread was marked active by xpc_hb_init() */
-	set_cpus_allowed_ptr(current, cpumask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
...
@@ -62,15 +62,7 @@
  * int next_cpu_nr(cpu, mask)		Next cpu past 'cpu', or nr_cpu_ids
  *
  * cpumask_t cpumask_of_cpu(cpu)	Return cpumask with bit 'cpu' set
- *ifdef CONFIG_HAS_CPUMASK_OF_CPU
- * cpumask_of_cpu_ptr_declare(v)	Declares cpumask_t *v
- * cpumask_of_cpu_ptr_next(v, cpu)	Sets v = &cpumask_of_cpu_map[cpu]
- * cpumask_of_cpu_ptr(v, cpu)		Combines above two operations
- *else
- * cpumask_of_cpu_ptr_declare(v)	Declares cpumask_t _v and *v = &_v
- * cpumask_of_cpu_ptr_next(v, cpu)	Sets _v = cpumask_of_cpu(cpu)
- * cpumask_of_cpu_ptr(v, cpu)		Combines above two operations
- *endif
+ * (can be used as an lvalue)
  * CPU_MASK_ALL				Initializer - all bits set
  * CPU_MASK_NONE			Initializer - no bits set
  * unsigned long *cpus_addr(mask)	Array of unsigned long's in mask
@@ -273,37 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-extern cpumask_t *cpumask_of_cpu_map;
-#define cpumask_of_cpu(cpu)	(cpumask_of_cpu_map[cpu])
-#define cpumask_of_cpu_ptr(v, cpu)					\
-		const cpumask_t *v = &cpumask_of_cpu(cpu)
-#define cpumask_of_cpu_ptr_declare(v)					\
-		const cpumask_t *v
-#define cpumask_of_cpu_ptr_next(v, cpu)					\
-		v = &cpumask_of_cpu(cpu)
-#else
-#define cpumask_of_cpu(cpu)						\
-({									\
-	typeof(_unused_cpumask_arg_) m;					\
-	if (sizeof(m) == sizeof(unsigned long)) {			\
-		m.bits[0] = 1UL<<(cpu);					\
-	} else {							\
-		cpus_clear(m);						\
-		cpu_set((cpu), m);					\
-	}								\
-	m;								\
-})
-#define cpumask_of_cpu_ptr(v, cpu)					\
-		cpumask_t _##v = cpumask_of_cpu(cpu);			\
-		const cpumask_t *v = &_##v
-#define cpumask_of_cpu_ptr_declare(v)					\
-		cpumask_t _##v;						\
-		const cpumask_t *v = &_##v
-#define cpumask_of_cpu_ptr_next(v, cpu)					\
-		_##v = cpumask_of_cpu(cpu)
-#endif
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
+{
+	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+	p -= cpu / BITS_PER_LONG;
+	return (const cpumask_t *)p;
+}
+
+/*
+ * In cases where we take the address of the cpumask immediately,
+ * gcc optimizes it out (it's a constant) and there's no huge stack
+ * variable created:
+ */
+#define cpumask_of_cpu(cpu) ({ *get_cpu_mask(cpu); })
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
...
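
To see why get_cpu_mask() can return a constant pointer for every CPU, it helps to trace the index arithmetic once. Below is a standalone userspace sketch of the same trick (not kernel code; it assumes BITS_PER_LONG == 64 and NR_CPUS == 256, and like the kernel table it relies on the rows of the 2-D array being contiguous when it backs the pointer across a row boundary):

#include <stdio.h>

#define BITS_PER_LONG	64
#define NR_CPUS		256
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Row x+1 holds 1UL << x in its first word; row 0 stays all-zero. */
#define MASK_DECLARE_1(x) [(x)+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1((x)+1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2((x)+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4((x)+4)

static const unsigned long bitmap[BITS_PER_LONG + 1][BITS_TO_LONGS(NR_CPUS)] = {
	MASK_DECLARE_8(0),  MASK_DECLARE_8(8),
	MASK_DECLARE_8(16), MASK_DECLARE_8(24),
	MASK_DECLARE_8(32), MASK_DECLARE_8(40),
	MASK_DECLARE_8(48), MASK_DECLARE_8(56),
};

static const unsigned long *get_cpu_mask(unsigned int cpu)
{
	/* Pick the row whose first word is 1UL << (cpu % 64), then back
	 * up cpu / 64 words, so that word lands at index cpu / 64 of the
	 * mask we return. Every word we back into is guaranteed zero. */
	const unsigned long *p = bitmap[1 + cpu % BITS_PER_LONG];
	return p - cpu / BITS_PER_LONG;
}

int main(void)
{
	const unsigned long *mask = get_cpu_mask(70);
	unsigned int i;

	for (i = 0; i < BITS_TO_LONGS(NR_CPUS); i++)
		printf("word %u: %#lx\n", i, mask[i]);
	return 0;	/* prints 0, 0x40, 0, 0: only bit 64 + 6 = 70 is set */
}

The zero padding comes from two places: every column past 0 of each row is zero, and row 0 is entirely zero, so wherever the backed-up pointer lands, only the single word carrying the bit is nonzero.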
@@ -455,3 +455,28 @@ out:
 #endif /* CONFIG_PM_SLEEP_SMP */
 #endif /* CONFIG_SMP */
+
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents all NR_CPUS bits binary values of 1<<nr.
+ *
+ * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * mask value that has a single bit set only.
+ */
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
+	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
+	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
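
Replacing the dynamic map with this constant table is also where the memory win comes from on big-iron configs. A rough back-of-envelope comparison (my numbers, assuming 64-bit longs and NR_CPUS == 4096, so sizeof(cpumask_t) == 512 bytes):

/*
 * old: cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids)
 *      -> up to 4096 * 512 bytes = 2 MB allocated at every boot
 * new: cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]
 *      -> 65 rows * 64 longs * 8 bytes = 33,280 bytes (~32.5 KB) of
 *         read-only data, fixed at compile time
 */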
@@ -196,12 +196,10 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	struct tick_device *td;
 	int cpu, ret = NOTIFY_OK;
 	unsigned long flags;
-	cpumask_of_cpu_ptr_declare(cpumask);
 	spin_lock_irqsave(&tick_device_lock, flags);
 	cpu = smp_processor_id();
-	cpumask_of_cpu_ptr_next(cpumask, cpu);
 	if (!cpu_isset(cpu, newdev->cpumask))
 		goto out_bc;
@@ -209,7 +207,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	curdev = td->evtdev;
 	/* cpu local device ? */
-	if (!cpus_equal(newdev->cpumask, *cpumask)) {
+	if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
 		/*
 		 * If the cpu affinity of the device interrupt can not
@@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		 * If we have a cpu local device already, do not replace it
 		 * by a non cpu local device
 		 */
-		if (curdev && cpus_equal(curdev->cpumask, *cpumask))
+		if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
 			goto out_bc;
 	}
@@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		curdev = NULL;
 	}
 	clockevents_exchange_device(curdev, newdev);
-	tick_setup_device(td, newdev, cpu, cpumask);
+	tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
 	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
 		tick_oneshot_notify();
...
@@ -213,9 +213,7 @@ static void start_stack_timers(void)
 	int cpu;
 	for_each_online_cpu(cpu) {
-		cpumask_of_cpu_ptr(new_mask, cpu);
-		set_cpus_allowed_ptr(current, new_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		start_stack_timer(cpu);
 	}
 	set_cpus_allowed_ptr(current, &saved_mask);
...
@@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void)
 {
 	unsigned long preempt_count = preempt_count();
 	int this_cpu = raw_smp_processor_id();
-	cpumask_of_cpu_ptr_declare(this_mask);
 	if (likely(preempt_count))
 		goto out;
@@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	cpumask_of_cpu_ptr_next(this_mask, this_cpu);
-	if (cpus_equal(current->cpus_allowed, *this_mask))
+	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
 		goto out;
 	/*
...
@@ -310,8 +310,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		cpumask_of_cpu_ptr(cpumask, node);
-		set_cpus_allowed_ptr(task, cpumask);
+		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
 		break;
 	}
 	case SVC_POOL_PERNODE:
...