Commit 5dd3c994 authored by Rusty Russell

cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits.: ia64

Impact: cleanup, futureproof

In fact, all cpumask ops will only be valid (in general) for bit
numbers < nr_cpu_ids.  So use that instead of NR_CPUS in various
places.

This is always safe: no cpu number can be >= nr_cpu_ids, and
nr_cpu_ids is initialized to NR_CPUS at boot.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
parent 40fe697a
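
To make the commit message concrete: NR_CPUS is the compile-time maximum number of CPU ids the kernel was built for, while nr_cpu_ids is fixed during boot to the number of ids this machine can actually use (never larger than NR_CPUS). Below is a minimal sketch of the loop-bound change applied throughout the patch; the helper names are invented for illustration and are not taken from the diff:

#include <linux/cpumask.h>

/* Hypothetical helper: count online CPUs the old way, scanning every
 * compile-time slot even on a machine with only a handful of CPUs. */
static int count_online_cpus_old(void)
{
        int cpu, n = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu))
                        n++;
        return n;
}

/* Hypothetical helper: same result, but the scan stops at nr_cpu_ids,
 * which is <= NR_CPUS and is one past the highest valid CPU id. */
static int count_online_cpus_new(void)
{
        int cpu, n = 0;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                if (cpu_online(cpu))
                        n++;
        return n;
}

Most new code avoids the open-coded bound entirely and uses for_each_online_cpu() or for_each_possible_cpu(), which already stop at nr_cpu_ids.
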
@@ -924,9 +924,9 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
        buffer.length = ACPI_ALLOCATE_BUFFER;
        buffer.pointer = NULL;
-       cpus_complement(tmp_map, cpu_present_map);
+       cpumask_complement(&tmp_map, cpu_present_mask);
-       cpu = first_cpu(tmp_map);
+       cpu = cpumask_first(&tmp_map);
-       if (cpu >= NR_CPUS)
+       if (cpu >= nr_cpu_ids)
                return -EINVAL;
        acpi_map_cpu2node(handle, cpu, physid);
...
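
Besides tightening the bound, this hunk moves from the old struct-valued cpus_*/first_cpu() operations to the pointer-based cpumask_* API of that era. A minimal sketch of the resulting idiom (the helper is hypothetical, not code from the patch):

#include <linux/cpumask.h>

/* Pick a CPU id that is not in the present mask, as acpi_map_lsapic()
 * does for a newly hot-added processor.  cpumask_first() on an empty
 * mask yields a value >= nr_cpu_ids, which the range check rejects. */
static int pick_free_cpu_id(void)
{
        cpumask_t tmp_map;
        int cpu;

        cpumask_complement(&tmp_map, cpu_present_mask);
        cpu = cpumask_first(&tmp_map);
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
        return cpu;
}
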
@@ -1456,9 +1456,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
        ia64_mca_cmc_int_handler(cmc_irq, arg);
-       for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+       cpuid = cpumask_next(cpuid+1, cpu_online_mask);
-       if (cpuid < NR_CPUS) {
+       if (cpuid < nr_cpu_ids) {
                platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
        } else {
                /* If no log record, switch out of polling mode */
@@ -1525,7 +1525,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
        ia64_mca_cpe_int_handler(cpe_irq, arg);
-       for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+       cpuid = cpumask_next(cpuid+1, cpu_online_mask);
        if (cpuid < NR_CPUS) {
                platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
...
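
Both mca.c hunks replace an open-coded "advance to the next online CPU" loop with cpumask_next(), which returns the lowest set bit strictly greater than its first argument, or a value >= nr_cpu_ids when no such bit exists. A rough, hypothetical illustration of that contract:

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Hypothetical helper: report the next online CPU after 'cpuid', using
 * the same "result < nr_cpu_ids means found" convention as the patch. */
static void show_next_online(unsigned int cpuid)
{
        unsigned int next = cpumask_next(cpuid, cpu_online_mask);

        if (next < nr_cpu_ids)
                pr_info("next online cpu after %u is %u\n", cpuid, next);
        else
                pr_info("no online cpu after %u\n", cpuid);
}
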
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg)
 * /proc/perfmon interface, for debug only
 */
-#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1)
+#define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1)
static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
@@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos)
                return PFM_PROC_SHOW_HEADER;
        }
-       while (*pos <= NR_CPUS) {
+       while (*pos <= nr_cpu_ids) {
                if (cpu_online(*pos - 1)) {
                        return (void *)*pos;
                }
...
@@ -317,7 +317,7 @@ retry:
        }
        n = data->cpu_check;
-       for (i = 0; i < NR_CPUS; i++) {
+       for (i = 0; i < nr_cpu_ids; i++) {
                if (cpu_isset(n, data->cpu_event)) {
                        if (!cpu_online(n)) {
                                cpu_clear(n, data->cpu_event);
@@ -326,7 +326,7 @@ retry:
                        cpu = n;
                        break;
                }
-               if (++n == NR_CPUS)
+               if (++n == nr_cpu_ids)
                        n = 0;
        }
@@ -337,7 +337,7 @@ retry:
        /* for next read, start checking at next CPU */
        data->cpu_check = cpu;
-       if (++data->cpu_check == NR_CPUS)
+       if (++data->cpu_check == nr_cpu_ids)
                data->cpu_check = 0;
        snprintf(cmd, sizeof(cmd), "read %d\n", cpu);
...
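
The salinfo hunks keep the driver's round-robin scan over pending CPUs but bound both the iteration count and the wrap point by nr_cpu_ids. Roughly the same pattern, written here with the newer cpumask_test_cpu() accessor (a hypothetical helper, not from the patch):

#include <linux/cpumask.h>

/* Scan at most nr_cpu_ids slots starting at 'start', wrapping at
 * nr_cpu_ids rather than NR_CPUS, and return the first CPU whose bit
 * is set in 'pending' (or -1 if none is). */
static int next_pending_cpu(const struct cpumask *pending, int start)
{
        int i, n = start;

        for (i = 0; i < nr_cpu_ids; i++) {
                if (cpumask_test_cpu(n, pending))
                        return n;
                if (++n == nr_cpu_ids)
                        n = 0;
        }
        return -1;
}
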
@@ -730,10 +730,10 @@ static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
-       while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
+       while (*pos < nr_cpu_ids && !cpu_online(*pos))
                ++*pos;
#endif
-       return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
+       return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}
static void *
...
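
The c_start() change is the standard /proc seq_file iterator pattern: skip offline positions, and stop once the position reaches nr_cpu_ids. A condensed, hypothetical version of the start/next pair (not the real /proc/cpuinfo code):

#include <linux/cpumask.h>
#include <linux/seq_file.h>

/* Hypothetical seq_file helpers iterating over online CPUs.  The
 * returned cookie is the position plus one, so position 0 does not
 * look like a NULL "end of sequence" marker. */
static void *cpu_seq_start(struct seq_file *m, loff_t *pos)
{
        while (*pos < nr_cpu_ids && !cpu_online(*pos))
                ++*pos;
        return *pos < nr_cpu_ids ? (void *)(unsigned long)(*pos + 1) : NULL;
}

static void *cpu_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return cpu_seq_start(m, pos);
}
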
@@ -750,7 +750,7 @@ nasid_slice_to_cpuid(int nasid, int slice)
{
        long cpu;
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                if (cpuid_to_nasid(cpu) == nasid &&
                                        cpuid_to_slice(cpu) == slice)
                        return cpu;
...
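
The nasid/slice reverse lookup is a plain linear scan over CPU ids, now bounded by nr_cpu_ids. In new code the same scan is typically written with the for_each_possible_cpu() iterator, which already stops at nr_cpu_ids. A hypothetical sketch, where matches_location() is a placeholder for the patch's cpuid_to_nasid()/cpuid_to_slice() comparison:

#include <linux/cpumask.h>
#include <linux/types.h>

/* Placeholder predicate standing in for the cpuid_to_nasid()/
 * cpuid_to_slice() comparison in the real code. */
static bool matches_location(int cpu, int nasid, int slice)
{
        return false;
}

/* Equivalent to "for (cpu = 0; cpu < nr_cpu_ids; cpu++)": the iterator
 * only visits ids below nr_cpu_ids. */
static int find_cpu_by_location(int nasid, int slice)
{
        int cpu;

        for_each_possible_cpu(cpu)
                if (matches_location(cpu, nasid, slice))
                        return cpu;
        return -1;
}
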
@@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu)
static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
{
-       if (*offset < NR_CPUS)
+       if (*offset < nr_cpu_ids)
                return offset;
        return NULL;
}
@@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
{
        (*offset)++;
-       if (*offset < NR_CPUS)
+       if (*offset < nr_cpu_ids)
                return offset;
        return NULL;
}
@@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
                seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
        }
-       if (cpu < NR_CPUS && cpu_online(cpu)) {
+       if (cpu < nr_cpu_ids && cpu_online(cpu)) {
                stat = &per_cpu(ptcstats, cpu);
                seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
                        stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
...
@@ -612,7 +612,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
        op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
        if (cpu != SN_HWPERF_ARG_ANY_CPU) {
-               if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+               if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                        r = -EINVAL;
                        goto out;
                }
...
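
The sn_hwperf hunk shows the common pattern for validating a caller-supplied CPU number: the value must name an id that can exist on this boot (below nr_cpu_ids) before it makes sense to ask whether that CPU is online. A hypothetical stand-alone version of the check:

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Reject CPU numbers that cannot name a CPU on this boot, or that name
 * a CPU which is currently offline. */
static int validate_cpu_arg(int cpu)
{
        if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -EINVAL;
        return 0;
}
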