Commit fcef5911 authored by Yinghai Lu's avatar Yinghai Lu Committed by Ingo Molnar

x86/irq: remove leftover code from NUMA_MIGRATE_IRQ_DESC

The original feature of migrating irq_desc dynamically was too fragile
and was causing problems: it caused crashes on systems with lots of
cards with MSI-X when the user-space irq-balancer was enabled.

We now have new patches that create irq_desc according to device
numa node. This patch removes the leftover bits of the dynamic balancer.

[ Impact: remove dead code ]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <49F654AF.8000808@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9ec4fa27
...@@ -274,16 +274,6 @@ config SPARSE_IRQ ...@@ -274,16 +274,6 @@ config SPARSE_IRQ
If you don't know what to do here, say N. If you don't know what to do here, say N.
config NUMA_MIGRATE_IRQ_DESC
bool "Move irq desc when changing irq smp_affinity"
depends on SPARSE_IRQ && NUMA
depends on BROKEN
default n
---help---
This enables moving irq_desc to cpu/node that irq will use handled.
If you don't know what to do here, say N.
config X86_MPPARSE config X86_MPPARSE
bool "Enable MPS table" if ACPI bool "Enable MPS table" if ACPI
default y default y
......
...@@ -195,7 +195,6 @@ CONFIG_HIGH_RES_TIMERS=y ...@@ -195,7 +195,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP=y CONFIG_SMP=y
CONFIG_SPARSE_IRQ=y CONFIG_SPARSE_IRQ=y
# CONFIG_NUMA_MIGRATE_IRQ_DESC is not set
CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_FIND_SMP_CONFIG=y
CONFIG_X86_MPPARSE=y CONFIG_X86_MPPARSE=y
# CONFIG_X86_ELAN is not set # CONFIG_X86_ELAN is not set
......
...@@ -148,9 +148,6 @@ struct irq_cfg { ...@@ -148,9 +148,6 @@ struct irq_cfg {
unsigned move_cleanup_count; unsigned move_cleanup_count;
u8 vector; u8 vector;
u8 move_in_progress : 1; u8 move_in_progress : 1;
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
u8 move_desc_pending : 1;
#endif
}; };
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
...@@ -254,8 +251,7 @@ int arch_init_chip_data(struct irq_desc *desc, int cpu) ...@@ -254,8 +251,7 @@ int arch_init_chip_data(struct irq_desc *desc, int cpu)
return 0; return 0;
} }
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC /* for move_irq_desc */
static void static void
init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu) init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
{ {
...@@ -356,19 +352,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) ...@@ -356,19 +352,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
old_desc->chip_data = NULL; old_desc->chip_data = NULL;
} }
} }
/* end for move_irq_desc */
static void
set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg = desc->chip_data;
if (!cfg->move_in_progress) {
/* it means that domain is not changed */
if (!cpumask_intersects(desc->affinity, mask))
cfg->move_desc_pending = 1;
}
}
#endif
#else #else
static struct irq_cfg *irq_cfg(unsigned int irq) static struct irq_cfg *irq_cfg(unsigned int irq)
...@@ -378,13 +362,6 @@ static struct irq_cfg *irq_cfg(unsigned int irq) ...@@ -378,13 +362,6 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
#endif #endif
#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
static inline void
set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
{
}
#endif
struct io_apic { struct io_apic {
unsigned int index; unsigned int index;
unsigned int unused[3]; unsigned int unused[3];
...@@ -592,9 +569,6 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) ...@@ -592,9 +569,6 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
if (assign_irq_vector(irq, cfg, mask)) if (assign_irq_vector(irq, cfg, mask))
return BAD_APICID; return BAD_APICID;
/* check that before desc->addinity get updated */
set_extra_move_desc(desc, mask);
cpumask_copy(desc->affinity, mask); cpumask_copy(desc->affinity, mask);
return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
...@@ -2393,8 +2367,6 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) ...@@ -2393,8 +2367,6 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
if (assign_irq_vector(irq, cfg, mask)) if (assign_irq_vector(irq, cfg, mask))
return; return;
set_extra_move_desc(desc, mask);
dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
irte.vector = cfg->vector; irte.vector = cfg->vector;
...@@ -2491,34 +2463,14 @@ static void irq_complete_move(struct irq_desc **descp) ...@@ -2491,34 +2463,14 @@ static void irq_complete_move(struct irq_desc **descp)
struct irq_cfg *cfg = desc->chip_data; struct irq_cfg *cfg = desc->chip_data;
unsigned vector, me; unsigned vector, me;
if (likely(!cfg->move_in_progress)) { if (likely(!cfg->move_in_progress))
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
if (likely(!cfg->move_desc_pending))
return;
/* domain has not changed, but affinity did */
me = smp_processor_id();
if (cpumask_test_cpu(me, desc->affinity)) {
*descp = desc = move_irq_desc(desc, me);
/* get the new one */
cfg = desc->chip_data;
cfg->move_desc_pending = 0;
}
#endif
return; return;
}
vector = ~get_irq_regs()->orig_ax; vector = ~get_irq_regs()->orig_ax;
me = smp_processor_id(); me = smp_processor_id();
if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) { if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
*descp = desc = move_irq_desc(desc, me);
/* get the new one */
cfg = desc->chip_data;
#endif
send_cleanup_vector(cfg); send_cleanup_vector(cfg);
}
} }
#else #else
static inline void irq_complete_move(struct irq_desc **descp) {} static inline void irq_complete_move(struct irq_desc **descp) {}
......
...@@ -212,16 +212,6 @@ extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); ...@@ -212,16 +212,6 @@ extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
static inline struct irq_desc *
irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
return irq_to_desc(irq);
#else
return desc;
#endif
}
/* /*
* Migration helpers for obsolete names, they will go away: * Migration helpers for obsolete names, they will go away:
*/ */
......
...@@ -3,5 +3,5 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o ...@@ -3,5 +3,5 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o obj-$(CONFIG_SPARSE_IRQ) += numa_migrate.o
obj-$(CONFIG_PM_SLEEP) += pm.o obj-$(CONFIG_PM_SLEEP) += pm.o
...@@ -359,7 +359,6 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) ...@@ -359,7 +359,6 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
spin_lock(&desc->lock); spin_lock(&desc->lock);
mask_ack_irq(desc, irq); mask_ack_irq(desc, irq);
desc = irq_remap_to_desc(irq, desc);
if (unlikely(desc->status & IRQ_INPROGRESS)) if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock; goto out_unlock;
...@@ -438,7 +437,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) ...@@ -438,7 +437,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_INPROGRESS; desc->status &= ~IRQ_INPROGRESS;
out: out:
desc->chip->eoi(irq); desc->chip->eoi(irq);
desc = irq_remap_to_desc(irq, desc);
spin_unlock(&desc->lock); spin_unlock(&desc->lock);
} }
...@@ -475,7 +473,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) ...@@ -475,7 +473,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
!desc->action)) { !desc->action)) {
desc->status |= (IRQ_PENDING | IRQ_MASKED); desc->status |= (IRQ_PENDING | IRQ_MASKED);
mask_ack_irq(desc, irq); mask_ack_irq(desc, irq);
desc = irq_remap_to_desc(irq, desc);
goto out_unlock; goto out_unlock;
} }
kstat_incr_irqs_this_cpu(irq, desc); kstat_incr_irqs_this_cpu(irq, desc);
...@@ -483,7 +480,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) ...@@ -483,7 +480,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
/* Start handling the irq */ /* Start handling the irq */
if (desc->chip->ack) if (desc->chip->ack)
desc->chip->ack(irq); desc->chip->ack(irq);
desc = irq_remap_to_desc(irq, desc);
/* Mark the IRQ currently in progress.*/ /* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS; desc->status |= IRQ_INPROGRESS;
...@@ -544,10 +540,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) ...@@ -544,10 +540,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
if (!noirqdebug) if (!noirqdebug)
note_interrupt(irq, desc, action_ret); note_interrupt(irq, desc, action_ret);
if (desc->chip->eoi) { if (desc->chip->eoi)
desc->chip->eoi(irq); desc->chip->eoi(irq);
desc = irq_remap_to_desc(irq, desc);
}
} }
void void
...@@ -582,10 +576,8 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, ...@@ -582,10 +576,8 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
/* Uninstall? */ /* Uninstall? */
if (handle == handle_bad_irq) { if (handle == handle_bad_irq) {
if (desc->chip != &no_irq_chip) { if (desc->chip != &no_irq_chip)
mask_ack_irq(desc, irq); mask_ack_irq(desc, irq);
desc = irq_remap_to_desc(irq, desc);
}
desc->status |= IRQ_DISABLED; desc->status |= IRQ_DISABLED;
desc->depth = 1; desc->depth = 1;
} }
......
...@@ -458,11 +458,8 @@ unsigned int __do_IRQ(unsigned int irq) ...@@ -458,11 +458,8 @@ unsigned int __do_IRQ(unsigned int irq)
/* /*
* No locking required for CPU-local interrupts: * No locking required for CPU-local interrupts:
*/ */
if (desc->chip->ack) { if (desc->chip->ack)
desc->chip->ack(irq); desc->chip->ack(irq);
/* get new one */
desc = irq_remap_to_desc(irq, desc);
}
if (likely(!(desc->status & IRQ_DISABLED))) { if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action); action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug) if (!noirqdebug)
...@@ -473,10 +470,8 @@ unsigned int __do_IRQ(unsigned int irq) ...@@ -473,10 +470,8 @@ unsigned int __do_IRQ(unsigned int irq)
} }
spin_lock(&desc->lock); spin_lock(&desc->lock);
if (desc->chip->ack) { if (desc->chip->ack)
desc->chip->ack(irq); desc->chip->ack(irq);
desc = irq_remap_to_desc(irq, desc);
}
/* /*
* REPLAY is when Linux resends an IRQ that was dropped earlier * REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested * WAITING is used by probe to mark irqs that are being tested
......
...@@ -97,9 +97,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, ...@@ -97,9 +97,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
/* free the old one */ /* free the old one */
free_one_irq_desc(old_desc, desc); free_one_irq_desc(old_desc, desc);
spin_unlock(&old_desc->lock);
kfree(old_desc); kfree(old_desc);
spin_lock(&desc->lock);
return desc; return desc;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment