Commit 54b56170 authored by H. Peter Anvin

Merge remote branch 'origin/x86/apic' into x86/mrst

Conflicts:
	arch/x86/kernel/apic/io_apic.c
parents 1f91233c d02e30c3
@@ -1772,6 +1772,12 @@ and is between 256 and 4096 characters. It is defined in the file
purges which is reported from either PAL_VM_SUMMARY or
SAL PALO.
nr_cpus= [SMP] Maximum number of processors that an SMP kernel
could support. nr_cpus=n : n >= 1 limits the kernel to
supporting 'n' processors. The CPU hotplug feature cannot
later be used to bring more CPUs online, just as if the
kernel had been compiled with NR_CPUS=n.
nr_uarts= [SERIAL] maximum number of UARTs to be registered.
numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
......
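For illustration, a hypothetical boot command line for a kernel built with CONFIG_NR_CPUS=64 (the value 4 is arbitrary):

    ... nr_cpus=4 ...

With this, the nrcpus() early_param handler added in init/main.c below lowers nr_cpu_ids to 4, and prefill_possible_map() will not mark more than 4 CPUs as possible.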
@@ -36,10 +36,6 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return !(ia64_psr(regs)->i);
}
static inline void handle_irq(int irq, struct pt_regs *regs)
{
__do_IRQ(irq);
}
#define irq_ctx_init(cpu) do { } while (0)
#endif /* _ASM_IA64_XEN_EVENTS_H */
@@ -881,8 +881,8 @@ __init void prefill_possible_map(void)
possible = available_cpus + additional_cpus;
if (possible > NR_CPUS)
possible = NR_CPUS;
if (possible > nr_cpu_ids)
possible = nr_cpu_ids;
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
possible, max((possible - available_cpus), 0));
......
@@ -327,7 +327,6 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->free_area_cache = TASK_UNMAPPED_BASE;
current->mm->cached_hole_size = 0;
current->mm->mmap = NULL;
install_exec_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
......
@@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask;
#define SLAVE_ICW4_DEFAULT 0x01
#define PIC_ICW4_AEOI 2
extern spinlock_t i8259A_lock;
extern raw_spinlock_t i8259A_lock;
/* the PIC may need a careful delay on some platforms, hence specific calls */
static inline unsigned char inb_pic(unsigned int port)
......
@@ -158,6 +158,7 @@ extern int io_apic_get_redir_entries(int ioapic);
struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
void setup_IO_APIC_irq_extra(u32 gsi);
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
extern void ioapic_insert_resources(void);
......
@@ -48,5 +48,6 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
extern int vector_used_by_percpu_irq(unsigned int vector);
extern void init_ISA_irqs(void);
extern int nr_legacy_irqs;
#endif /* _ASM_X86_IRQ_H */
@@ -28,28 +28,33 @@
#define MCE_VECTOR 0x12
/*
* IDT vectors usable for external interrupt sources start
* at 0x20:
* IDT vectors usable for external interrupt sources start at 0x20.
* (0x80 is the syscall vector, 0x30-0x3f are for ISA)
*/
#define FIRST_EXTERNAL_VECTOR 0x20
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
# define IA32_SYSCALL_VECTOR 0x80
#else
# define IA32_SYSCALL_VECTOR 0x80
#endif
/*
* We start allocating at 0x21 to spread out vectors evenly between
* priority levels. (0x80 is the syscall vector)
*/
#define VECTOR_OFFSET_START 1
/*
* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
* cleanup after irq migration.
* Reserve the lowest usable vector (and hence lowest priority) 0x20 for
* triggering cleanup after irq migration. 0x21-0x2f will still be used
* for device interrupts.
*/
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
#define IA32_SYSCALL_VECTOR 0x80
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
#endif
/*
* Vectors 0x30-0x3f are used for ISA interrupts;
* round up to the next 16-vector boundary.
*/
#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
#define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15)
#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
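For illustration, with FIRST_EXTERNAL_VECTOR = 0x20 the new rounding form evaluates to the same value as the old 0x20 + 0x10, but makes the 16-vector alignment explicit:

    IRQ0_VECTOR = ((0x20 + 16) & ~15)
                = (0x30 & ~15)
                = 0x30

so the 16 legacy ISA vectors occupy 0x30-0x3f, vector 0x20 is reserved for IRQ_MOVE_CLEANUP_VECTOR, and 0x21-0x2f remain available for device interrupts.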
@@ -120,13 +125,6 @@
*/
#define MCE_SELF_VECTOR 0xeb
/*
* First APIC vector available to drivers: (vectors 0x30-0xee) we
* start at 0x31(0x41) to spread out vectors evenly between priority
* levels. (0x80 is the syscall vector)
*/
#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
#define NR_VECTORS 256
#define FPU_IRQ 13
@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq)
#define NR_IRQS_LEGACY 16
#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
#ifdef CONFIG_X86_IO_APIC
# ifdef CONFIG_SPARSE_IRQ
# define CPU_VECTOR_LIMIT (64 * NR_CPUS)
# define NR_IRQS \
(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
(NR_VECTORS + CPU_VECTOR_LIMIT) : \
(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# else
# if NR_CPUS < MAX_IO_APICS
# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
# else
# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
# endif
# define CPU_VECTOR_LIMIT (32 * NR_CPUS)
# define NR_IRQS \
(CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \
(NR_VECTORS + CPU_VECTOR_LIMIT) : \
(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# endif
#else /* !CONFIG_X86_IO_APIC: */
# define NR_IRQS NR_IRQS_LEGACY
......
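As a worked example of the new NR_IRQS sizing (the config values are chosen purely for illustration): with CONFIG_SPARSE_IRQ, NR_CPUS = 64 and MAX_IO_APICS = 128,

    CPU_VECTOR_LIMIT     =  8 * 64  =  512
    IO_APIC_VECTOR_LIMIT = 32 * 128 = 4096
    NR_IRQS = 256 + max(512, 4096) = 4352

i.e. whichever of the per-CPU and per-IO-APIC limits is larger now bounds the IRQ space, replacing the old NR_CPUS < MAX_IO_APICS #if ladder.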
@@ -447,6 +447,12 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
*irq = gsi;
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
setup_IO_APIC_irq_extra(gsi);
#endif
return 0;
}
@@ -474,7 +480,8 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
}
#endif
acpi_gsi_to_irq(plat_gsi, &irq);
irq = plat_gsi;
return irq;
}
......
@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
/* We can be called before check_nmi_watchdog, hence NULL check. */
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
static DEFINE_SPINLOCK(lock); /* Serialise the printks */
static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
spin_lock(&lock);
raw_spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
show_regs(regs);
dump_stack();
spin_unlock(&lock);
raw_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
rc = 1;
......
@@ -32,7 +32,7 @@
*/
static int i8259A_auto_eoi;
DEFINE_SPINLOCK(i8259A_lock);
DEFINE_RAW_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);
static void mask_8259A(void);
static void unmask_8259A(void);
@@ -74,13 +74,13 @@ static void disable_8259A_irq(unsigned int irq)
unsigned int mask = 1 << irq;
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
static void enable_8259A_irq(unsigned int irq)
@@ -88,13 +88,13 @@ static void enable_8259A_irq(unsigned int irq)
unsigned int mask = ~(1 << irq);
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
static int i8259A_irq_pending(unsigned int irq)
@@ -103,12 +103,12 @@ static int i8259A_irq_pending(unsigned int irq)
unsigned long flags;
int ret;
spin_lock_irqsave(&i8259A_lock, flags);
raw_spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(PIC_MASTER_CMD) & mask;
else
ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
}
@@ -156,7 +156,7 @@ static void mask_and_ack_8259A(unsigned int irq)
unsigned int irqmask = 1 << irq;
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
raw_spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
@@ -189,7 +189,7 @@ handle_real_irq:
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
}
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
@@ -291,24 +291,24 @@ static void mask_8259A(void)
{
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
static void unmask_8259A(void)
{
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
static void init_8259A(int auto_eoi)
@@ -317,7 +317,7 @@ static void init_8259A(int auto_eoi)
i8259A_auto_eoi = auto_eoi;
spin_lock_irqsave(&i8259A_lock, flags);
raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
@@ -362,7 +362,7 @@ static void init_8259A(int auto_eoi)
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/*
......
@@ -84,24 +84,7 @@ static struct irqaction irq2 = {
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... IRQ0_VECTOR - 1] = -1,
[IRQ0_VECTOR] = 0,
[IRQ1_VECTOR] = 1,
[IRQ2_VECTOR] = 2,
[IRQ3_VECTOR] = 3,
[IRQ4_VECTOR] = 4,
[IRQ5_VECTOR] = 5,
[IRQ6_VECTOR] = 6,
[IRQ7_VECTOR] = 7,
[IRQ8_VECTOR] = 8,
[IRQ9_VECTOR] = 9,
[IRQ10_VECTOR] = 10,
[IRQ11_VECTOR] = 11,
[IRQ12_VECTOR] = 12,
[IRQ13_VECTOR] = 13,
[IRQ14_VECTOR] = 14,
[IRQ15_VECTOR] = 15,
[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
[0 ... NR_VECTORS - 1] = -1,
};
int vector_used_by_percpu_irq(unsigned int vector)
@@ -116,6 +99,9 @@ int vector_used_by_percpu_irq(unsigned int vector)
return 0;
}
/* Number of legacy interrupts */
int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
void __init init_ISA_irqs(void)
{
int i;
@@ -142,6 +128,19 @@ void __init init_ISA_irqs(void)
void __init init_IRQ(void)
{
int i;
/*
* On CPU 0, assign IRQ0_VECTOR..IRQ15_VECTOR to IRQs 0..15.
* If these IRQs are handled by legacy interrupt controllers like the
* PIC, this configuration will likely remain static after boot. If
* they are handled by more modern controllers like the IO-APIC, this
* vector space can be freed and re-used dynamically as the IRQs
* migrate, etc.
*/
for (i = 0; i < nr_legacy_irqs; i++)
per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
x86_init.irqs.intr_init();
}
......
@@ -461,6 +461,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
},
},
{ /* Handle problems with rebooting on the iMac9,1. */
.callback = set_pci_reboot,
.ident = "Apple iMac9,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
},
},
{ }
};
......
@@ -243,6 +243,11 @@ static void __cpuinit smp_callin(void)
map_cpu_to_logical_apicid();
notify_cpu_starting(cpuid);
/*
* Need to set up vector mappings before we enable interrupts.
*/
__setup_vector_irq(smp_processor_id());
/*
* Get our bogomips.
*
@@ -317,7 +322,6 @@ notrace static void __cpuinit start_secondary(void *unused)
*/
ipi_call_lock();
lock_vector_lock();
__setup_vector_irq(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
ipi_call_unlock();
@@ -1216,11 +1220,12 @@ __init void prefill_possible_map(void)
total_cpus = max_t(int, possible, num_processors + disabled_cpus);
if (possible > CONFIG_NR_CPUS) {
/* nr_cpu_ids could be reduced via nr_cpus= */
if (possible > nr_cpu_ids) {
printk(KERN_WARNING
"%d Processors exceeds NR_CPUS limit of %d\n",
possible, CONFIG_NR_CPUS);
possible = CONFIG_NR_CPUS;
possible, nr_cpu_ids);
possible = nr_cpu_ids;
}
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
......
@@ -70,11 +70,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
* manually to deassert NMI lines for the watchdog if run
* on an 82489DX-based system.
*/
spin_lock(&i8259A_lock);
raw_spin_lock(&i8259A_lock);
outb(0x0c, PIC_MASTER_OCW3);
/* Ack the IRQ; AEOI will end it automatically. */
inb(PIC_MASTER_POLL);
spin_unlock(&i8259A_lock);
raw_spin_unlock(&i8259A_lock);
}
global_clock_event->event_handler(global_clock_event);
......
@@ -553,7 +553,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
struct irq_desc *desc;
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
raw_spin_lock_irqsave(&i8259A_lock, flags);
/* Find out what's interrupting in the PIIX4 master 8259 */
outb(0x0c, 0x20); /* OCW3 Poll command */
@@ -590,7 +590,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
outb(0x60 + realirq, 0x20);
}
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
desc = irq_to_desc(realirq);
@@ -608,7 +608,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
return IRQ_HANDLED;
out_unlock:
spin_unlock_irqrestore(&i8259A_lock, flags);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return IRQ_NONE;
}
......
@@ -79,11 +79,7 @@ unsigned long vmi_tsc_khz(void)
static inline unsigned int vmi_get_timer_vector(void)
{
#ifdef CONFIG_X86_IO_APIC
return FIRST_DEVICE_VECTOR;
#else
return FIRST_EXTERNAL_VECTOR;
#endif
return IRQ0_VECTOR;
}
/** vmi clockchip */
......
@@ -279,9 +279,9 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
acpi_parse_x2apic_affinity, NR_CPUS);
acpi_parse_x2apic_affinity, nr_cpu_ids);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
acpi_parse_processor_affinity, NR_CPUS);
acpi_parse_processor_affinity, nr_cpu_ids);
ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
......
@@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
int bit_idx = __ffs(pending_bits);
int port = (word_idx * BITS_PER_LONG) + bit_idx;
int irq = evtchn_to_irq[port];
struct irq_desc *desc;
if (irq != -1)
handle_irq(irq, regs);
if (irq != -1) {
desc = irq_to_desc(irq);
if (desc)
generic_handle_irq_desc(irq, desc);
}
}
}
......
@@ -400,7 +400,9 @@ static inline int irq_has_action(unsigned int irq)
/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
void dynamic_irq_init_keep_chip_data(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
......
@@ -149,6 +149,20 @@ static int __init nosmp(char *str)
early_param("nosmp", nosmp);
/* this is a hard limit */
static int __init nrcpus(char *str)
{
int nr_cpus;
get_option(&str, &nr_cpus);
if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
nr_cpu_ids = nr_cpus;
return 0;
}
early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
get_option(&str, &setup_max_cpus);
@@ -584,6 +598,7 @@ asmlinkage void __init start_kernel(void)
local_irq_disable();
}
rcu_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();
@@ -657,7 +672,6 @@ asmlinkage void __init start_kernel(void)
proc_caches_init();
buffer_init();
key_init();
radix_tree_init();
security_init();
vfs_caches_init(totalram_pages);
signals_init();
......
@@ -18,11 +18,7 @@
#include "internals.h"
/**
* dynamic_irq_init - initialize a dynamically allocated irq
* @irq: irq number to initialize
*/
void dynamic_irq_init(unsigned int irq)
static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
{
struct irq_desc *desc;
unsigned long flags;
@@ -41,7 +37,8 @@ void dynamic_irq_init(unsigned int irq)
desc->depth = 1;
desc->msi_desc = NULL;
desc->handler_data = NULL;
desc->chip_data = NULL;
if (!keep_chip_data)
desc->chip_data = NULL;
desc->action = NULL;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
@@ -55,10 +52,26 @@ void dynamic_irq_init(unsigned int irq)
}
/**
* dynamic_irq_cleanup - cleanup a dynamically allocated irq
* dynamic_irq_init - initialize a dynamically allocated irq
* @irq: irq number to initialize
*/
void dynamic_irq_cleanup(unsigned int irq)
void dynamic_irq_init(unsigned int irq)
{
dynamic_irq_init_x(irq, false);
}
/**
* dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
* @irq: irq number to initialize
*
* does not set irq_to_desc(irq)->chip_data to NULL
*/
void dynamic_irq_init_keep_chip_data(unsigned int irq)
{
dynamic_irq_init_x(irq, true);
}
static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
@@ -77,7 +90,8 @@ void dynamic_irq_cleanup(unsigned int irq)
}
desc->msi_desc = NULL;
desc->handler_data = NULL;
desc->chip_data = NULL;
if (!keep_chip_data)
desc->chip_data = NULL;
desc->handle_irq = handle_bad_irq;
desc->chip = &no_irq_chip;
desc->name = NULL;
@@ -85,6 +99,26 @@ void dynamic_irq_cleanup(unsigned int irq)
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/**
* dynamic_irq_cleanup - cleanup a dynamically allocated irq
* @irq: irq number to clean up
*/
void dynamic_irq_cleanup(unsigned int irq)
{
dynamic_irq_cleanup_x(irq, false);
}
/**
* dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
* @irq: irq number to clean up
*
* does not set irq_to_desc(irq)->chip_data to NULL
*/
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
{
dynamic_irq_cleanup_x(irq, true);
}
/**
* set_irq_chip - set the irq chip for an irq
......
@@ -19,7 +19,7 @@
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/bootmem.h>
#include <linux/radix-tree.h>
#include <trace/events/irq.h>
#include "internals.h"
@@ -87,12 +87,8 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
void *ptr;
if (slab_is_available())
ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
GFP_ATOMIC, node);
else
ptr = alloc_bootmem_node(NODE_DATA(node),
nr * sizeof(*desc->kstat_irqs));
ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
GFP_ATOMIC, node);
/*
* don't overwrite if we can not get a new one
@@ -132,7 +128,26 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
*/
DEFINE_RAW_SPINLOCK(sparse_irq_lock);
struct irq_desc **irq_desc_ptrs __read_mostly;
static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
radix_tree_insert(&irq_desc_tree, irq, desc);
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
return radix_tree_lookup(&irq_desc_tree, irq);
}
void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
void **ptr;
ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
if (ptr)
radix_tree_replace_slot(ptr, desc);
}
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS_LEGACY-1] = {
@@ -164,9 +179,6 @@ int __init early_irq_init(void)
legacy_count = ARRAY_SIZE(irq_desc_legacy);
node = first_online_node;
/* allocate irq_desc_ptrs array based on nr_irqs */
irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
/* allocate based on nr_cpu_ids */
kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
sizeof(int), GFP_NOWAIT, node);
@@ -180,23 +192,12 @@ int __init early_irq_init(void)
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
alloc_desc_masks(&desc[i], node, true);
init_desc_masks(&desc[i]);
irq_desc_ptrs[i] = desc + i;
set_irq_desc(i, &desc[i]);
}
for (i = legacy_count; i < nr_irqs; i++)
irq_desc_ptrs[i] = NULL;
return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
if (irq_desc_ptrs && irq < nr_irqs)
return irq_desc_ptrs[irq];
return NULL;
}
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
struct irq_desc *desc;
@@ -208,21 +209,18 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
return NULL;
}
desc = irq_desc_ptrs[irq];
desc = irq_to_desc(irq);
if (desc)
return desc;
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
desc = irq_to_desc(irq);
if (desc)
goto out_unlock;
if (slab_is_available())
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
else
desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
if (!desc) {
@@ -231,7 +229,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
}
init_one_irq_desc(irq, desc, node);
irq_desc_ptrs[irq] = desc;
set_irq_desc(irq, desc);
out_unlock:
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
......
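The kernel/irq/handle.c changes above replace the flat irq_desc_ptrs array with a radix tree keyed by IRQ number. A minimal stand-alone sketch of the same pattern, using only the radix-tree calls that appear in the diff (the example_* names are hypothetical):

    #include <linux/radix-tree.h>

    static RADIX_TREE(example_tree, GFP_ATOMIC);    /* statically initialized root */

    static void example_set(unsigned long key, void *item)
    {
            /* ignores the error return, as set_irq_desc() above does */
            radix_tree_insert(&example_tree, key, item);
    }

    static void *example_get(unsigned long key)
    {
            /* returns NULL when nothing is stored at 'key' */
            return radix_tree_lookup(&example_tree, key);
    }

    static void example_replace(unsigned long key, void *new_item)
    {
            void **slot = radix_tree_lookup_slot(&example_tree, key);

            if (slot)
                    radix_tree_replace_slot(slot, new_item);
    }

This mirrors set_irq_desc()/irq_to_desc()/replace_irq_desc(): insertion may fail under memory pressure, a lookup of a never-inserted key returns NULL, and replacement swaps an existing slot in place.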
@@ -21,11 +21,7 @@ extern void clear_kstat_irqs(struct irq_desc *desc);
extern raw_spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
/* irq_desc_ptrs allocated at boot time */
extern struct irq_desc **irq_desc_ptrs;
#else
/* irq_desc_ptrs is a fixed size array */
extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
#endif
#ifdef CONFIG_PROC_FS
......
@@ -70,7 +70,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
desc = irq_to_desc(irq);
if (desc && old_desc != desc)
goto out_unlock;
@@ -90,7 +90,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
goto out_unlock;
}
irq_desc_ptrs[irq] = desc;
replace_irq_desc(irq, desc);
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
......