Commit d6c88a50 authored by Thomas Gleixner, committed by Ingo Molnar

genirq: revert dynarray

Revert the dynarray changes. They need more thought and polishing.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ee32c973
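For context: dynarray let boot code declare arrays whose length (typically nr_irqs) is only known at boot time, register them in a dedicated linker section, and have them all carved out of a single bootmem allocation in one pass. A minimal sketch of the usage pattern this commit removes, lifted from the timer_rand_state hunk further down (illustrative only, not part of the commit):

        /*
         * Sketch of the reverted pattern, reconstructed from this diff:
         * DEFINE_DYN_ARRAY() drops a struct dyn_array descriptor into the
         * .dyn_array.init linker section; at boot, pre_alloc_dyn_array()
         * sums all registered sizes, does one bootmem allocation, points
         * each array into it and runs the optional init_work() callback.
         */
        static struct timer_rand_state **irq_timer_state;
        DEFINE_DYN_ARRAY(irq_timer_state, sizeof(struct timer_rand_state *),
                         nr_irqs, PAGE_SIZE, NULL);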
@@ -102,7 +102,3 @@ config HAVE_CLK
 	help
 	  The <linux/clk.h> calls support software clock gating and
 	  thus are a key power management tool on many systems.
-
-config HAVE_DYN_ARRAY
-	def_bool n
-
@@ -33,7 +33,6 @@ config X86
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
-	select HAVE_DYN_ARRAY
 
 config ARCH_DEFCONFIG
 	string
......
@@ -107,7 +107,6 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-struct irq_cfg;
 struct irq_pin_list;
 struct irq_cfg {
 	unsigned int irq;
@@ -120,7 +119,7 @@ struct irq_cfg {
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
-static struct irq_cfg irq_cfg_legacy[] __initdata = {
+static struct irq_cfg irq_cfgx[NR_IRQS] = {
 	[0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
 	[1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
 	[2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
@@ -139,48 +138,26 @@ static struct irq_cfg irq_cfg_legacy[] __initdata = {
 	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
 };
-static struct irq_cfg irq_cfg_init = { .irq = -1U, };
-
-static void init_one_irq_cfg(struct irq_cfg *cfg)
-{
-	memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
-}
-
-static struct irq_cfg *irq_cfgx;
-
-static void __init init_work(void *data)
-{
-	struct dyn_array *da = data;
-	struct irq_cfg *cfg;
-	int legacy_count;
-	int i;
-
-	cfg = *da->name;
-
-	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
-
-	legacy_count = ARRAY_SIZE(irq_cfg_legacy);
-	for (i = legacy_count; i < *da->nr; i++)
-		init_one_irq_cfg(&cfg[i]);
-}
-
 #define for_each_irq_cfg(irq, cfg) \
-	for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
-
-DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);
+	for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
 
-struct irq_cfg *irq_cfg(unsigned int irq)
+static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-	if (irq < nr_irqs)
-		return &irq_cfgx[irq];
-
-	return NULL;
+	return irq < nr_irqs ? irq_cfgx + irq : NULL;
 }
 
-struct irq_cfg *irq_cfg_alloc(unsigned int irq)
+static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
 {
 	return irq_cfg(irq);
 }
 
+/*
+ * Rough estimation of how many shared IRQs there are, can be changed
+ * anytime.
+ */
+#define MAX_PLUS_SHARED_IRQS NR_IRQS
+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+
 /*
  * This is performance-critical, we want to do it O(1)
 *
@@ -193,59 +170,29 @@ struct irq_pin_list {
 	struct irq_pin_list *next;
 };
 
-static struct irq_pin_list *irq_2_pin_head;
-/* fill one page ? */
-static int nr_irq_2_pin = 0x100;
+static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
 static struct irq_pin_list *irq_2_pin_ptr;
 
-static void __init irq_2_pin_init_work(void *data)
+static void __init irq_2_pin_init(void)
 {
-	struct dyn_array *da = data;
-	struct irq_pin_list *pin;
+	struct irq_pin_list *pin = irq_2_pin_head;
 	int i;
 
-	pin = *da->name;
-
-	for (i = 1; i < *da->nr; i++)
+	for (i = 1; i < PIN_MAP_SIZE; i++)
 		pin[i-1].next = &pin[i];
 
 	irq_2_pin_ptr = &pin[0];
 }
-DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
 
 static struct irq_pin_list *get_one_free_irq_2_pin(void)
 {
-	struct irq_pin_list *pin;
-	int i;
-
-	pin = irq_2_pin_ptr;
-
-	if (pin) {
-		irq_2_pin_ptr = pin->next;
-		pin->next = NULL;
-		return pin;
-	}
-
-	/*
-	 * we run out of pre-allocate ones, allocate more
-	 */
-	printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
-
-	if (after_bootmem)
-		pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
-				GFP_ATOMIC);
-	else
-		pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
-				nr_irq_2_pin, PAGE_SIZE, 0);
+	struct irq_pin_list *pin = irq_2_pin_ptr;
 
 	if (!pin)
 		panic("can not get more irq_2_pin\n");
 
-	for (i = 1; i < nr_irq_2_pin; i++)
-		pin[i-1].next = &pin[i];
-
 	irq_2_pin_ptr = pin->next;
 	pin->next = NULL;
 	return pin;
 }
@@ -284,6 +231,7 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
+
 	if (sis_apic_bug)
 		writel(reg, &io_apic->index);
 	writel(value, &io_apic->data);
@@ -3885,9 +3833,10 @@ static struct resource * __init ioapic_setup_resources(void)
 void __init ioapic_init_mappings(void)
 {
 	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
-	int i;
 	struct resource *ioapic_res;
+	int i;
 
+	irq_2_pin_init();
 	ioapic_res = ioapic_setup_resources();
 	for (i = 0; i < nr_ioapics; i++) {
 		if (smp_found_config) {
......
@@ -140,7 +140,7 @@ static void __init setup_cpu_pda_map(void)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size, old_size, da_size;
+	ssize_t size, old_size;
 	char *ptr;
 	int cpu;
 	unsigned long align = 1;
@@ -150,9 +150,8 @@ void __init setup_per_cpu_areas(void)
 	/* Copy section for each CPU (we discard the original) */
 	old_size = PERCPU_ENOUGH_ROOM;
-	da_size = per_cpu_dyn_array_size(&align);
 	align = max_t(unsigned long, PAGE_SIZE, align);
-	size = roundup(old_size + da_size, align);
+	size = roundup(old_size, align);
 	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
 	       size);
@@ -182,9 +181,6 @@ void __init setup_per_cpu_areas(void)
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
-		per_cpu_alloc_dyn_array(cpu, ptr + old_size);
 	}
 
 	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
......
@@ -633,7 +633,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 		/*
 		 * handle this 'virtual interrupt' as a Cobalt one now.
 		 */
-		kstat_irqs_this_cpu(desc)++;
+		kstat_incr_irqs_this_cpu(realirq, desc);
 		if (likely(desc->action != NULL))
 			handle_IRQ_event(realirq, desc->action);
......
@@ -145,7 +145,6 @@ SECTIONS
 		*(.x86_cpu_dev.init)
 		__x86_cpu_dev_end = .;
 	}
-	DYN_ARRAY_INIT(8)
 	SECURITY_INIT
 	. = ALIGN(4);
 	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
......
@@ -174,8 +174,6 @@ SECTIONS
 	}
 	__x86_cpu_dev_end = .;
 
-	DYN_ARRAY_INIT(8)
-
 	SECURITY_INIT
 	. = ALIGN(8);
......
@@ -241,7 +241,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-	kstat_irqs_this_cpu(irq_to_desc(irq))++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 out:
 	raw_local_irq_restore(flags);
......
@@ -558,12 +558,7 @@ struct timer_rand_state {
 	unsigned dont_count_entropy:1;
 };
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct timer_rand_state **irq_timer_state;
-DEFINE_DYN_ARRAY(irq_timer_state, sizeof(struct timer_rand_state *), nr_irqs, PAGE_SIZE, NULL);
-#else
 static struct timer_rand_state *irq_timer_state[NR_IRQS];
-#endif
 
 static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
 {
......
@@ -19,20 +19,13 @@ struct irq_2_iommu {
 	u8 irte_mask;
 };
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_2_iommu *irq_2_iommuX;
-DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
-#else
 static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-#endif
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-	if (irq < nr_irqs)
-		return &irq_2_iommuX[irq];
-
-	return NULL;
+	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
 }
 
 static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
 {
 	return irq_2_iommu(irq);
......
@@ -210,19 +210,6 @@
  * All archs are supposed to use RO_DATA() */
 #define RODATA RO_DATA(4096)
 
-#define DYN_ARRAY_INIT(align)						\
-	. = ALIGN((align));						\
-	.dyn_array.init : AT(ADDR(.dyn_array.init) - LOAD_OFFSET) {	\
-		VMLINUX_SYMBOL(__dyn_array_start) = .;			\
-		*(.dyn_array.init)					\
-		VMLINUX_SYMBOL(__dyn_array_end) = .;			\
-	}								\
-	. = ALIGN((align));						\
-	.per_cpu_dyn_array.init : AT(ADDR(.per_cpu_dyn_array.init) - LOAD_OFFSET) { \
-		VMLINUX_SYMBOL(__per_cpu_dyn_array_start) = .;		\
-		*(.per_cpu_dyn_array.init)				\
-		VMLINUX_SYMBOL(__per_cpu_dyn_array_end) = .;		\
-	}
-
 #define SECURITY_INIT						\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
......
@@ -247,49 +247,6 @@ struct obs_kernel_param {
 /* Relies on boot_command_line being set */
 void __init parse_early_param(void);
 
-struct dyn_array {
-	void **name;
-	unsigned long size;
-	unsigned int *nr;
-	unsigned long align;
-	void (*init_work)(void *);
-};
-extern struct dyn_array *__dyn_array_start[], *__dyn_array_end[];
-extern struct dyn_array *__per_cpu_dyn_array_start[], *__per_cpu_dyn_array_end[];
-
-#define DEFINE_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
-	static struct dyn_array __dyn_array_##nameX __initdata =	\
-		{	.name = (void **)&(nameX),	\
-			.size = sizeX,			\
-			.nr = &(nrX),			\
-			.align = alignX,		\
-			.init_work = init_workX,	\
-		};					\
-	static struct dyn_array *__dyn_array_ptr_##nameX __used	\
-		__attribute__((__section__(".dyn_array.init"))) =	\
-		&__dyn_array_##nameX
-
-#define DEFINE_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX)	\
-	DEFINE_DYN_ARRAY_ADDR(nameX, nameX, sizeX, nrX, alignX, init_workX)
-
-#define DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
-	static struct dyn_array __per_cpu_dyn_array_##nameX __initdata = \
-		{	.name = (void **)&(addrX),	\
-			.size = sizeX,			\
-			.nr = &(nrX),			\
-			.align = alignX,		\
-			.init_work = init_workX,	\
-		};					\
-	static struct dyn_array *__per_cpu_dyn_array_ptr_##nameX __used \
-		__attribute__((__section__(".per_cpu_dyn_array.init"))) = \
-		&__per_cpu_dyn_array_##nameX
-
-#define DEFINE_PER_CPU_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
-	DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, nameX, nrX, alignX, init_workX)
-
-extern void pre_alloc_dyn_array(void);
-extern unsigned long per_cpu_dyn_array_size(unsigned long *align);
-extern void per_cpu_alloc_dyn_array(int cpu, char *ptr);
-
 #endif /* __ASSEMBLY__ */
 
 /**
......
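As a reading aid: expanding the removed DEFINE_DYN_ARRAY() by hand for the irq_cfgx case from the I/O APIC hunk above gives roughly the following (a sketch for illustration, not part of the commit):

        /* Hand expansion of
         * DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs,
         *                  PAGE_SIZE, init_work)
         * according to the macro removed above.
         */
        static struct dyn_array __dyn_array_irq_cfgx __initdata = {
                .name      = (void **)&irq_cfgx,
                .size      = sizeof(struct irq_cfg),
                .nr        = &nr_irqs,
                .align     = PAGE_SIZE,
                .init_work = init_work,
        };
        /* Only the pointer lands in the section; DYN_ARRAY_INIT() collects
         * everything between __dyn_array_start and __dyn_array_end. */
        static struct dyn_array *__dyn_array_ptr_irq_cfgx __used
                __attribute__((__section__(".dyn_array.init"))) =
                &__dyn_array_irq_cfgx;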
@@ -139,8 +139,6 @@ struct irq_chip {
 	const char *typename;
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
  *
@@ -167,9 +165,6 @@ struct irq_2_iommu;
  */
 struct irq_desc {
 	unsigned int		irq;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned int		*kstat_irqs;
-#endif
 	irq_flow_handler_t	handle_irq;
 	struct irq_chip		*chip;
 	struct msi_desc		*msi_desc;
@@ -198,23 +193,13 @@ struct irq_desc {
 } ____cacheline_internodealigned_in_smp;
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
-/* could be removed if we get rid of all irq_desc reference */
 extern struct irq_desc irq_desc[NR_IRQS];
-#else
-extern struct irq_desc *irq_desc;
-#endif
 
 static inline struct irq_desc *irq_to_desc(unsigned int irq)
 {
 	return (irq < nr_irqs) ? irq_desc + irq : NULL;
 }
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-#define kstat_irqs_this_cpu(DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()])
-#endif
-
 /*
  * Migration helpers for obsolete names, they will go away:
  */
......
@@ -28,9 +28,7 @@ struct cpu_usage_stat {
 struct kernel_stat {
 	struct cpu_usage_stat	cpustat;
-#ifndef CONFIG_HAVE_DYN_ARRAY
 	unsigned int irqs[NR_IRQS];
-#endif
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -41,20 +39,18 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 extern unsigned long long nr_context_switches(void);
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
-#define kstat_irqs_this_cpu(irq) \
-	(kstat_this_cpu.irqs[irq])
-#endif
+struct irq_desc;
+
+static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
+					    struct irq_desc *desc)
+{
+	kstat_this_cpu.irqs[irq]++;
+}
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	return kstat_cpu(cpu).irqs[irq];
 }
-#else
-extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
-#endif
 
 /*
  * Number of interrupts per specific IRQ source, since bootup
......
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-obj-y := main.o dyn_array.o version.o mounts.o
+obj-y := main.o version.o mounts.o
 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y += noinitramfs.o
 else
......
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/kallsyms.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-
-void __init pre_alloc_dyn_array(void)
-{
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long total_size = 0, size, phys;
-	unsigned long max_align = 1;
-	struct dyn_array **daa;
-	char *ptr;
-
-	/* get the total size at first */
-	for (daa = __dyn_array_start ; daa < __dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		printk(KERN_DEBUG "dyn_array %pF size:%#lx nr:%d align:%#lx\n",
-			da->name, da->size, *da->nr, da->align);
-		size = da->size * (*da->nr);
-		total_size += roundup(size, da->align);
-		if (da->align > max_align)
-			max_align = da->align;
-	}
-	if (total_size)
-		printk(KERN_DEBUG "dyn_array total_size: %#lx\n",
-			total_size);
-	else
-		return;
-
-	/* allocate them all together */
-	max_align = max_t(unsigned long, max_align, PAGE_SIZE);
-	ptr = __alloc_bootmem(total_size, max_align, 0);
-	phys = virt_to_phys(ptr);
-
-	for (daa = __dyn_array_start ; daa < __dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		size = da->size * (*da->nr);
-		phys = roundup(phys, da->align);
-		printk(KERN_DEBUG "dyn_array %pF ==> [%#lx - %#lx]\n",
-			da->name, phys, phys + size);
-		*da->name = phys_to_virt(phys);
-
-		phys += size;
-
-		if (da->init_work)
-			da->init_work(da);
-	}
-#else
-#ifdef CONFIG_GENERIC_HARDIRQS
-	unsigned int i;
-
-	for (i = 0; i < NR_IRQS; i++)
-		irq_desc[i].irq = i;
-#endif
-#endif
-}
-
-unsigned long __init per_cpu_dyn_array_size(unsigned long *align)
-{
-	unsigned long total_size = 0;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long size;
-	struct dyn_array **daa;
-	unsigned max_align = 1;
-
-	for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		printk(KERN_DEBUG "per_cpu_dyn_array %pF size:%#lx nr:%d align:%#lx\n",
-			da->name, da->size, *da->nr, da->align);
-		size = da->size * (*da->nr);
-		total_size += roundup(size, da->align);
-		if (da->align > max_align)
-			max_align = da->align;
-	}
-	if (total_size) {
-		printk(KERN_DEBUG "per_cpu_dyn_array total_size: %#lx\n",
-			total_size);
-		*align = max_align;
-	}
-#endif
-	return total_size;
-}
-
-#ifdef CONFIG_SMP
-void __init per_cpu_alloc_dyn_array(int cpu, char *ptr)
-{
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long size, phys;
-	struct dyn_array **daa;
-	unsigned long addr;
-	void **array;
-
-	phys = virt_to_phys(ptr);
-	for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		size = da->size * (*da->nr);
-		phys = roundup(phys, da->align);
-		printk(KERN_DEBUG "per_cpu_dyn_array %pF ==> [%#lx - %#lx]\n",
-			da->name, phys, phys + size);
-
-		addr = (unsigned long)da->name;
-		addr += per_cpu_offset(cpu);
-		array = (void **)addr;
-		*array = phys_to_virt(phys);
-		*da->name = *array; /* so init_work could use it directly */
-
-		phys += size;
-
-		if (da->init_work)
-			da->init_work(da);
-	}
-#endif
-}
-#endif
@@ -391,23 +391,17 @@ EXPORT_SYMBOL(__per_cpu_offset);
 static void __init setup_per_cpu_areas(void)
 {
-	unsigned long size, i, old_size;
+	unsigned long size, i;
 	char *ptr;
 	unsigned long nr_possible_cpus = num_possible_cpus();
-	unsigned long align = 1;
-	unsigned da_size;
 
 	/* Copy section for each CPU (we discard the original) */
-	old_size = PERCPU_ENOUGH_ROOM;
-	da_size = per_cpu_dyn_array_size(&align);
-	align = max_t(unsigned long, PAGE_SIZE, align);
-	size = ALIGN(old_size + da_size, align);
+	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
 	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
 
 	for_each_possible_cpu(i) {
 		__per_cpu_offset[i] = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		per_cpu_alloc_dyn_array(i, ptr + old_size);
 		ptr += size;
 	}
 }
@@ -573,7 +567,6 @@ asmlinkage void __init start_kernel(void)
 	printk(KERN_NOTICE);
 	printk(linux_banner);
 	setup_arch(&command_line);
-	pre_alloc_dyn_array();
 	mm_init_owner(&init_mm, &init_task);
 	setup_command_line(command_line);
 	unwind_setup();
......
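To make the setup_per_cpu_areas() changes above easier to follow: before the revert, each CPU's per-cpu chunk had to hold the static percpu image plus all registered per-cpu dyn_arrays; the revert goes back to sizing by PERCPU_ENOUGH_ROOM alone. A schematic of the removed sizing logic (sketch only, not the literal kernel code):

        /* Removed layout, per possible CPU:
         *   [ptr, ptr + old_size)        copy of the static percpu section
         *   [ptr + old_size, ptr + size) dyn_array payload, laid out by
         *                                per_cpu_alloc_dyn_array(cpu, ptr + old_size)
         */
        unsigned long align = 1;
        unsigned long old_size = PERCPU_ENOUGH_ROOM;
        unsigned long da_size = per_cpu_dyn_array_size(&align); /* 0 if none registered */
        unsigned long size = roundup(old_size + da_size,
                                     max_t(unsigned long, PAGE_SIZE, align));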
@@ -326,11 +326,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -371,11 +367,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -422,11 +414,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 		goto out;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -490,11 +478,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		mask_ack_irq(desc, irq);
 		goto out_unlock;
 	}
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
 	desc->chip->ack(irq);
@@ -549,11 +533,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	irqreturn_t action_ret;
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
......
@@ -18,11 +18,6 @@
 
 #include "internals.h"
 
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq: the interrupt number
@@ -30,15 +25,10 @@ static struct lock_class_key irq_desc_lock_class;
  *
  * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
  */
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
 }
@@ -59,80 +49,6 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_desc irq_desc_init = {
-	.irq = -1U,
-	.status = IRQ_DISABLED,
-	.chip = &no_irq_chip,
-	.handle_irq = handle_bad_irq,
-	.depth = 1,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
-};
-
-static void init_one_irq_desc(struct irq_desc *desc)
-{
-	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-}
-
-extern int after_bootmem;
-extern void *__alloc_bootmem_nopanic(unsigned long size,
-				unsigned long align,
-				unsigned long goal);
-
-static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
-{
-	unsigned long bytes, total_bytes;
-	char *ptr;
-	int i;
-	unsigned long phys;
-
-	/* Compute how many bytes we need per irq and allocate them */
-	bytes = nr * sizeof(unsigned int);
-	total_bytes = bytes * nr_desc;
-	if (after_bootmem)
-		ptr = kzalloc(total_bytes, GFP_ATOMIC);
-	else
-		ptr = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
-
-	if (!ptr)
-		panic(" can not allocate kstat_irqs\n");
-
-	phys = __pa(ptr);
-	printk(KERN_DEBUG "kstat_irqs ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
-
-	for (i = 0; i < nr_desc; i++) {
-		desc[i].kstat_irqs = (unsigned int *)ptr;
-		ptr += bytes;
-	}
-}
-
-static void __init init_work(void *data)
-{
-	struct dyn_array *da = data;
-	int i;
-	struct irq_desc *desc;
-
-	desc = *da->name;
-	for (i = 0; i < *da->nr; i++) {
-		init_one_irq_desc(&desc[i]);
-		desc[i].irq = i;
-	}
-
-	/* init kstat_irqs, nr_cpu_ids is ready already */
-	init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
-}
-
-struct irq_desc *irq_desc;
-DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
-
-#else
-
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -146,8 +62,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
-#endif
-
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -258,11 +172,8 @@ unsigned int __do_IRQ(unsigned int irq)
 	struct irqaction *action;
 	unsigned int status;
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
 		irqreturn_t action_ret;
@@ -351,23 +262,16 @@ out:
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+static struct lock_class_key irq_desc_lock_class;
+
 void early_init_irq_lock_class(void)
 {
-#ifndef CONFIG_HAVE_DYN_ARRAY
 	int i;
 
 	for (i = 0; i < nr_irqs; i++)
 		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
-#endif
 }
 
 #endif
-
-#ifdef CONFIG_HAVE_DYN_ARRAY
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->kstat_irqs[cpu];
-}
-#endif
-EXPORT_SYMBOL(kstat_irqs_cpu);