Commit 1230366a authored by Tony Lindgren's avatar Tony Lindgren

Manual merge to make things compile after updating to 2.6.18-rc4

Some things are probably broken right now with the generic irq
code update.
parent 617dbdff
......@@ -21,12 +21,12 @@
* IRQ's are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
......@@ -38,28 +38,9 @@
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
/*
* Maximum IRQ count. Currently, this is arbitary. However, it should
* not be set too low to prevent false triggering. Conversely, if it
* is set too high, then you could miss a stuck IRQ.
*
* Maybe we ought to set a timer and re-enable the IRQ at a later time?
*/
#define MAX_IRQ_CNT 100000
static int noirqdebug;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);
struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;
/*
* No architecture-specific irq_finish function defined in arm/arch/irqs.h.
*/
......@@ -67,163 +48,8 @@ void (*init_arch_irq)(void) __initdata = NULL;
#define irq_finish(irq) do { } while (0)
#endif
/*
* Dummy mask/unmask handler
*/
void dummy_mask_unmask_irq(unsigned int irq)
{
}
irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
return IRQ_NONE;
}
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
irq_err_count += 1;
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
static struct irqchip bad_chip = {
.ack = dummy_mask_unmask_irq,
.mask = dummy_mask_unmask_irq,
.unmask = dummy_mask_unmask_irq,
};
static struct irqdesc bad_irq_desc = {
.chip = &bad_chip,
.handle = do_bad_IRQ,
.pend = LIST_HEAD_INIT(bad_irq_desc.pend),
.disable_depth = 1,
};
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
struct irqdesc *desc = irq_desc + irq;
while (desc->running)
barrier();
}
EXPORT_SYMBOL(synchronize_irq);
#define smp_set_running(desc) do { desc->running = 1; } while (0)
#define smp_clear_running(desc) do { desc->running = 0; } while (0)
#else
#define smp_set_running(desc) do { } while (0)
#define smp_clear_running(desc) do { } while (0)
#endif
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Enables and disables
* are nested. We do this lazily.
*
* This function may be called from IRQ context.
*/
void disable_irq_nosync(unsigned int irq)
{
struct irqdesc *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&irq_controller_lock, flags);
desc->disable_depth++;
list_del_init(&desc->pend);
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
* disable_irq - disable an irq and wait for completion
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Enables and disables
* are nested. This functions waits for any pending IRQ
* handlers for this interrupt to complete before returning.
* If you use this function while holding a resource the IRQ
* handler may need you will deadlock.
*
* This function may be called - with care - from IRQ context.
*/
void disable_irq(unsigned int irq)
{
struct irqdesc *desc = irq_desc + irq;
disable_irq_nosync(irq);
if (desc->action)
synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
/**
* enable_irq - enable interrupt handling on an irq
* @irq: Interrupt to enable
*
* Re-enables the processing of interrupts on this IRQ line.
* Note that this may call the interrupt handler, so you may
* get unexpected results if you hold IRQs disabled.
*
* This function may be called from IRQ context.
*/
void enable_irq(unsigned int irq)
{
struct irqdesc *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&irq_controller_lock, flags);
if (unlikely(!desc->disable_depth)) {
printk("enable_irq(%u) unbalanced from %p\n", irq,
__builtin_return_address(0));
} else if (!--desc->disable_depth) {
desc->probing = 0;
desc->chip->unmask(irq);
/*
* If the interrupt is waiting to be processed,
* try to re-run it. We can't directly run it
* from here since the caller might be in an
* interrupt-protected region.
*/
if (desc->pending && list_empty(&desc->pend)) {
desc->pending = 0;
if (!desc->chip->retrigger ||
desc->chip->retrigger(irq))
list_add(&desc->pend, &irq_pending);
}
}
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);
/*
* Enable wake on selected irq
*/
void enable_irq_wake(unsigned int irq)
{
struct irqdesc *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&irq_controller_lock, flags);
if (desc->chip->set_wake)
desc->chip->set_wake(irq, 1);
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);
void disable_irq_wake(unsigned int irq)
{
struct irqdesc *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&irq_controller_lock, flags);
if (desc->chip->set_wake)
desc->chip->set_wake(irq, 0);
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);
void (*init_arch_irq)(void) __initdata = NULL;
unsigned long irq_err_count;
int show_interrupts(struct seq_file *p, void *v)
{
......@@ -243,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
spin_lock_irqsave(&irq_controller_lock, flags);
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
......@@ -251,13 +77,14 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%3d: ", i);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
spin_unlock_irqrestore(&irq_controller_lock, flags);
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
show_fiq_list(p, v);
......@@ -271,267 +98,11 @@ unlock:
return 0;
}
/*
* IRQ lock detection.
*
* Hopefully, this should get us out of a few locked situations.
* However, it may take a while for this to happen, since we need
* a large number if IRQs to appear in the same jiffie with the
* same instruction pointer (or within 2 instructions).
*/
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
unsigned long instr_ptr = instruction_pointer(regs);
if (desc->lck_jif == jiffies &&
desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
desc->lck_cnt += 1;
if (desc->lck_cnt > MAX_IRQ_CNT) {
printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
return 1;
}
} else {
desc->lck_cnt = 0;
desc->lck_pc = instruction_pointer(regs);
desc->lck_jif = jiffies;
}
return 0;
}
static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
static int count = 100;
struct irqaction *action;
if (noirqdebug)
return;
if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
if (!count)
return;
count--;
printk("irq%u: bogus retval mask %x\n", irq, ret);
} else {
desc->irqs_unhandled++;
if (desc->irqs_unhandled <= 99900)
return;
desc->irqs_unhandled = 0;
printk("irq%u: nobody cared\n", irq);
}
show_regs(regs);
dump_stack();
printk(KERN_ERR "handlers:");
action = desc->action;
do {
printk("\n" KERN_ERR "[<%p>]", action->handler);
print_symbol(" (%s)", (unsigned long)action->handler);
action = action->next;
} while (action);
printk("\n");
}
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
unsigned int status;
int ret, retval = 0;
spin_unlock(&irq_controller_lock);
#ifdef CONFIG_NO_IDLE_HZ
if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
spin_lock(&system_timer->dyn_tick->lock);
if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
system_timer->dyn_tick->handler(irq, 0, regs);
spin_unlock(&system_timer->dyn_tick->lock);
}
#endif
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
status = 0;
do {
ret = action->handler(irq, action->dev_id, regs);
if (ret == IRQ_HANDLED)
status |= action->flags;
retval |= ret;
action = action->next;
} while (action);
if (status & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
spin_lock_irq(&irq_controller_lock);
return retval;
}
/*
* This is for software-decoded IRQs. The caller is expected to
* handle the ack, clear, mask and unmask issues.
*/
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
struct irqaction *action;
const unsigned int cpu = smp_processor_id();
desc->triggered = 1;
kstat_cpu(cpu).irqs[irq]++;
smp_set_running(desc);
action = desc->action;
if (action) {
int ret = __do_irq(irq, action, regs);
if (ret != IRQ_HANDLED)
report_bad_irq(irq, regs, desc, ret);
}
smp_clear_running(desc);
}
/*
* Most edge-triggered IRQ implementations seem to take a broken
* approach to this. Hence the complexity.
*/
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
const unsigned int cpu = smp_processor_id();
desc->triggered = 1;
/*
* If we're currently running this IRQ, or its disabled,
* we shouldn't process the IRQ. Instead, turn on the
* hardware masks.
*/
if (unlikely(desc->running || desc->disable_depth))
goto running;
/*
* Acknowledge and clear the IRQ, but don't mask it.
*/
desc->chip->ack(irq);
/*
* Mark the IRQ currently in progress.
*/
desc->running = 1;
kstat_cpu(cpu).irqs[irq]++;
do {
struct irqaction *action;
action = desc->action;
if (!action)
break;
if (desc->pending && !desc->disable_depth) {
desc->pending = 0;
desc->chip->unmask(irq);
}
__do_irq(irq, action, regs);
} while (desc->pending && !desc->disable_depth);
desc->running = 0;
/*
* If we were disabled or freed, shut down the handler.
*/
if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
return;
running:
/*
* We got another IRQ while this one was masked or
* currently running. Delay it.
*/
desc->pending = 1;
desc->chip->mask(irq);
desc->chip->ack(irq);
}
/*
* Level-based IRQ handler. Nice and simple.
*/
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
struct irqaction *action;
const unsigned int cpu = smp_processor_id();
desc->triggered = 1;
/*
* Acknowledge, clear _AND_ disable the interrupt.
*/
desc->chip->ack(irq);
if (likely(!desc->disable_depth)) {
kstat_cpu(cpu).irqs[irq]++;
smp_set_running(desc);
/*
* Return with this interrupt masked if no action
*/
action = desc->action;
if (action) {
int ret = __do_irq(irq, desc->action, regs);
if (ret != IRQ_HANDLED)
report_bad_irq(irq, regs, desc, ret);
if (likely(!desc->disable_depth &&
!check_irq_lock(desc, irq, regs)))
desc->chip->unmask(irq);
}
smp_clear_running(desc);
}
}
static void do_pending_irqs(struct pt_regs *regs)
{
struct list_head head, *l, *n;
do {
struct irqdesc *desc;
/*
* First, take the pending interrupts off the list.
* The act of calling the handlers may add some IRQs
* back onto the list.
*/
head = irq_pending;
INIT_LIST_HEAD(&irq_pending);
head.next->prev = &head;
head.prev->next = &head;
/*
* Now run each entry. We must delete it from our
* list before calling the handler.
*/
list_for_each_safe(l, n, &head) {
desc = list_entry(l, struct irqdesc, pend);
list_del_init(&desc->pend);
desc_handle_irq(desc - irq_desc, desc, regs);
}
/*
* The list must be empty.
*/
BUG_ON(!list_empty(&head));
} while (!list_empty(&irq_pending));
}
/* Handle bad interrupts */
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
.lock = SPIN_LOCK_UNLOCKED
};
/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
......@@ -550,96 +121,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
desc = &bad_irq_desc;
irq_enter();
spin_lock(&irq_controller_lock);
desc_handle_irq(irq, desc, regs);
/*
* Now re-run any pending interrupts.
*/
if (!list_empty(&irq_pending))
do_pending_irqs(regs);
desc_handle_irq(irq, desc, regs);
/* AT91 specific workaround */
irq_finish(irq);
spin_unlock(&irq_controller_lock);
irq_exit();
}
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
struct irqdesc *desc;
unsigned long flags;
if (irq >= NR_IRQS) {
printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
return;
}
if (handle == NULL)
handle = do_bad_IRQ;
desc = irq_desc + irq;
if (is_chained && desc->chip == &bad_chip)
printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);
spin_lock_irqsave(&irq_controller_lock, flags);
if (handle == do_bad_IRQ) {
desc->chip->mask(irq);
desc->chip->ack(irq);
desc->disable_depth = 1;
}
desc->handle = handle;
if (handle != do_bad_IRQ && is_chained) {
desc->valid = 0;
desc->probe_ok = 0;
desc->disable_depth = 0;
desc->chip->unmask(irq);
}
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
struct irqdesc *desc;
unsigned long flags;
if (irq >= NR_IRQS) {
printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
return;
}
if (chip == NULL)
chip = &bad_chip;
desc = irq_desc + irq;
spin_lock_irqsave(&irq_controller_lock, flags);
desc->chip = chip;
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
int set_irq_type(unsigned int irq, unsigned int type)
{
struct irqdesc *desc;
unsigned long flags;
int ret = -ENXIO;
if (irq >= NR_IRQS) {
printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
return -ENODEV;
}
desc = irq_desc + irq;
if (desc->chip->set_type) {
spin_lock_irqsave(&irq_controller_lock, flags);
ret = desc->chip->set_type(irq, type);
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
return ret;
}
EXPORT_SYMBOL(set_irq_type);
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
struct irqdesc *desc;
......@@ -651,422 +141,43 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
}
desc = irq_desc + irq;
spin_lock_irqsave(&irq_controller_lock, flags);
desc->valid = (iflags & IRQF_VALID) != 0;
desc->probe_ok = (iflags & IRQF_PROBE) != 0;
desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
int setup_irq(unsigned int irq, struct irqaction *new)
{
int shared = 0;
struct irqaction *old, **p;
unsigned long flags;
struct irqdesc *desc;
/*
* Some drivers like serial.c use request_irq() heavily,
* so we have to be careful not to interfere with a
* running system.
*/
if (new->flags & SA_SAMPLE_RANDOM) {
/*
* This function might sleep, we want to call it first,
* outside of the atomic block.
* Yes, this might clear the entropy pool if the wrong
* driver is attempted to be loaded, without actually
* installing a new handler, but is this really a problem,
* only the sysadmin is able to do this.
*/
rand_initialize_irq(irq);
}
/*
* The following block of code has to be executed atomically
*/
desc = irq_desc + irq;
spin_lock_irqsave(&irq_controller_lock, flags);
p = &desc->action;
if ((old = *p) != NULL) {
/*
* Can't share interrupts unless both agree to and are
* the same type.
*/
if (!(old->flags & new->flags & SA_SHIRQ) ||
(~old->flags & new->flags) & SA_TRIGGER_MASK) {
spin_unlock_irqrestore(&irq_controller_lock, flags);
return -EBUSY;
}
/* add new interrupt at end of irq queue */
do {
p = &old->next;
old = *p;
} while (old);
shared = 1;
}
*p = new;
if (!shared) {
desc->probing = 0;
desc->running = 0;
desc->pending = 0;
desc->disable_depth = 1;
if (new->flags & SA_TRIGGER_MASK &&
desc->chip->set_type) {
unsigned int type = new->flags & SA_TRIGGER_MASK;
desc->chip->set_type(irq, type);
}
if (!desc->noautoenable) {
desc->disable_depth = 0;
desc->chip->unmask(irq);
}
}
spin_unlock_irqrestore(&irq_controller_lock, flags);
return 0;
}
/**
* request_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* This call allocates interrupt resources and enables the
* interrupt line and IRQ handling. From the point this
* call is made your handler function may be invoked. Since
* your handler function must clear any interrupt the board
* raises, you must take care both to initialise your hardware
* and to set up the interrupt handler in the right order.
*
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
*
* If your interrupt is shared you must pass a non NULL dev_id
* as this is required when freeing the interrupt.
*
* Flags:
*
* SA_SHIRQ Interrupt is shared
*
* SA_INTERRUPT Disable local interrupts while processing
*
* SA_SAMPLE_RANDOM The interrupt can be used for entropy
*
*/
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long irq_flags, const char * devname, void *dev_id)
{
unsigned long retval;
struct irqaction *action;
if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
(irq_flags & SA_SHIRQ && !dev_id))
return -EINVAL;
action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
action->flags = irq_flags;
cpus_clear(action->mask);
action->name = devname;
action->next = NULL;
action->dev_id = dev_id;
retval = setup_irq(irq, action);
if (retval)
kfree(action);
return retval;
}
EXPORT_SYMBOL(request_irq);
/**
* free_irq - free an interrupt
* @irq: Interrupt line to free
* @dev_id: Device identity to free
*
* Remove an interrupt handler. The handler is removed and if the
* interrupt line is no longer in use by any driver it is disabled.
* On a shared IRQ the caller must ensure the interrupt is disabled
* on the card it drives before calling this function.
*
* This function must not be called from interrupt context.
*/
void free_irq(unsigned int irq, void *dev_id)
{
struct irqaction * action, **p;
unsigned long flags;
if (irq >= NR_IRQS || !irq_desc[irq].valid) {
printk(KERN_ERR "Trying to free IRQ%d\n",irq);
dump_stack();
return;
}
spin_lock_irqsave(&irq_controller_lock, flags);
for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
if (action->dev_id != dev_id)
continue;
/* Found it - now free it */
*p = action->next;
break;
}
spin_unlock_irqrestore(&irq_controller_lock, flags);
if (!action) {
printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
dump_stack();
} else {
synchronize_irq(irq);
kfree(action);
}
}
EXPORT_SYMBOL(free_irq);
static DECLARE_MUTEX(probe_sem);
/* Start the interrupt probing. Unlike other architectures,
* we don't return a mask of interrupts from probe_irq_on,
* but return the number of interrupts enabled for the probe.
* The interrupts which have been enabled for probing is
* instead recorded in the irq_desc structure.
*/
unsigned long probe_irq_on(void)
{
unsigned int i, irqs = 0;
unsigned long delay;
down(&probe_sem);
/*
* first snaffle up any unassigned but
* probe-able interrupts
*/
spin_lock_irq(&irq_controller_lock);
for (i = 0; i < NR_IRQS; i++) {
if (!irq_desc[i].probe_ok || irq_desc[i].action)
continue;
irq_desc[i].probing = 1;
irq_desc[i].triggered = 0;
if (irq_desc[i].chip->set_type)
irq_desc[i].chip->set_type(i, IRQT_PROBE);
irq_desc[i].chip->unmask(i);
irqs += 1;
}
spin_unlock_irq(&irq_controller_lock);
/*
* wait for spurious interrupts to mask themselves out again
*/
for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
/* min 100ms delay */;
/*
* now filter out any obviously spurious interrupts
*/
spin_lock_irq(&irq_controller_lock);
for (i = 0; i < NR_IRQS; i++) {
if (irq_desc[i].probing && irq_desc[i].triggered) {
irq_desc[i].probing = 0;
irqs -= 1;
}
}
spin_unlock_irq(&irq_controller_lock);
return irqs;
}
EXPORT_SYMBOL(probe_irq_on);
unsigned int probe_irq_mask(unsigned long irqs)
{
unsigned int mask = 0, i;
spin_lock_irq(&irq_controller_lock);
for (i = 0; i < 16 && i < NR_IRQS; i++)
if (irq_desc[i].probing && irq_desc[i].triggered)
mask |= 1 << i;
spin_unlock_irq(&irq_controller_lock);
up(&probe_sem);
return mask;
}
EXPORT_SYMBOL(probe_irq_mask);
/*
* Possible return values:
* >= 0 - interrupt number
* -1 - no interrupt/many interrupts
*/
int probe_irq_off(unsigned long irqs)
{
unsigned int i;
int irq_found = NO_IRQ;
/*
* look at the interrupts, and find exactly one
* that we were probing has been triggered
*/
spin_lock_irq(&irq_controller_lock);
for (i = 0; i < NR_IRQS; i++) {
if (irq_desc[i].probing &&
irq_desc[i].triggered) {
if (irq_found != NO_IRQ) {
irq_found = NO_IRQ;
goto out;
}
irq_found = i;
}
}
if (irq_found == -1)
irq_found = NO_IRQ;
out:
spin_unlock_irq(&irq_controller_lock);
up(&probe_sem);
return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
#ifdef CONFIG_SMP
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
spin_lock_irq(&irq_controller_lock);
desc->cpu = cpu;
desc->chip->set_cpu(desc, irq, cpu);
spin_unlock_irq(&irq_controller_lock);
}
#ifdef CONFIG_PROC_FS
static int
irq_affinity_read_proc(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
struct irqdesc *desc = irq_desc + ((int)data);
int len = cpumask_scnprintf(page, count, desc->affinity);
if (count - len < 2)
return -EINVAL;
page[len++] = '\n';
page[len] = '\0';
return len;
}
static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
unsigned int irq = (unsigned int)data;
struct irqdesc *desc = irq_desc + irq;
cpumask_t affinity, tmp;
int ret = -EIO;
if (!desc->chip->set_cpu)
goto out;
ret = cpumask_parse(buffer, count, affinity);
if (ret)
goto out;
cpus_and(tmp, affinity, cpu_online_map);
if (cpus_empty(tmp)) {
ret = -EINVAL;
goto out;
}
desc->affinity = affinity;
route_irq(desc, irq, first_cpu(tmp));
ret = count;
out:
return ret;
}
#endif
#endif
void __init init_irq_proc(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
struct proc_dir_entry *dir;
int irq;
dir = proc_mkdir("irq", NULL);
if (!dir)
return;
for (irq = 0; irq < NR_IRQS; irq++) {
struct proc_dir_entry *entry;
struct irqdesc *desc;
char name[16];
desc = irq_desc + irq;
memset(name, 0, sizeof(name));
snprintf(name, sizeof(name) - 1, "%u", irq);
desc->procdir = proc_mkdir(name, dir);
if (!desc->procdir)
continue;
entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
if (entry) {
entry->nlink = 1;
entry->data = (void *)irq;
entry->read_proc = irq_affinity_read_proc;
entry->write_proc = irq_affinity_write_proc;
}
}
#endif
spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (iflags & IRQF_VALID)
desc->status &= ~IRQ_NOREQUEST;
if (iflags & IRQF_PROBE)
desc->status &= ~IRQ_NOPROBE;
if (!(iflags & IRQF_NOAUTOEN))
desc->status &= ~IRQ_NOAUTOEN;
spin_unlock_irqrestore(&desc->lock, flags);
}
void __init init_IRQ(void)
{
struct irqdesc *desc;
int irq;
for (irq = 0; irq < NR_IRQS; irq++)
irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_DELAYED_DISABLE |
IRQ_NOPROBE;
#ifdef CONFIG_SMP
bad_irq_desc.affinity = CPU_MASK_ALL;
bad_irq_desc.cpu = smp_processor_id();
#endif
for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
*desc = bad_irq_desc;
INIT_LIST_HEAD(&desc->pend);
}
init_arch_irq();
}
static int __init noirqdebug_setup(char *str)
#ifdef CONFIG_HOTPLUG_CPU
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
noirqdebug = 1;
return 1;
}
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
__setup("noirqdebug", noirqdebug_setup);
spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of_cpu(cpu));
spin_unlock_irq(&desc->lock);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* The CPU has been marked offline. Migrate IRQs off this CPU. If
* the affinity settings do not allow other CPUs, force them onto any
......
......@@ -29,6 +29,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mtd/mtd.h>
......
......@@ -52,7 +52,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id,
static struct irqaction omap2_gp_timer_irq = {
.name = "gp timer",
.flags = SA_INTERRUPT | SA_TIMER,
.flags = IRQF_DISABLED | IRQF_TIMER,
.handler = omap2_gp_timer_interrupt,
};
......
......@@ -28,9 +28,9 @@
#include <asm/arch/clock.h>
LIST_HEAD(clocks);
DEFINE_SPINLOCK(clockfw_lock);
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
static struct clk_functions *arch_clock;
......
......@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
......
......@@ -29,52 +29,52 @@
.text
/* Prototype: int __arch_copy_to_user_dsp_2b(void *to, const char *from)
/* Prototype: int __copy_to_user_dsp_2b(void *to, const char *from)
* Purpose : copy 2 bytes to user memory from kernel(DSP) memory
* escaping from unexpected byte swap using __arch_copy_to_user()
* escaping from unexpected byte swap using __copy_to_user()
* in OMAP architecture.
* Params : to - user memory
* : from - kernel(DSP) memory
* Returns : success = 0, failure = 2
*/
ENTRY(__arch_copy_to_user_dsp_2b)
ENTRY(__copy_to_user_dsp_2b)
stmfd sp!, {r4, lr}
ldrb r3, [r1], #1
ldrb r4, [r1], #1
USER( strbt r4, [r0], #1) @ May fault
USER( strbt r3, [r0], #1) @ May fault
mov r0, #0
LOADREGS(fd,sp!,{r4, pc})
ldmfd sp!, {r4, pc}
.section .fixup,"ax"
.align 0
9001: mov r0, #2
LOADREGS(fd,sp!, {r4, pc})
ldmfd sp!, {r4, pc}
.previous
/* Prototype: unsigned long __arch_copy_from_user_dsp_2b(void *to, const void *from);
/* Prototype: unsigned long __copy_from_user_dsp_2b(void *to, const void *from);
* Purpose : copy 2 bytes from user memory to kernel(DSP) memory
* escaping from unexpected byte swap using __arch_copy_to_user()
* escaping from unexpected byte swap using __copy_to_user()
* in OMAP architecture.
* Params : to - kernel (DSP) memory
* : from - user memory
* Returns : success = 0, failure = 2
*/
ENTRY(__arch_copy_from_user_dsp_2b)
ENTRY(__copy_from_user_dsp_2b)
stmfd sp!, {r4, lr}
USER( ldrbt r3, [r1], #1) @ May fault
USER( ldrbt r4, [r1], #1) @ May fault
strb r4, [r0], #1
strb r3, [r0], #1
mov r0, #0
LOADREGS(fd,sp!,{r4, pc})
ldmfd sp!, {r4, pc}
.section .fixup,"ax"
.align 0
9001: mov r3, #0
strh r3, [r0], #2
mov r0, #2
LOADREGS(fd,sp!, {r4, pc})
ldmfd sp!, {r4, pc}
.previous
......@@ -33,9 +33,9 @@
#define HAVE_ASM_COPY_FROM_USER_DSP_2B
#ifdef HAVE_ASM_COPY_FROM_USER_DSP_2B
extern unsigned long __arch_copy_from_user_dsp_2b(void *to,
extern unsigned long __copy_from_user_dsp_2b(void *to,
const void __user *from);
extern unsigned long __arch_copy_to_user_dsp_2b(void __user *to,
extern unsigned long __copy_to_user_dsp_2b(void __user *to,
const void *from);
#endif
......@@ -51,7 +51,7 @@ static __inline__ unsigned long copy_from_user_dsp_2b(void *to,
{
unsigned short tmp;
if (__arch_copy_from_user(&tmp, from, 2))
if (__copy_from_user(&tmp, from, 2))
return 2;
/* expecting compiler to generate "strh" instruction */
*((unsigned short *)to) = tmp;
......@@ -77,7 +77,7 @@ static __inline__ unsigned long copy_from_user_dsp(void *to, const void *from,
/* dest not aligned -- copy 2 bytes */
if (((unsigned long)to & 2) && (n >= 2)) {
#ifdef HAVE_ASM_COPY_FROM_USER_DSP_2B
if (__arch_copy_from_user_dsp_2b(to, from))
if (__copy_from_user_dsp_2b(to, from))
#else
if (copy_from_user_dsp_2b(to, from))
#endif
......@@ -89,14 +89,14 @@ static __inline__ unsigned long copy_from_user_dsp(void *to, const void *from,
/* middle 4*n bytes */
last_n = n & 2;
n4 = n - last_n;
if ((n = __arch_copy_from_user(to, from, n4)) != 0)
if ((n = __copy_from_user(to, from, n4)) != 0)
return n + last_n;
/* last 2 bytes */
if (last_n) {
to += n4;
from += n4;
#ifdef HAVE_ASM_COPY_FROM_USER_DSP_2B
if (__arch_copy_from_user_dsp_2b(to, from))
if (__copy_from_user_dsp_2b(to, from))
#else
if (copy_from_user_dsp_2b(to, from))
#endif
......@@ -108,7 +108,7 @@ static __inline__ unsigned long copy_from_user_dsp(void *to, const void *from,
* DARAM/SARAM with 4-byte alignment or
* external memory
*/
n = __arch_copy_from_user(to, from, n);
n = __copy_from_user(to, from, n);
}
}
else /* security hole - plug it */
......@@ -122,7 +122,7 @@ static __inline__ unsigned long copy_to_user_dsp_2b(void *to, const void *from)
/* expecting compiler to generate "strh" instruction */
unsigned short tmp = *(unsigned short *)from;
return __arch_copy_to_user(to, &tmp, 2);
return __copy_to_user(to, &tmp, 2);
}
#endif
......@@ -144,7 +144,7 @@ static __inline__ unsigned long copy_to_user_dsp(void *to, const void *from,
/* dest not aligned -- copy 2 bytes */
if (((unsigned long)to & 2) && (n >= 2)) {
#ifdef HAVE_ASM_COPY_FROM_USER_DSP_2B
if (__arch_copy_to_user_dsp_2b(to, from))
if (__copy_to_user_dsp_2b(to, from))
#else
if (copy_to_user_dsp_2b(to, from))
#endif
......@@ -156,14 +156,14 @@ static __inline__ unsigned long copy_to_user_dsp(void *to, const void *from,
/* middle 4*n bytes */
last_n = n & 2;
n4 = n - last_n;
if ((n = __arch_copy_to_user(to, from, n4)) != 0)
if ((n = __copy_to_user(to, from, n4)) != 0)
return n + last_n;
/* last 2 bytes */
if (last_n) {
to += n4;
from += n4;
#ifdef HAVE_ASM_COPY_FROM_USER_DSP_2B
if (__arch_copy_to_user_dsp_2b(to, from))
if (__copy_to_user_dsp_2b(to, from))
#else
if (copy_to_user_dsp_2b(to, from))
#endif
......@@ -175,7 +175,7 @@ static __inline__ unsigned long copy_to_user_dsp(void *to, const void *from,
* DARAM/SARAM with 4-byte alignment or
* external memory
*/
n = __arch_copy_to_user(to, from, n);
n = __copy_to_user(to, from, n);
}
}
return n;
......
......@@ -21,7 +21,6 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
......
......@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
......
......@@ -11,7 +11,6 @@
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
......@@ -784,7 +783,7 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
desc->chip->ack(irq);
bank = (struct gpio_bank *) desc->data;
bank = get_irq_data(irq);
if (bank->method == METHOD_MPUIO)
isr_reg = bank->base + OMAP_MPUIO_GPIO_INT;
#ifdef CONFIG_ARCH_OMAP15XX
......@@ -851,7 +850,8 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
/* Don't run the handler if it's already running
* or was disabled lazely.
*/
if (unlikely((d->disable_depth || d->running))) {
if (unlikely((d->depth ||
(d->status & IRQ_INPROGRESS)))) {
irq_mask = 1 <<
(gpio_irq - bank->virtual_irq_start);
/* The unmasking will be done by
......@@ -860,22 +860,22 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
* it's already running.
*/
_enable_gpio_irqbank(bank, irq_mask, 0);
if (!d->disable_depth) {
if (!d->depth) {
/* Level triggered interrupts
* won't ever be reentered
*/
BUG_ON(level_mask & irq_mask);
d->pending = 1;
d->status |= IRQ_PENDING;
}
continue;
}
d->running = 1;
desc_handle_irq(gpio_irq, d, regs);
d->running = 0;
if (unlikely(d->pending && !d->disable_depth)) {
if (unlikely((d->status & IRQ_PENDING) && !d->depth)) {
irq_mask = 1 <<
(gpio_irq - bank->virtual_irq_start);
d->pending = 0;
d->status &= ~IRQ_PENDING;
_enable_gpio_irqbank(bank, irq_mask, 1);
retrigger |= irq_mask;
}
......@@ -944,7 +944,8 @@ static void mpuio_unmask_irq(unsigned int irq)
_set_gpio_irqenable(bank, gpio, 1);
}
static struct irqchip gpio_irq_chip = {
static struct irq_chip gpio_irq_chip = {
.name = "GPIO",
.ack = gpio_ack_irq,
.mask = gpio_mask_irq,
.unmask = gpio_unmask_irq,
......@@ -952,7 +953,8 @@ static struct irqchip gpio_irq_chip = {
.set_wake = gpio_wake_enable,
};
static struct irqchip mpuio_irq_chip = {
static struct irq_chip mpuio_irq_chip = {
.name = "MPUIO",
.ack = mpuio_ack_irq,
.mask = mpuio_mask_irq,
.unmask = mpuio_unmask_irq
......
......@@ -34,7 +34,6 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
......@@ -268,7 +267,7 @@ static struct dyn_tick_timer omap_dyn_tick_timer = {
static struct irqaction omap_32k_timer_irq = {
.name = "32KHz timer",
.flags = SA_INTERRUPT | SA_TIMER,
.flags = IRQF_DISABLED | IRQF_TIMER,
.handler = omap_32k_timer_interrupt,
};
......
......@@ -72,6 +72,8 @@ source "drivers/edac/Kconfig"
source "drivers/rtc/Kconfig"
source "drivers/dma/Kconfig"
source "drivers/ssi/Kconfig"
endmenu
......@@ -30,6 +30,7 @@
#include <linux/serial_reg.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
#include <linux/irq.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
......
......@@ -33,6 +33,7 @@
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
......
......@@ -33,6 +33,7 @@
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
......
......@@ -76,14 +76,15 @@ endif
obj-$(CONFIG_TOSHIBA) += toshiba.o
obj-$(CONFIG_I8K) += i8k.o
obj-$(CONFIG_DS1620) += ds1620.o
obj-$(CONFIG_HW_RANDOM) += hw_random.o
obj-$(CONFIG_OMAP_RNG) += omap-rng.o
obj-$(CONFIG_HW_RANDOM) += hw_random/
obj-$(CONFIG_FTAPE) += ftape/
obj-$(CONFIG_COBALT_LCD) += lcd.o
obj-$(CONFIG_PPDEV) += ppdev.o
obj-$(CONFIG_NWBUTTON) += nwbutton.o
obj-$(CONFIG_NWFLASH) += nwflash.o
obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_TANBAC_TB0219) += tb0219.o
......
......@@ -63,12 +63,18 @@ config LEDS_S3C24XX
This option enables support for LEDs connected to GPIO lines
on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440.
config LEDS_OMAP_PWM
tristate "LED Support for OMAP PWM-controlled LEDs"
depends on LEDS_CLASS && ARCH_OMAP && OMAP_DM_TIMER
config LEDS_AMS_DELTA
tristate "LED Support for the Amstrad Delta (E3)"
depends on LEDS_CLASS && MACH_AMS_DELTA
help
This options enables support for LEDs connected to GPIO lines
controlled by a PWM timer on OMAP CPUs.
This option enables support for the LEDs on Amstrad Delta (E3).
config LEDS_NET48XX
tristate "LED Support for Soekris net48xx series Error LED"
depends on LEDS_CLASS && SCx200_GPIO
help
This option enables support for the Soekris net4801 and net4826 error
LED.
config LEDS_OMAP
tristate "LED Support for OMAP GPIO LEDs"
......@@ -76,6 +82,13 @@ config LEDS_OMAP
help
This option enables support for the LEDs on OMAP processors.
config LEDS_OMAP_PWM
tristate "LED Support for OMAP PWM-controlled LEDs"
depends on LEDS_CLASS && ARCH_OMAP && OMAP_DM_TIMER
help
This option enables support for LEDs connected to GPIO lines
controlled by a PWM timer on OMAP CPUs.
comment "LED Triggers"
config LEDS_TRIGGERS
......@@ -100,5 +113,14 @@ config LEDS_TRIGGER_IDE_DISK
This allows LEDs to be controlled by IDE disk activity.
If unsure, say Y.
config LEDS_TRIGGER_HEARTBEAT
tristate "LED Heartbeat Trigger"
depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a CPU load average.
The flash frequency is a hyperbolic function of the 1-minute
load average.
If unsure, say Y.
endmenu
......@@ -11,9 +11,12 @@ obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o
obj-$(CONFIG_LEDS_IXP4XX) += leds-ixp4xx-gpio.o
obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
obj-$(CONFIG_LEDS_OMAP) += leds-omap.o
obj-$(CONFIG_LEDS_OMAP_PWM) += leds-omap-pwm.o
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
......@@ -11,7 +11,6 @@
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
......
......@@ -23,6 +23,14 @@ config MTD_NAND_VERIFY_WRITE
device thinks the write was successful, a bit could have been
flipped accidentally due to device wear or something else.
config MTD_NAND_ECC_SMC
bool "NAND ECC Smart Media byte order"
depends on MTD_NAND
default n
help
Software ECC according to the Smart Media Specification.
The original Linux implementation had byte 0 and 1 swapped.
config MTD_NAND_AUTCPU12
tristate "SmartMediaCard on autronix autcpu12 board"
depends on MTD_NAND && ARCH_AUTCPU12
......@@ -49,6 +57,12 @@ config MTD_NAND_SPIA
help
If you had to ask, you don't have one. Say 'N'.
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
depends on MACH_AMS_DELTA && MTD_NAND
help
Support for NAND flash on Amstrad E3 (Delta).
config MTD_NAND_OMAP
tristate "NAND Flash device on OMAP H3/H2/P2 boards"
depends on ARM && ARCH_OMAP1 && MTD_NAND && (MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_PERSEUS2)
......@@ -64,10 +78,16 @@ config MTD_NAND_OMAP_HW
config MTD_NAND_TOTO
tristate "NAND Flash device on TOTO board"
depends on ARCH_OMAP && MTD_NAND
depends on ARCH_OMAP && MTD_NAND && BROKEN
help
Support for NAND flash on Texas Instruments Toto platform.
config MTD_NAND_TS7250
tristate "NAND Flash device on TS-7250 board"
depends on MACH_TS72XX && MTD_NAND
help
Support for NAND flash on Technologic Systems TS-7250 platform.
config MTD_NAND_IDS
tristate
......@@ -89,7 +109,7 @@ config MTD_NAND_RTC_FROM4
config MTD_NAND_PPCHAMELEONEVB
tristate "NAND Flash device on PPChameleonEVB board"
depends on PPCHAMELEONEVB && MTD_NAND
depends on PPCHAMELEONEVB && MTD_NAND && BROKEN
help
This enables the NAND flash driver on the PPChameleon EVB Board.
......@@ -100,7 +120,7 @@ config MTD_NAND_S3C2410
This enables the NAND flash controller on the S3C2410 and S3C2440
SoCs
No board specfic support is done by this driver, each board
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
config MTD_NAND_S3C2410_DEBUG
......@@ -122,6 +142,22 @@ config MTD_NAND_S3C2410_HWECC
currently not be able to switch to software, as there is no
implementation for ECC method used by the S3C2410
config MTD_NAND_NDFC
tristate "NDFC NanD Flash Controller"
depends on MTD_NAND && 44x
help
NDFC Nand Flash Controllers are integrated in EP44x SoCs
config MTD_NAND_S3C2410_CLKSTOP
bool "S3C2410 NAND IDLE clock stop"
depends on MTD_NAND_S3C2410
default n
help
Stop the clock to the NAND controller when there is no chip
selected to save power. This will mean there is a small delay
when the NAND chip is selected or released, but will save
approximately 5mA of power when there is nothing happening.
config MTD_NAND_DISKONCHIP
tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
depends on MTD_NAND && EXPERIMENTAL
......@@ -196,11 +232,24 @@ config MTD_NAND_SHARPSL
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
depends on MTD_NAND && ARCH_PXA
config MTD_NAND_CS553X
tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
depends on MTD_NAND && X86_32 && (X86_PC || X86_GENERICARCH)
help
The CS553x companion chips for the AMD Geode processor
include NAND flash controllers with built-in hardware ECC
capabilities; enabling this option will allow you to use
these. The driver will check the MSRs to verify that the
controller is enabled for NAND, and currently requires that
the controller be in MMIO mode.
If you say "m", the module will be called "cs553x_nand.ko".
config MTD_NAND_NANDSIM
tristate "Support for NAND Flash Simulator"
depends on MTD_NAND && MTD_PARTITIONS
help
The simulator may simulate verious NAND flash chips for the
The simulator may simulate various NAND flash chips for the
MTD nand layer.
endmenu
......@@ -7,6 +7,7 @@ obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
obj-$(CONFIG_MTD_NAND_TOTO) += toto.o
obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
......@@ -17,7 +18,10 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_OMAP) += omap-nand-flash.o
obj-$(CONFIG_MTD_NAND_OMAP_HW) += omap-hw.o
......
......@@ -14,7 +14,7 @@
* This file is licenced under the GPL.
*/
#include <linux/signal.h> /* SA_INTERRUPT */
#include <linux/signal.h> /* IRQF_DISABLED */
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
......@@ -353,7 +353,7 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
retval = -ENXIO;
goto err2;
}
retval = usb_add_hcd(hcd, irq, SA_INTERRUPT);
retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
if (retval)
goto err2;
......
......@@ -4,6 +4,21 @@
menu "Graphics support"
config FIRMWARE_EDID
bool "Enable firmware EDID"
default y
---help---
This enables access to the EDID transferred from the firmware.
On the i386, this is from the Video BIOS. Enable this if DDC/I2C
transfers do not work for your driver and if you are using
nvidiafb, i810fb or savagefb.
In general, choosing Y for this option is safe. If you
experience extremely long delays while booting before you get
something on your display, try setting this to N. Matrox cards in
combination with certain motherboards and monitors are known to
suffer from this problem.
config FB
tristate "Support for frame buffer devices"
---help---
......@@ -70,21 +85,12 @@ config FB_MACMODES
depends on FB
default n
config FB_FIRMWARE_EDID
bool "Enable firmware EDID"
config FB_BACKLIGHT
bool
depends on FB
default y
---help---
This enables access to the EDID transferred from the firmware.
On the i386, this is from the Video BIOS. Enable this if DDC/I2C
transfers do not work for your driver and if you are using
nvidiafb, i810fb or savagefb.
In general, choosing Y for this option is safe. If you
experience extremely long delays while booting before you get
something on your display, try setting this to N. Matrox cards in
combination with certain motherboards and monitors are known to
suffer from this problem.
select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
default n
config FB_MODE_HELPERS
bool "Enable Video Mode Handling Helpers"
......@@ -167,6 +173,69 @@ config FB_ARMCLCD
here and read <file:Documentation/modules.txt>. The module
will be called amba-clcd.
choice
depends on FB_ARMCLCD && (ARCH_LH7A40X || ARCH_LH7952X)
prompt "LCD Panel"
default FB_ARMCLCD_SHARP_LQ035Q7DB02
config FB_ARMCLCD_SHARP_LQ035Q7DB02_HRTFT
bool "LogicPD LCD 3.5\" QVGA w/HRTFT IC"
help
This is an implementation of the Sharp LQ035Q7DB02, a 3.5"
color QVGA, HRTFT panel. The LogicPD device includes
an integrated HRTFT controller IC.
The native resolution is 240x320.
config FB_ARMCLCD_SHARP_LQ057Q3DC02
bool "LogicPD LCD 5.7\" QVGA"
help
This is an implementation of the Sharp LQ057Q3DC02, a 5.7"
color QVGA, TFT panel. The LogicPD device includes an
The native resolution is 320x240.
config FB_ARMCLCD_SHARP_LQ64D343
bool "LogicPD LCD 6.4\" VGA"
help
This is an implementation of the Sharp LQ64D343, a 6.4"
color VGA, TFT panel. The LogicPD device includes an
The native resolution is 640x480.
config FB_ARMCLCD_SHARP_LQ10D368
bool "LogicPD LCD 10.4\" VGA"
help
This is an implementation of the Sharp LQ10D368, a 10.4"
color VGA, TFT panel. The LogicPD device includes an
The native resolution is 640x480.
config FB_ARMCLCD_SHARP_LQ121S1DG41
bool "LogicPD LCD 12.1\" SVGA"
help
This is an implementation of the Sharp LQ121S1DG41, a 12.1"
color SVGA, TFT panel. The LogicPD device includes an
The native resolution is 800x600.
This panel requires a clock rate may be an integer fraction
of the base LCDCLK frequency. The driver will select the
highest frequency available that is lower than the maximum
allowed. The panel may flicker if the clock rate is
slower than the recommended minimum.
config FB_ARMCLCD_AUO_A070VW01_WIDE
bool "AU Optronics A070VW01 LCD 7.0\" WIDE"
help
This is an implementation of the AU Optronics, a 7.0"
WIDE Color. The native resolution is 234x480.
config FB_ARMCLCD_HITACHI
bool "Hitachi Wide Screen 800x480"
help
This is an implementation of the Hitachi 800x480.
endchoice
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && (ARCH_ACORN || ARCH_CLPS7500)
......@@ -353,7 +422,7 @@ config FB_OF
config FB_CONTROL
bool "Apple \"control\" display support"
depends on (FB = y) && PPC_PMAC
depends on (FB = y) && PPC_PMAC && PPC32
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
......@@ -364,7 +433,7 @@ config FB_CONTROL
config FB_PLATINUM
bool "Apple \"platinum\" display support"
depends on (FB = y) && PPC_PMAC
depends on (FB = y) && PPC_PMAC && PPC32
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
......@@ -375,7 +444,7 @@ config FB_PLATINUM
config FB_VALKYRIE
bool "Apple \"valkyrie\" display support"
depends on (FB = y) && (MAC || PPC_PMAC)
depends on (FB = y) && (MAC || (PPC_PMAC && PPC32))
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
......@@ -386,7 +455,7 @@ config FB_VALKYRIE
config FB_CT65550
bool "Chips 65550 display support"
depends on (FB = y) && PPC
depends on (FB = y) && PPC32
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
......@@ -483,10 +552,14 @@ config FB_VESA
You will get a boot time penguin logo at no additional cost. Please
read <file:Documentation/fb/vesafb.txt>. If unsure, say Y.
config VIDEO_SELECT
bool
depends on FB_VESA
default y
config FB_IMAC
bool "Intel-based Macintosh Framebuffer Support"
depends on (FB = y) && X86
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
This is the frame buffer device driver for the Intel-based Macintosh
config FB_HGA
tristate "Hercules mono graphics support"
......@@ -510,12 +583,6 @@ config FB_HGA_ACCEL
This will compile the Hercules mono graphics with
acceleration functions.
config VIDEO_SELECT
bool
depends on (FB = y) && X86
default y
config FB_SGIVW
tristate "SGI Visual Workstation framebuffer support"
depends on FB && X86_VISWS
......@@ -654,6 +721,14 @@ config FB_NVIDIA_I2C
independently validate video mode parameters, you should say Y
here.
config FB_NVIDIA_BACKLIGHT
bool "Support for backlight control"
depends on FB_NVIDIA && PMAC_BACKLIGHT
select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
config FB_RIVA
tristate "nVidia Riva support"
depends on FB && PCI
......@@ -692,6 +767,14 @@ config FB_RIVA_DEBUG
of debugging informations to provide to the maintainer when
something goes wrong.
config FB_RIVA_BACKLIGHT
bool "Support for backlight control"
depends on FB_RIVA && PMAC_BACKLIGHT
select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
config FB_I810
tristate "Intel 810/815 support (EXPERIMENTAL)"
depends on FB && EXPERIMENTAL && PCI && X86_32
......@@ -743,7 +826,7 @@ config FB_I810_I2C
config FB_INTEL
tristate "Intel 830M/845G/852GM/855GM/865G support (EXPERIMENTAL)"
depends on FB && EXPERIMENTAL && PCI && X86_32
depends on FB && EXPERIMENTAL && PCI && X86
select AGP
select AGP_INTEL
select FB_MODE_HELPERS
......@@ -930,6 +1013,7 @@ config FB_RADEON
There is a product page at
http://apps.ati.com/ATIcompare/
config FB_RADEON_I2C
bool "DDC/I2C for ATI Radeon support"
depends on FB_RADEON
......@@ -937,6 +1021,14 @@ config FB_RADEON_I2C
help
Say Y here if you want DDC/I2C support for your Radeon board.
config FB_RADEON_BACKLIGHT
bool "Support for backlight control"
depends on FB_RADEON && PMAC_BACKLIGHT
select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
config FB_RADEON_DEBUG
bool "Lots of debug output from Radeon driver"
depends on FB_RADEON
......@@ -961,6 +1053,14 @@ config FB_ATY128
To compile this driver as a module, choose M here: the
module will be called aty128fb.
config FB_ATY128_BACKLIGHT
bool "Support for backlight control"
depends on FB_ATY128 && PMAC_BACKLIGHT
select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
config FB_ATY
tristate "ATI Mach64 display support" if PCI || ATARI
depends on FB && !SPARC32
......@@ -1003,6 +1103,14 @@ config FB_ATY_GX
is at
<http://support.ati.com/products/pc/mach64/graphics_xpression.html>.
config FB_ATY_BACKLIGHT
bool "Support for backlight control"
depends on FB_ATY && PMAC_BACKLIGHT
select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
config FB_S3TRIO
bool "S3 Trio display support"
depends on (FB = y) && PPC && BROKEN
......@@ -1402,6 +1510,26 @@ config FB_PXA_PARAMETERS
<file:Documentation/fb/pxafb.txt> describes the available parameters.
config FB_MBX
tristate "2700G LCD framebuffer support"
depends on FB && ARCH_PXA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
---help---
Framebuffer driver for the Intel 2700G (Marathon) Graphics
Accelerator
config FB_MBX_DEBUG
bool "Enable debugging info via debugfs"
depends on FB_MBX && DEBUG_FS
default n
---help---
Enable this if you want debugging information using the debug
filesystem (debugfs)
If unsure, say N.
config FB_W100
tristate "W100 frame buffer support"
depends on FB && PXA_SHARPSL
......@@ -1441,7 +1569,20 @@ config FB_S3C2410_DEBUG
Turn on debugging messages. Note that you can set/unset at run time
through sysfs
source "drivers/video/omap/Kconfig"
config FB_PNX4008_DUM
tristate "Display Update Module support on Philips PNX4008 board"
depends on FB && ARCH_PNX4008
---help---
Say Y here to enable support for PNX4008 Display Update Module (DUM)
config FB_PNX4008_DUM_RGB
tristate "RGB Framebuffer support on Philips PNX4008 board"
depends on FB_PNX4008_DUM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
---help---
Say Y here to enable support for PNX4008 RGB Framebuffer
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
......@@ -1463,6 +1604,9 @@ config FB_VIRTUAL
module will be called vfb.
If unsure, say N.
source "drivers/video/omap/Kconfig"
if VT
source "drivers/video/console/Kconfig"
endif
......@@ -1471,7 +1615,7 @@ if FB || SGI_NEWPORT_CONSOLE
source "drivers/video/logo/Kconfig"
endif
if FB && SYSFS
if SYSFS
source "drivers/video/backlight/Kconfig"
endif
......
......@@ -24,8 +24,8 @@
#include <linux/console.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h> /* For in_interrupt() */
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
......@@ -56,7 +56,7 @@ int console_printk[4] = {
DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
};
EXPORT_SYMBOL(console_printk);
EXPORT_UNUSED_SYMBOL(console_printk); /* June 2006 */
/*
* Low level drivers may need that to know if they can schedule in
......@@ -71,6 +71,7 @@ EXPORT_SYMBOL(oops_in_progress);
* driver system.
*/
static DECLARE_MUTEX(console_sem);
static DECLARE_MUTEX(secondary_console_sem);
struct console *console_drivers;
/*
* This is used for debugging the mess that is the VT code by
......@@ -80,7 +81,7 @@ struct console *console_drivers;
* path in the console code where we end up in places I want
* locked without the console sempahore held
*/
static int console_locked;
static int console_locked, console_suspended;
/*
* logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars
......@@ -330,7 +331,9 @@ static void __call_console_drivers(unsigned long start, unsigned long end)
struct console *con;
for (con = console_drivers; con; con = con->next) {
if ((con->flags & CON_ENABLED) && con->write)
if ((con->flags & CON_ENABLED) && con->write &&
(cpu_online(smp_processor_id()) ||
(con->flags & CON_ANYTIME)))
con->write(con, &LOG_BUF(start), end - start);
}
}
......@@ -470,6 +473,18 @@ __attribute__((weak)) unsigned long long printk_clock(void)
return sched_clock();
}
/* Check if we have any console registered that can be called early in boot. */
static int have_callable_console(void)
{
struct console *con;
for (con = console_drivers; con; con = con->next)
if (con->flags & CON_ANYTIME)
return 1;
return 0;
}
/**
* printk - print a kernel message
* @fmt: format string
......@@ -520,7 +535,9 @@ asmlinkage int vprintk(const char *fmt, va_list args)
zap_locks();
/* This stops the holder of console_sem just where we want him */
spin_lock_irqsave(&logbuf_lock, flags);
local_irq_save(flags);
lockdep_off();
spin_lock(&logbuf_lock);
printk_cpu = smp_processor_id();
/* Emit the output into the temporary buffer */
......@@ -587,27 +604,31 @@ asmlinkage int vprintk(const char *fmt, va_list args)
log_level_unknown = 1;
}
if (!cpu_online(smp_processor_id())) {
if (!down_trylock(&console_sem)) {
/*
* Some console drivers may assume that per-cpu resources have
* been allocated. So don't allow them to be called by this
* CPU until it is officially up. We shouldn't be calling into
* random console drivers on a CPU which doesn't exist yet..
* We own the drivers. We can drop the spinlock and
* let release_console_sem() print the text, maybe ...
*/
printk_cpu = UINT_MAX;
spin_unlock_irqrestore(&logbuf_lock, flags);
goto out;
}
if (!down_trylock(&console_sem)) {
console_locked = 1;
printk_cpu = UINT_MAX;
spin_unlock(&logbuf_lock);
/*
* We own the drivers. We can drop the spinlock and let
* release_console_sem() print the text
* Console drivers may assume that per-cpu resources have
* been allocated. So unless they're explicitly marked as
* being able to cope (CON_ANYTIME) don't call them until
* this CPU is officially up.
*/
printk_cpu = UINT_MAX;
spin_unlock_irqrestore(&logbuf_lock, flags);
if (cpu_online(smp_processor_id()) || have_callable_console()) {
console_may_schedule = 0;
release_console_sem();
} else {
/* Release by hand to avoid flushing the buffer. */
console_locked = 0;
up(&console_sem);
}
lockdep_on();
local_irq_restore(flags);
} else {
/*
* Someone else owns the drivers. We drop the spinlock, which
......@@ -615,9 +636,11 @@ asmlinkage int vprintk(const char *fmt, va_list args)
* console drivers with the output which we just produced.
*/
printk_cpu = UINT_MAX;
spin_unlock_irqrestore(&logbuf_lock, flags);
spin_unlock(&logbuf_lock);
lockdep_on();
local_irq_restore(flags);
}
out:
preempt_enable();
return printed_len;
}
......@@ -719,6 +742,23 @@ int __init add_preferred_console(char *name, int idx, char *options)
return 0;
}
/**
* suspend_console - suspend the console subsystem
*
* This disables printk() while we go into suspend states
*/
void suspend_console(void)
{
acquire_console_sem();
console_suspended = 1;
}
void resume_console(void)
{
console_suspended = 0;
release_console_sem();
}
/**
* acquire_console_sem - lock the console system for exclusive use.
*
......@@ -730,6 +770,10 @@ int __init add_preferred_console(char *name, int idx, char *options)
void acquire_console_sem(void)
{
BUG_ON(in_interrupt());
if (console_suspended) {
down(&secondary_console_sem);
return;
}
down(&console_sem);
console_locked = 1;
console_may_schedule = 1;
......@@ -750,7 +794,7 @@ int is_console_locked(void)
{
return console_locked;
}
EXPORT_SYMBOL(is_console_locked);
EXPORT_UNUSED_SYMBOL(is_console_locked); /* June 2006 */
/**
* release_console_sem - unlock the console system
......@@ -772,6 +816,13 @@ void release_console_sem(void)
unsigned long _con_start, _log_end;
unsigned long wake_klogd = 0;
if (console_suspended) {
up(&secondary_console_sem);
return;
}
console_may_schedule = 0;
for ( ; ; ) {
spin_lock_irqsave(&logbuf_lock, flags);
wake_klogd |= log_start - log_end;
......@@ -785,11 +836,17 @@ void release_console_sem(void)
local_irq_restore(flags);
}
console_locked = 0;
console_may_schedule = 0;
up(&console_sem);
spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait))
if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) {
/*
* If we printk from within the lock dependency code,
* from within the scheduler code, then do not lock
* up due to self-recursion:
*/
if (!lockdep_internal())
wake_up_interruptible(&log_wait);
}
}
EXPORT_SYMBOL(release_console_sem);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment