Commit d94ddace authored by Thomas Gleixner

Merge branch 'rt/base' into rt/head

Conflicts:
	kernel/irq/chip.c
	kernel/irq/internals.h
	kernel/irq/manage.c

Convert forced irq threading to the new ONESHOT infrastructure
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parents a66b9c21 4eec1a1e
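The driver-visible result of this conversion: a threaded interrupt can now be requested without a dedicated primary handler. Passing handler == NULL together with IRQF_ONESHOT makes the core install irq_default_primary_handler() and keep the line masked until the thread function has run. A minimal sketch (device, names and handlers are hypothetical):

    #include <linux/interrupt.h>

    /* Hypothetical thread function: runs in process context, may sleep. */
    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
    {
            /* talk to the (possibly slow, e.g. i2c) device here */
            return IRQ_HANDLED;
    }

    static int foo_request(unsigned int irq, void *dev)
    {
            /*
             * NULL primary handler: the core substitutes
             * irq_default_primary_handler(), which only returns
             * IRQ_WAKE_THREAD. IRQF_ONESHOT keeps the line masked
             * until foo_thread_fn() has finished.
             */
            return request_threaded_irq(irq, NULL, foo_thread_fn,
                                        IRQF_ONESHOT, "foo", dev);
    }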
@@ -69,7 +69,7 @@ static void put_ldisc(struct tty_ldisc *ld)
* We really want an "atomic_dec_and_lock_irqsave()",
* but we don't have it, so this does it by hand.
*/
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
struct tty_ldisc_ops *ldo = ld->ops;
@@ -80,7 +80,7 @@ static void put_ldisc(struct tty_ldisc *ld)
kfree(ld);
return;
}
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
}
/**
......
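The _nort variants above are preempt-rt annotations: on a non-rt kernel they behave exactly like local_irq_save()/local_irq_restore(), while on PREEMPT_RT they deliberately leave interrupts enabled, because tty_ldisc_lock is a sleeping lock there. Roughly (a sketch of the -rt convention, not the literal patch):

    #ifdef CONFIG_PREEMPT_RT
    /* RT: do not really disable interrupts, just keep the flags API */
    # define local_irq_save_nort(flags)     local_save_flags(flags)
    # define local_irq_restore_nort(flags)  (void)(flags)
    #else
    # define local_irq_save_nort(flags)     local_irq_save(flags)
    # define local_irq_restore_nort(flags)  local_irq_restore(flags)
    #endif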
@@ -50,6 +50,9 @@
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
registered first in a shared interrupt is considered for
* performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NODELAY - Interrupt is not force threaded
*/
#define IRQF_DISABLED 0x00000020
@@ -60,7 +63,8 @@
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
-#define IRQF_NODELAY 0x00002000
+#define IRQF_ONESHOT 0x00002000
+#define IRQF_NODELAY 0x00004000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NODELAY)
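IRQF_NODELAY opts a handler out of forced threading (roughly the role later filled by IRQF_NO_THREAD in mainline); note that IRQF_TIMER implies it. A handler that must run in hard interrupt context would request, for a hypothetical device:

    /* Hypothetical: latency-critical handler that must never be threaded */
    static irqreturn_t bar_hardirq(int irq, void *dev_id)
    {
            /* runs in hard irq context even on -rt; keep it short */
            return IRQ_HANDLED;
    }

    static int bar_request(unsigned int irq, void *dev)
    {
            return request_irq(irq, bar_hardirq, IRQF_NODELAY, "bar", dev);
    }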
......
@@ -69,6 +69,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -100,6 +102,9 @@ struct msi_desc;
* @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @set_wake: enable/disable power-management wake-on of an IRQ
*
* @bus_lock: function to lock access to slow bus (i2c) chips
* @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
*
* @release: release function solely used by UML
* @typename: obsoleted by name, kept as migration helper
*/
@@ -123,6 +128,9 @@ struct irq_chip {
int (*set_type)(unsigned int irq, unsigned int flow_type);
int (*set_wake)(unsigned int irq, unsigned int on);
void (*bus_lock)(unsigned int irq);
void (*bus_sync_unlock)(unsigned int irq);
/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
void (*release)(unsigned int irq, void *dev_id);
@@ -380,6 +388,8 @@ set_irq_chained_handler(unsigned int irq,
__set_irq_handler(irq, handle, 1, NULL);
}
extern void set_irq_nested_thread(unsigned int irq, int nest);
extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);
......
@@ -222,6 +222,34 @@ int set_irq_chip_data(unsigned int irq, void *data)
}
EXPORT_SYMBOL(set_irq_chip_data);
/**
* set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
*
* @irq: Interrupt number
* @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag
*
* The IRQ_NESTED_THREAD flag indicates that on
* request_threaded_irq() no separate interrupt thread should be
created for the irq as the handler is called nested in the
* context of a demultiplexing interrupt handler thread.
*/
void set_irq_nested_thread(unsigned int irq, int nest)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
if (!desc)
return;
atomic_spin_lock_irqsave(&desc->lock, flags);
if (nest)
desc->status |= IRQ_NESTED_THREAD;
else
desc->status &= ~IRQ_NESTED_THREAD;
atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);
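A typical caller of set_irq_nested_thread() is the demultiplexing driver for an interrupt controller behind a slow bus: at setup time it marks each child interrupt as nested so that request_threaded_irq() will not spawn a thread per child. A sketch, with the expander chip and numbers purely hypothetical:

    /* Hypothetical i2c io-expander: mark child irqs as nested */
    static void expander_setup_irqs(unsigned int irq_base, unsigned int count)
    {
            unsigned int i;

            for (i = 0; i < count; i++) {
                    set_irq_chip_and_handler(irq_base + i, &expander_irq_chip,
                                             handle_simple_irq);
                    /* child handlers run nested in the demux thread */
                    set_irq_nested_thread(irq_base + i, 1);
            }
    }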
/*
* default enable function
*/
@@ -300,6 +328,45 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
}
}
/*
* handle_nested_irq - Handle a nested irq from an irq thread
* @irq: the interrupt number
*
* Handle interrupts which are nested into a threaded interrupt
* handler. The handler function is called inside the calling
* thread's context.
*/
void handle_nested_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
irqreturn_t action_ret;
might_sleep();
atomic_spin_lock_irq(&desc->lock);
kstat_incr_irqs_this_cpu(irq, desc);
action = desc->action;
if (unlikely(!action || (desc->status & IRQ_DISABLED)))
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
atomic_spin_unlock_irq(&desc->lock);
action_ret = action->thread_fn(action->irq, action->dev_id);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
atomic_spin_lock_irq(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
atomic_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
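At runtime the parent interrupt's thread function is the matching piece: it reads the demux status register (which may sleep on the bus) and calls handle_nested_irq() for every pending child, so each child's thread_fn runs nested in the parent's thread context. Again a hypothetical sketch (struct expander and expander_read_pending() are made up for illustration):

    static irqreturn_t expander_demux_thread(int irq, void *dev_id)
    {
            struct expander *chip = dev_id;
            unsigned long pending;
            int bit;

            /* may sleep: i2c read of the pending-interrupt register */
            pending = expander_read_pending(chip);

            for_each_bit(bit, &pending, chip->nr_child_irqs)
                    handle_nested_irq(chip->irq_base + bit);

            return IRQ_HANDLED;
    }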
/**
* handle_simple_irq - Simple and software-decoded IRQs.
* @irq: the interrupt number
@@ -383,8 +450,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
atomic_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
-	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask &&
-	    !desc->forced_threads_active)
+	if (unlikely(desc->status & IRQ_ONESHOT))
+		desc->status |= IRQ_MASKED;
+	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
 		desc->chip->unmask(irq);
out_unlock:
atomic_spin_unlock(&desc->lock);
@@ -427,6 +496,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
goto out;
}
if ((desc->status & IRQ_ONESHOT) && desc->chip->mask)
desc->chip->mask(irq);
desc->status |= IRQ_INPROGRESS;
desc->status &= ~IRQ_PENDING;
atomic_spin_unlock(&desc->lock);
@@ -480,8 +552,13 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
-	if (desc->chip->ack)
-		desc->chip->ack(irq);
+	if (unlikely(desc->status & IRQ_ONESHOT)) {
+		desc->status |= IRQ_MASKED;
+		mask_ack_irq(desc, irq);
+	} else {
+		if (desc->chip->ack)
+			desc->chip->ack(irq);
+	}
/* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS;
@@ -574,6 +651,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->chip = &dummy_irq_chip;
}
chip_bus_lock(irq, desc);
atomic_spin_lock_irqsave(&desc->lock, flags);
/* Uninstall? */
@@ -592,7 +670,9 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->depth = 0;
desc->chip->startup(irq);
}
atomic_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
......
@@ -356,6 +356,25 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
"but no thread function available.", irq, action->name);
}
/*
* Momentary workaround until I have a brighter idea how to handle the
* accounting of forced threaded (shared) handlers.
*/
irqreturn_t handle_irq_action(unsigned int irq, struct irqaction *action)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc->status & IRQ_ONESHOT) {
unsigned long flags;
atomic_spin_lock_irqsave(&desc->lock, flags);
desc->forced_threads_active |= action->thread_mask;
atomic_spin_unlock_irqrestore(&desc->lock, flags);
return IRQ_WAKE_THREAD;
}
return action->handler(irq, action->dev_id);
}
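The accounting works with one bit per action: __setup_irq() hands every threaded action a distinct bit in thread_mask, the code above sets that bit in desc->forced_threads_active before waking the thread, and preempt_hardirq_thread_done() clears it after the thread function has run; only when the field drops to zero may the line be unmasked. A simplified sketch of the two halves (not the literal kernel code):

    static void forced_thread_wake(struct irq_desc *desc, struct irqaction *action)
    {
            /* hardirq context: mark this action's thread as pending */
            desc->forced_threads_active |= action->thread_mask;
    }

    static void forced_thread_done(struct irq_desc *desc, struct irqaction *action)
    {
            /* irq thread context: the last finished thread unmasks the line */
            desc->forced_threads_active &= ~action->thread_mask;
            if (!desc->forced_threads_active && (desc->status & IRQ_MASKED) &&
                desc->chip->unmask)
                    desc->chip->unmask(action->irq);
    }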
/**
* handle_IRQ_event - irq action chain handler
* @irq: the interrupt number
......
@@ -44,15 +44,18 @@ extern int irq_select_affinity_usr(unsigned int irq);
extern void irq_set_thread_affinity(struct irq_desc *desc);
 #ifdef CONFIG_PREEMPT_HARDIRQS
 extern irqreturn_t handle_irq_action(unsigned int irq,struct irqaction *action);
-#else
-static inline irqreturn_t
-handle_irq_action(unsigned int irq, struct irqaction *action)
+
+/* Inline functions for support of irq chips on slow busses */
+static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
 {
-	return action->handler(irq, action->dev_id);
+	if (unlikely(desc->chip->bus_lock))
+		desc->chip->bus_lock(irq);
+}
+
+static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
+{
+	if (unlikely(desc->chip->bus_sync_unlock))
+		desc->chip->bus_sync_unlock(irq);
 }
 #endif
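With these hooks, an irq_chip for a controller behind i2c can take a mutex in bus_lock() and flush cached register writes in bus_sync_unlock(); the core brackets the sleepable slow-path operations (setup, free, enable/disable) with the pair. A hedged sketch, chip internals hypothetical:

    static void expander_bus_lock(unsigned int irq)
    {
            struct expander *chip = get_irq_chip_data(irq);

            mutex_lock(&chip->buslock);
    }

    static void expander_bus_sync_unlock(unsigned int irq)
    {
            struct expander *chip = get_irq_chip_data(irq);

            /* push cached mask/unmask state to the device over i2c */
            expander_sync_regs(chip);
            mutex_unlock(&chip->buslock);
    }

    static struct irq_chip expander_irq_chip = {
            .name            = "expander",
            .mask            = expander_mask_irq,
            .unmask          = expander_unmask_irq,
            .bus_lock        = expander_bus_lock,
            .bus_sync_unlock = expander_bus_sync_unlock,
    };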
/*
* Debugging printout:
......
@@ -230,9 +230,11 @@ void disable_irq_nosync(unsigned int irq)
if (!desc)
return;
chip_bus_lock(irq, desc);
atomic_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, false);
atomic_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -295,7 +297,8 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
* matches the last disable, processing of interrupts on this
* IRQ line is re-enabled.
*
- * This function may be called from IRQ context.
+ * This function may be called from IRQ context only when
+ * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
*/
void enable_irq(unsigned int irq)
{
@@ -305,9 +308,11 @@ void enable_irq(unsigned int irq)
if (!desc)
return;
chip_bus_lock(irq, desc);
atomic_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, false);
atomic_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(enable_irq);
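The practical consequence of the comment above: for a chip with bus_lock callbacks, enable_irq() may now sleep, so a hardirq-context user has to defer the call, for instance to a workqueue. A hypothetical sketch:

    /* Hypothetical deferral: re-enable a buslock-chip irq from process context */
    static void foo_reenable_work(struct work_struct *work)
    {
            struct foo *f = container_of(work, struct foo, reenable_work);

            enable_irq(f->irq);     /* safe here: bus_lock() may sleep */
    }

A hardirq path would then schedule_work(&f->reenable_work) instead of calling enable_irq() directly.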
@@ -437,40 +442,46 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return ret;
}
-#ifdef CONFIG_PREEMPT_HARDIRQS
 /*
- * handler function for forced irq threading. Dummy code. See
- * handle_irq_action() below.
+ * Default primary interrupt handler for threaded interrupts. Is
+ * assigned as primary handler when request_threaded_irq is called
+ * with handler == NULL. Useful for oneshot interrupts.
  */
-static irqreturn_t preempt_hardirq_handler(int irq, void *dev_id)
+static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 {
 	return IRQ_WAKE_THREAD;
 }

 /*
- * Momentary workaround until I have a brighter idea how to handle the
- * accounting of forced thread handlers.
+ * Primary handler for nested threaded interrupts. Should never be
+ * called.
  */
-irqreturn_t handle_irq_action(unsigned int irq, struct irqaction *action)
+static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 {
-	if (action->handler == preempt_hardirq_handler) {
-		struct irq_desc *desc = irq_to_desc(irq);
-		unsigned long flags;
+	WARN(1, "Primary handler called for nested irq %d\n", irq);
+	return IRQ_NONE;
+}

-		atomic_spin_lock_irqsave(&desc->lock, flags);
+#ifdef CONFIG_PREEMPT_HARDIRQS
+/*
+ * If the caller does not request irq threading then the handler
+ * becomes the thread function and we use the above handler as the
+ * primary hardirq context handler.
+ */
+static void preempt_hardirq_setup(struct irqaction *new)
+{
+	if (new->thread_fn || (new->flags & IRQF_NODELAY))
+		return;

-		/* FIXME: use some flag to do that */
-		if (desc->handle_irq == handle_fasteoi_irq) {
-			if (desc->chip->mask)
-				desc->chip->mask(irq);
-		}
-		desc->forced_threads_active |= action->thread_mask;
-		atomic_spin_unlock_irqrestore(&desc->lock, flags);
-		return IRQ_WAKE_THREAD;
-	}
-	return action->handler(irq, action->dev_id);
+	new->flags |= IRQF_ONESHOT;
+	new->thread_fn = new->handler;
+	new->handler = irq_default_primary_handler;
 }
+#else
+static inline void preempt_hardirq_setup(struct irqaction *new) { }
+#endif
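Net effect of preempt_hardirq_setup(): on a PREEMPT_HARDIRQS kernel an ordinary request_irq() without IRQF_NODELAY now ends up as a oneshot threaded interrupt. Illustrative equivalence only, not literal kernel code:

    /* What the driver wrote ... */
    request_irq(irq, my_handler, 0, "foo", dev);

    /* ... is now handled as if it had been: */
    request_threaded_irq(irq, NULL /* -> irq_default_primary_handler */,
                         my_handler, IRQF_ONESHOT, "foo", dev);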
/*
* forced threaded interrupts need to unmask the interrupt line
*/
@@ -479,7 +490,7 @@ static int preempt_hardirq_thread_done(struct irq_desc *desc,
{
unsigned long masked;
-	if (action->handler != preempt_hardirq_handler)
+	if (!(desc->status & IRQ_ONESHOT))
return 0;
again:
atomic_spin_lock_irq(&desc->lock);
@@ -524,20 +535,6 @@ again:
return 0;
}
-/*
- * If the caller does not request irq threading then the handler
- * becomes the thread function and we use the above handler as the
- * primary hardirq context handler.
- */
-static void preempt_hardirq_setup(struct irqaction *new)
-{
-	if (new->thread_fn || (new->flags & IRQF_NODELAY))
-		return;
-
-	new->thread_fn = new->handler;
-	new->handler = preempt_hardirq_handler;
-}
static inline void
preempt_hardirq_cleanup(struct irq_desc *desc, struct irqaction *action)
{
@@ -545,17 +542,6 @@ preempt_hardirq_cleanup(struct irq_desc *desc, struct irqaction *action)
preempt_hardirq_thread_done(desc, action);
}
-#else
-static inline void preempt_hardirq_setup(struct irqaction *new) { }
-static inline int
-preempt_hardirq_thread_done(struct irq_desc *d, struct irqaction *a)
-{
-	return 0;
-}
-static inline void
-preempt_hardirq_cleanup(struct irq_desc *d, struct irqaction *a) { }
-#endif
static int
irq_wait_for_interrupt(struct irq_desc *desc, struct irqaction *action)
{
@@ -690,7 +676,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
struct irqaction *old, **old_ptr;
const char *old_name = NULL;
unsigned long flags, thread_mask = 0;
-	int shared = 0;
+	int nested, shared = 0;
int ret;
if (!desc)
@@ -715,13 +701,32 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
rand_initialize_irq(irq);
}
/* Preempt-RT setup for forced threading */
preempt_hardirq_setup(new);
/*
-	 * Threaded handler ?
+	 * Check whether the interrupt nests into another interrupt
+	 * thread.
 	 */
+	nested = desc->status & IRQ_NESTED_THREAD;
+	if (nested) {
+		if (!new->thread_fn)
+			return -EINVAL;
+		/*
+		 * Replace the primary handler which was provided from
+		 * the driver for non nested interrupt handling by the
+		 * dummy function which warns when called.
+		 */
+		new->handler = irq_nested_primary_handler;
+	}
+
+	/*
+	 * Create a handler thread when a thread function is supplied
+	 * and the interrupt does not nest into another interrupt
+	 * thread.
+	 */
-	if (new->thread_fn) {
+	if (new->thread_fn && !nested) {
struct task_struct *t;
t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
@@ -798,9 +803,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
desc->status |= IRQ_PER_CPU;
#endif
-	desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+	desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
 			  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+
+	if (new->flags & IRQF_ONESHOT)
+		desc->status |= IRQ_ONESHOT;
if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
@@ -825,6 +833,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
(int)(new->flags & IRQF_TRIGGER_MASK));
}
+	new->irq = irq;
*old_ptr = new;
/* Reset broken irq detection when installing new handler */
@@ -842,14 +851,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
atomic_spin_unlock_irqrestore(&desc->lock, flags);
-	new->irq = irq;
register_irq_proc(irq, desc);
new->dir = NULL;
register_handler_proc(irq, new);
-	if (new->thread)
-		wake_up_process(new->thread);
return 0;
mismatch:
@@ -1007,7 +1012,14 @@ EXPORT_SYMBOL_GPL(remove_irq);
*/
void free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
if (!desc)
return;
chip_bus_lock(irq, desc);
kfree(__free_irq(irq, dev_id));
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(free_irq);
@@ -1016,6 +1028,8 @@ EXPORT_SYMBOL(free_irq);
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Primary handler for threaded interrupts
* If NULL and thread_fn != NULL the default
* primary handler is installed
* @thread_fn: Function called from the irq handler thread
* If NULL, no irq thread is created
* @irqflags: Interrupt type flags
@@ -1095,8 +1109,12 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
if (desc->status & IRQ_NOREQUEST)
return -EINVAL;
-	if (!handler)
-		return -EINVAL;
+	if (!handler) {
+		if (!thread_fn)
+			return -EINVAL;
+		handler = irq_default_primary_handler;
+	}
action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
@@ -1108,7 +1126,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
action->name = devname;
action->dev_id = dev_id;
chip_bus_lock(irq, desc);
retval = __setup_irq(irq, desc, action);
chip_bus_sync_unlock(irq, desc);
if (retval)
kfree(action);
......