Commit ac8be955 authored by Ralf Baechle's avatar Ralf Baechle

[MIPS] SMTC: Instant IPI replay.

SMTC pseudo-interrupts between TCs are deferred and queued if the target
TC is interrupt-inhibited (IXMT). In the first SMTC prototypes, these
queued IPIs were serviced on return to user mode, or on entry into the
kernel idle loop. The INSTANT_REPLAY option dispatches them as part of
local_irq_restore() processing, which adds runtime overhead (hence the
option to turn it off), but ensures that IPIs are handled promptly even
under heavy I/O interrupt load.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 9ee79a3d
...@@ -1568,6 +1568,20 @@ config MIPS_MT_FPAFF ...@@ -1568,6 +1568,20 @@ config MIPS_MT_FPAFF
depends on MIPS_MT depends on MIPS_MT
default y default y
config MIPS_MT_SMTC_INSTANT_REPLAY
bool "Low-latency Dispatch of Deferred SMTC IPIs"
depends on MIPS_MT_SMTC
default y
help
SMTC pseudo-interrupts between TCs are deferred and queued
if the target TC is interrupt-inhibited (IXMT). In the first
SMTC prototypes, these queued IPIs were serviced on return
to user mode, or on entry into the kernel idle loop. The
INSTANT_REPLAY option dispatches them as part of local_irq_restore()
processing, which adds runtime overhead (hence the option to turn
it off), but ensures that IPIs are handled promptly even under
heavy I/O interrupt load.
config MIPS_VPE_LOADER_TOM config MIPS_VPE_LOADER_TOM
bool "Load VPE program into memory hidden from linux" bool "Load VPE program into memory hidden from linux"
depends on MIPS_VPE_LOADER depends on MIPS_VPE_LOADER
......
...@@ -1017,6 +1017,33 @@ void setup_cross_vpe_interrupts(void) ...@@ -1017,6 +1017,33 @@ void setup_cross_vpe_interrupts(void)
* SMTC-specific hacks invoked from elsewhere in the kernel. * SMTC-specific hacks invoked from elsewhere in the kernel.
*/ */
void smtc_ipi_replay(void)
{
/*
* To the extent that we've ever turned interrupts off,
* we may have accumulated deferred IPIs. This is subtle.
* If we use the smtc_ipi_qdepth() macro, we'll get an
* exact number - but we'll also disable interrupts
* and create a window of failure where a new IPI gets
* queued after we test the depth but before we re-enable
* interrupts. So long as IXMT never gets set, however,
* we should be OK: If we pick up something and dispatch
* it here, that's great. If we see nothing, but concurrent
* with this operation, another TC sends us an IPI, IXMT
* is clear, and we'll handle it as a real pseudo-interrupt
* and not a pseudo-pseudo interrupt.
*/
if (IPIQ[smp_processor_id()].depth > 0) {
struct smtc_ipi *pipi;
extern void self_ipi(struct smtc_ipi *);
while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
self_ipi(pipi);
smtc_cpu_stats[smp_processor_id()].selfipis++;
}
}
}
void smtc_idle_loop_hook(void) void smtc_idle_loop_hook(void)
{ {
#ifdef SMTC_IDLE_HOOK_DEBUG #ifdef SMTC_IDLE_HOOK_DEBUG
...@@ -1113,29 +1140,14 @@ void smtc_idle_loop_hook(void) ...@@ -1113,29 +1140,14 @@ void smtc_idle_loop_hook(void)
if (pdb_msg != &id_ho_db_msg[0]) if (pdb_msg != &id_ho_db_msg[0])
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* SMTC_IDLE_HOOK_DEBUG */ #endif /* SMTC_IDLE_HOOK_DEBUG */
/* /*
* To the extent that we've ever turned interrupts off, * Replay any accumulated deferred IPIs. If "Instant Replay"
* we may have accumulated deferred IPIs. This is subtle. * is in use, there should never be any.
* If we use the smtc_ipi_qdepth() macro, we'll get an
* exact number - but we'll also disable interrupts
* and create a window of failure where a new IPI gets
* queued after we test the depth but before we re-enable
* interrupts. So long as IXMT never gets set, however,
* we should be OK: If we pick up something and dispatch
* it here, that's great. If we see nothing, but concurrent
* with this operation, another TC sends us an IPI, IXMT
* is clear, and we'll handle it as a real pseudo-interrupt
* and not a pseudo-pseudo interrupt.
*/ */
if (IPIQ[smp_processor_id()].depth > 0) { #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
struct smtc_ipi *pipi; smtc_ipi_replay();
extern void self_ipi(struct smtc_ipi *); #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
self_ipi(pipi);
smtc_cpu_stats[smp_processor_id()].selfipis++;
}
}
} }
void smtc_soft_dump(void) void smtc_soft_dump(void)
......
...@@ -15,6 +15,27 @@ ...@@ -15,6 +15,27 @@
#include <asm/hazards.h> #include <asm/hazards.h>
/*
 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred IPIs,
 * at the cost of branch and call overhead on each local_irq_restore().
 *
 * Only replay when the restored flags show interrupts were enabled:
 * bit 0x0400 tested below is presumably the IXMT (interrupt-exempt)
 * bit of TCStatus saved in "flags" -- NOTE(review): confirm against
 * the SMTC flag-save convention used by raw_local_irq_save().
 */
#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY

extern void smtc_ipi_replay(void);

/*
 * Macro hygiene: parenthesize the "flags" argument so an expression
 * argument cannot change precedence inside the expansion.
 */
#define irq_restore_epilog(flags)				\
do {								\
	if (!((flags) & 0x0400))				\
		smtc_ipi_replay();				\
} while (0)

#else

#define irq_restore_epilog(ignore) do { } while (0)

#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
__asm__ ( __asm__ (
" .macro raw_local_irq_enable \n" " .macro raw_local_irq_enable \n"
" .set push \n" " .set push \n"
...@@ -193,6 +214,7 @@ do { \ ...@@ -193,6 +214,7 @@ do { \
: "=r" (__tmp1) \ : "=r" (__tmp1) \
: "0" (flags) \ : "0" (flags) \
: "memory"); \ : "memory"); \
irq_restore_epilog(flags); \
} while(0) } while(0)
static inline int raw_irqs_disabled_flags(unsigned long flags) static inline int raw_irqs_disabled_flags(unsigned long flags)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment