Commit 52f04fcf authored by Christoph Hellwig, committed by Arnd Bergmann

[POWERPC] spu sched: forced preemption at execution

If we start a spu context with realtime priority we want it to run
immediately and not wait until some other lower priority thread has
finished.  Try to find a suitable victim and use its spu in this
case.
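For illustration only (not part of this patch): the preemption path is taken when the
creating thread already has a non-zero rt_priority, which alloc_spu_context() copies into
the context.  A minimal, hypothetical userspace sketch of how a thread would acquire such
a priority before creating and running its SPU context is shown below; the spufs/libspe
setup itself is omitted and only assumed.

#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 50 };

	/* Make the current thread SCHED_FIFO; current->rt_priority becomes
	 * non-zero, so a context created afterwards inherits it in
	 * alloc_spu_context() and spu_activate() may call find_victim(). */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) < 0) {
		perror("sched_setscheduler");
		return 1;
	}

	/* ... create the spufs context and enter spu_run() here, e.g. via
	 * libspe; omitted because it is not part of this patch ... */
	return 0;
}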
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
parent ae7b4c52
@@ -53,6 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	ctx->owner = get_task_mm(current);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
+	ctx->rt_priority = current->rt_priority;
 	ctx->prio = current->prio;
 	goto out;
 out_free:
@@ -281,6 +281,74 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 	return spu;
 }
 
+/**
+ * find_victim - find a lower priority context to preempt
+ * @ctx: candidate context for running
+ *
+ * Returns the freed physical spu to run the new context on.
+ */
+static struct spu *find_victim(struct spu_context *ctx)
+{
+	struct spu_context *victim = NULL;
+	struct spu *spu;
+	int node, n;
+
+	/*
+	 * Look for a possible preemption candidate on the local node first.
+	 * If there is no candidate look at the other nodes.  This isn't
+	 * exactly fair, but so far the whole spu scheduler tries to keep
+	 * a strong node affinity.  We might want to fine-tune this in
+	 * the future.
+	 */
+ restart:
+	node = cpu_to_node(raw_smp_processor_id());
+	for (n = 0; n < MAX_NUMNODES; n++, node++) {
+		node = (node < MAX_NUMNODES) ? node : 0;
+		if (!node_allowed(node))
+			continue;
+
+		mutex_lock(&spu_prio->active_mutex[node]);
+		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+			struct spu_context *tmp = spu->ctx;
+
+			if (tmp->rt_priority < ctx->rt_priority &&
+			    (!victim || tmp->rt_priority < victim->rt_priority))
+				victim = spu->ctx;
+		}
+		mutex_unlock(&spu_prio->active_mutex[node]);
+
+		if (victim) {
+			/*
+			 * This nests ctx->state_mutex, but we always lock
+			 * higher priority contexts before lower priority
+			 * ones, so this is safe until we introduce
+			 * priority inheritance schemes.
+			 */
+			if (!mutex_trylock(&victim->state_mutex)) {
+				victim = NULL;
+				goto restart;
+			}
+
+			spu = victim->spu;
+			if (!spu) {
+				/*
+				 * This race can happen because we've dropped
+				 * the active list mutex.  Not a problem, just
+				 * restart the search.
+				 */
+				mutex_unlock(&victim->state_mutex);
+				victim = NULL;
+				goto restart;
+			}
+			spu_unbind_context(spu, victim);
+			mutex_unlock(&victim->state_mutex);
+			return spu;
+		}
+	}
+
+	return NULL;
+}
+
 /**
  * spu_activate - find a free spu for a context and execute it
  * @ctx: spu context to schedule
@@ -300,6 +368,12 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 		struct spu *spu;
 
 		spu = spu_get_idle(ctx);
+		/*
+		 * If this is a realtime thread we try to get it running by
+		 * preempting a lower priority thread.
+		 */
+		if (!spu && ctx->rt_priority)
+			spu = find_victim(ctx);
 		if (spu) {
 			spu_bind_context(spu, ctx);
 			return 0;
@@ -83,6 +83,7 @@ struct spu_context {
 	/* scheduler fields */
 	struct list_head rq;
 	unsigned long sched_flags;
+	unsigned long rt_priority;
 	int prio;
 };