Commit 944b380e authored by Paul Mackerras
parents fff5f528 c7eb7347
@@ -345,18 +345,12 @@ EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
  * Enabling/disabling interrupts for the entire performance monitoring unit.
  */
 
-u32 cbe_query_pm_interrupts(u32 cpu)
-{
-	return cbe_read_pm(cpu, pm_status);
-}
-EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
-
-u32 cbe_clear_pm_interrupts(u32 cpu)
+u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
 {
 	/* Reading pm_status clears the interrupt bits. */
-	return cbe_query_pm_interrupts(cpu);
+	return cbe_read_pm(cpu, pm_status);
 }
-EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
+EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);
 
 void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
 {
@@ -371,7 +365,7 @@ EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
 
 void cbe_disable_pm_interrupts(u32 cpu)
 {
-	cbe_clear_pm_interrupts(cpu);
+	cbe_get_and_clear_pm_interrupts(cpu);
 	cbe_write_pm(cpu, pm_status, 0);
 }
 EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
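Because the pm_status read is destructive, splitting "query" and "clear" into two calls could silently discard interrupt status; the rename makes the get-and-clear semantics explicit at every call site. A minimal sketch of the intended caller pattern; handle_counter_overflow() is a hypothetical stand-in, not part of this commit:

	static void example_cbe_pmu_irq(u32 cpu)
	{
		/* One read returns the pending bits and clears them in hardware. */
		u32 stat = cbe_get_and_clear_pm_interrupts(cpu);

		if (!stat)
			return;

		/* Act on the returned snapshot; a second read would see zero. */
		handle_counter_overflow(cpu, stat);	/* hypothetical */
	}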
@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	}
 	spin_lock_init(&ctx->mmio_lock);
 	kref_init(&ctx->kref);
-	init_rwsem(&ctx->state_sema);
+	mutex_init(&ctx->state_mutex);
 	init_MUTEX(&ctx->run_sema);
 	init_waitqueue_head(&ctx->ibox_wq);
 	init_waitqueue_head(&ctx->wbox_wq);
@@ -53,6 +53,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	ctx->owner = get_task_mm(current);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
+	ctx->rt_priority = current->rt_priority;
+	ctx->policy = current->policy;
+	ctx->prio = current->prio;
+	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
 	goto out;
 out_free:
 	kfree(ctx);
@@ -65,9 +69,9 @@ void destroy_spu_context(struct kref *kref)
 {
 	struct spu_context *ctx;
 	ctx = container_of(kref, struct spu_context, kref);
-	down_write(&ctx->state_sema);
+	mutex_lock(&ctx->state_mutex);
 	spu_deactivate(ctx);
-	up_write(&ctx->state_sema);
+	mutex_unlock(&ctx->state_mutex);
 	spu_fini_csa(&ctx->csa);
 	if (ctx->gang)
 		spu_gang_remove_ctx(ctx->gang, ctx);
@@ -96,16 +100,6 @@ void spu_forget(struct spu_context *ctx)
 	spu_release(ctx);
 }
 
-void spu_acquire(struct spu_context *ctx)
-{
-	down_read(&ctx->state_sema);
-}
-
-void spu_release(struct spu_context *ctx)
-{
-	up_read(&ctx->state_sema);
-}
-
 void spu_unmap_mappings(struct spu_context *ctx)
 {
 	if (ctx->local_store)
@@ -124,83 +118,84 @@ void spu_unmap_mappings(struct spu_context *ctx)
 		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
 }
 
+/**
+ * spu_acquire_exclusive - lock spu context and protect against userspace access
+ * @ctx: spu context to lock
+ *
+ * Note:
+ *	Returns 0 and with the context locked on success
+ *	Returns negative error and with the context _unlocked_ on failure.
+ */
 int spu_acquire_exclusive(struct spu_context *ctx)
 {
-	int ret = 0;
+	int ret = -EINVAL;
 
-	down_write(&ctx->state_sema);
-
-	/* ctx is about to be freed, can't acquire any more */
-	if (!ctx->owner) {
-		ret = -EINVAL;
-		goto out;
-	}
+	spu_acquire(ctx);
+	/*
+	 * Context is about to be freed, so we can't acquire it anymore.
+	 */
+	if (!ctx->owner)
+		goto out_unlock;
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		ret = spu_activate(ctx, 0);
 		if (ret)
-			goto out;
-		ctx->state = SPU_STATE_RUNNABLE;
+			goto out_unlock;
 	} else {
-		/* We need to exclude userspace access to the context. */
+		/*
+		 * We need to exclude userspace access to the context.
+		 *
+		 * To protect against memory access we invalidate all ptes
+		 * and make sure the pagefault handlers block on the mutex.
+		 */
 		spu_unmap_mappings(ctx);
 	}
 
-out:
-	if (ret)
-		up_write(&ctx->state_sema);
+	return 0;
+
+out_unlock:
+	spu_release(ctx);
 	return ret;
 }
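The new contract (returns locked on success, unlocked on failure) spares callers the old "who drops the semaphore" bookkeeping. A sketch of the calling pattern, with do_privileged_setup() as a hypothetical stand-in for the real work; note the matching release is now a plain spu_release(), since spu_release_exclusive() is removed further down:

	static int example_exclusive(struct spu_context *ctx)
	{
		int ret = spu_acquire_exclusive(ctx);
		if (ret)
			return ret;		/* already unlocked on failure */

		ret = do_privileged_setup(ctx);	/* hypothetical; runs with userspace excluded */

		spu_release(ctx);
		return ret;
	}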
 
-int spu_acquire_runnable(struct spu_context *ctx)
+/**
+ * spu_acquire_runnable - lock spu context and make sure it is in runnable state
+ * @ctx: spu context to lock
+ *
+ * Note:
+ *	Returns 0 and with the context locked on success
+ *	Returns negative error and with the context _unlocked_ on failure.
+ */
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
 {
-	int ret = 0;
-
-	down_read(&ctx->state_sema);
-	if (ctx->state == SPU_STATE_RUNNABLE) {
-		ctx->spu->prio = current->prio;
-		return 0;
-	}
-	up_read(&ctx->state_sema);
-
-	down_write(&ctx->state_sema);
-	/* ctx is about to be freed, can't acquire any more */
-	if (!ctx->owner) {
-		ret = -EINVAL;
-		goto out;
-	}
+	int ret = -EINVAL;
 
+	spu_acquire(ctx);
 	if (ctx->state == SPU_STATE_SAVED) {
-		ret = spu_activate(ctx, 0);
+		/*
+		 * Context is about to be freed, so we can't acquire it anymore.
+		 */
+		if (!ctx->owner)
+			goto out_unlock;
+		ret = spu_activate(ctx, flags);
 		if (ret)
-			goto out;
-		ctx->state = SPU_STATE_RUNNABLE;
+			goto out_unlock;
 	}
 
-	downgrade_write(&ctx->state_sema);
-	/* On success, we return holding the lock */
-	return ret;
-
-out:
-	/* Release here, to simplify calling code. */
-	up_write(&ctx->state_sema);
+	return 0;
+
+out_unlock:
+	spu_release(ctx);
 	return ret;
 }
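spu_acquire_runnable() follows the same contract and now forwards activation flags to spu_activate(): the fault and MFC paths pass 0, while spu_run_init() passes SPU_ACTIVATE_NOWAKE (both visible in the hunks below). A hedged sketch of a caller:

	static int example_set_npc(struct spu_context *ctx, u32 npc)
	{
		int ret = spu_acquire_runnable(ctx, 0);	/* 0: normal wakeup behaviour */
		if (ret)
			return ret;		/* lock already dropped */

		ctx->ops->npc_write(ctx, npc);	/* safe: runnable and locked */
		spu_release(ctx);
		return 0;
	}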
 
+/**
+ * spu_acquire_saved - lock spu context and make sure it is in saved state
+ * @ctx: spu context to lock
+ */
 void spu_acquire_saved(struct spu_context *ctx)
 {
-	down_read(&ctx->state_sema);
-
-	if (ctx->state == SPU_STATE_SAVED)
-		return;
-
-	up_read(&ctx->state_sema);
-	down_write(&ctx->state_sema);
-
-	if (ctx->state == SPU_STATE_RUNNABLE) {
+	spu_acquire(ctx);
+	if (ctx->state != SPU_STATE_SAVED)
 		spu_deactivate(ctx);
-		ctx->state = SPU_STATE_SAVED;
-	}
-
-	downgrade_write(&ctx->state_sema);
 }
@@ -103,6 +103,9 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
 	offset += vma->vm_pgoff << PAGE_SHIFT;
 
+	if (offset >= LS_SIZE)
+		return NOPFN_SIGBUS;
+
 	spu_acquire(ctx);
 
 	if (ctx->state == SPU_STATE_SAVED) {
@@ -164,7 +167,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	/* error here usually means a signal.. we might want to test
 	 * the error code more precisely though
 	 */
-	ret = spu_acquire_runnable(ctx);
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
 		return NOPFN_REFAULT;
@@ -1306,7 +1309,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 	if (ret)
 		goto out;
 
-	spu_acquire_runnable(ctx);
+	spu_acquire_runnable(ctx, 0);
 
 	if (file->f_flags & O_NONBLOCK) {
 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
 	} else {
@@ -133,7 +133,7 @@ out_drop_priv:
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
 out_unlock:
-	spu_release_exclusive(ctx);
+	spu_release(ctx);
 out:
 	return ret;
 }
@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 	int ret;
 	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-	ret = spu_acquire_runnable(ctx);
+	ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
 	if (ret)
 		return ret;
@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 		spu_release(ctx);
 		ret = spu_setup_isolated(ctx);
 		if (!ret)
-			ret = spu_acquire_runnable(ctx);
+			ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
 	}
 
 	/* if userspace has set the runcntrl register (eg, to issue an
@@ -164,8 +164,10 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
-	} else
+	} else {
+		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
+	}
 
 	ctx->ops->runcntl_write(ctx, runcntl);
 	return ret;
@@ -176,6 +178,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 {
 	int ret = 0;
 
+	spu_stop_tick(ctx);
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
 	spu_release(ctx);
@@ -329,8 +332,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 		}
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
-			if (ret)
+			if (ret) {
+				spu_stop_tick(ctx);
 				goto out2;
+			}
 			continue;
 		}
 		ret = spu_process_events(ctx);
@@ -361,4 +366,3 @@ out:
 	up(&ctx->run_sema);
 	return ret;
 }
-
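The scheduler tick introduced here must stay balanced: spu_run_init() starts it when it lets a non-isolated context run, and spu_run_fini() plus the failed-reacquire path above each stop it. A simplified sketch of spufs_run_spu() under that invariant, with the event loop elided:

	static int example_run(struct spu_context *ctx, u32 *npc, u32 *status)
	{
		int ret = spu_run_init(ctx, npc);	/* may start the sched tick */
		if (ret)
			return ret;

		/* ... wait for SPU stop, handle events, possibly reacquire ... */

		return spu_run_fini(ctx, npc, status);	/* always stops the tick */
	}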
@@ -23,7 +23,7 @@
 #define SPUFS_H
 
 #include <linux/kref.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
@@ -37,11 +37,13 @@ enum {
 };
 
 struct spu_context_ops;
-
-#define SPU_CONTEXT_PREEMPT          0UL
-
 struct spu_gang;
 
+/* ctx->sched_flags */
+enum {
+	SPU_SCHED_WAKE = 0,
+};
+
 struct spu_context {
 	struct spu *spu; /* pointer to a physical SPU */
 	struct spu_state csa; /* SPU context save area. */
@@ -56,7 +58,7 @@ struct spu_context {
 	u64 object_id; /* user space pointer for oprofile */
 
 	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
-	struct rw_semaphore state_sema;
+	struct mutex state_mutex;
 	struct semaphore run_sema;
 
 	struct mm_struct *owner;
@@ -77,6 +79,14 @@ struct spu_context {
 	struct list_head gang_list;
 	struct spu_gang *gang;
+
+	/* scheduler fields */
+	struct list_head rq;
+	struct delayed_work sched_work;
+	unsigned long sched_flags;
+	unsigned long rt_priority;
+	int policy;
+	int prio;
 };
 
 struct spu_gang {
@@ -161,6 +171,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 
 /* context management */
+static inline void spu_acquire(struct spu_context *ctx)
+{
+	mutex_lock(&ctx->state_mutex);
+}
+
+static inline void spu_release(struct spu_context *ctx)
+{
+	mutex_unlock(&ctx->state_mutex);
+}
+
 struct spu_context * alloc_spu_context(struct spu_gang *gang);
 void destroy_spu_context(struct kref *kref);
 struct spu_context * get_spu_context(struct spu_context *ctx);
@@ -168,20 +188,18 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);
 
 void spu_forget(struct spu_context *ctx);
-void spu_acquire(struct spu_context *ctx);
-void spu_release(struct spu_context *ctx);
-int spu_acquire_runnable(struct spu_context *ctx);
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-static inline void spu_release_exclusive(struct spu_context *ctx)
-{
-	up_write(&ctx->state_sema);
-}
-
-int spu_activate(struct spu_context *ctx, u64 flags);
+
+enum {
+	SPU_ACTIVATE_NOWAKE = 1,
+};
+
+int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
+void spu_start_tick(struct spu_context *ctx);
+void spu_stop_tick(struct spu_context *ctx);
+void spu_sched_tick(struct work_struct *work);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
@@ -2811,7 +2811,6 @@ static void dump_spu_fields(struct spu *spu)
 	DUMP_FIELD(spu, "0x%lx", irqs[2]);
 	DUMP_FIELD(spu, "0x%x", slb_replace);
 	DUMP_FIELD(spu, "%d", pid);
-	DUMP_FIELD(spu, "%d", prio);
 	DUMP_FIELD(spu, "0x%p", mm);
 	DUMP_FIELD(spu, "0x%p", ctx);
 	DUMP_FIELD(spu, "0x%p", rq);
@@ -53,6 +53,11 @@
 #define CBE_PM_CTR_POLARITY                 0x01000000
 #define CBE_PM_CTR_COUNT_CYCLES             0x00800000
 #define CBE_PM_CTR_ENABLE                   0x00400000
+#define PM07_CTR_INPUT_MUX(x)               (((x) & 0x3F) << 26)
+#define PM07_CTR_INPUT_CONTROL(x)           (((x) & 1) << 25)
+#define PM07_CTR_POLARITY(x)                (((x) & 1) << 24)
+#define PM07_CTR_COUNT_CYCLES(x)            (((x) & 1) << 23)
+#define PM07_CTR_ENABLE(x)                  (((x) & 1) << 22)
 
 /* Macros for the pm_status register. */
 #define CBE_PM_CTR_OVERFLOW_INTR(ctr)       (1 << (31 - ((ctr) & 7)))
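Each PM07_CTR_* helper places one field of a per-counter pm07_control word: signal mux selection, input control, polarity, cycle-versus-event counting, and enable. A hedged composition sketch; the mux value 0x29 is made up for illustration, and the cbe_write_pm07_control() call assumes the per-counter writer that pmu.c provides alongside this header:

	u32 val = PM07_CTR_INPUT_MUX(0x29)	/* hypothetical signal selection */
		| PM07_CTR_INPUT_CONTROL(1)
		| PM07_CTR_POLARITY(1)
		| PM07_CTR_COUNT_CYCLES(0)	/* 0: count events rather than cycles */
		| PM07_CTR_ENABLE(1);		/* enable this counter */

	cbe_write_pm07_control(cpu, ctr, val);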
@@ -89,8 +94,7 @@ extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
 extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
 extern void cbe_disable_pm_interrupts(u32 cpu);
-extern u32 cbe_query_pm_interrupts(u32 cpu);
-extern u32 cbe_clear_pm_interrupts(u32 cpu);
+extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu);
 extern void cbe_sync_irq(int node);
 
 /* Utility functions, macros */
@@ -103,11 +107,4 @@ extern u32 cbe_get_hw_thread_id(int cpu);
 #define CBE_COUNT_PROBLEM_MODE       2
 #define CBE_COUNT_ALL_MODES          3
 
-/* Macros for the pm07_control registers. */
-#define PM07_CTR_INPUT_MUX(x)        (((x) & 0x3F) << 26)
-#define PM07_CTR_INPUT_CONTROL(x)    (((x) & 1) << 25)
-#define PM07_CTR_POLARITY(x)         (((x) & 1) << 24)
-#define PM07_CTR_COUNT_CYCLES(x)     (((x) & 1) << 23)
-#define PM07_CTR_ENABLE(x)           (((x) & 1) << 22)
-
 #endif /* __ASM_CELL_PMU_H__ */
@@ -129,7 +129,6 @@ struct spu {
 	struct spu_runqueue *rq;
 	unsigned long long timestamp;
 	pid_t pid;
-	int prio;
 	int class_0_pending;
 	spinlock_t register_lock;