Commit 8b9909de authored by Linus Torvalds

Merge branch 'merge' of master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] sys_move_pages should be callable from an SPU
  [POWERPC] Wire up sys_epoll_pwait
  [POWERPC] Allocate syscall number for sys_getcpu
  [POWERPC] update cell_defconfig
  [POWERPC] ps3: always make sure we're running on a PS3
  [POWERPC] Fix spu SLB invalidations
  [POWERPC] avoid SPU_ACTIVATE_NOWAKE optimization
  [POWERPC] spufs: fix possible memory corruption in spufs_mem_write
parents 44a50851 7b3c384d
@@ -685,6 +685,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                    "non-cacheable mapping\n");
            psize = mmu_vmalloc_psize = MMU_PAGE_4K;
        }
+#ifdef CONFIG_SPE_BASE
+        spu_flush_all_slbs(mm);
+#endif
    }
    if (user_region) {
        if (psize != get_paca()->context.user_psize) {
@@ -759,6 +762,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
                mmu_psize_defs[MMU_PAGE_4K].sllp;
            get_paca()->context = mm->context;
            slb_flush_and_rebolt();
+#ifdef CONFIG_SPE_BASE
+            spu_flush_all_slbs(mm);
+#endif
        }
    }
    if (mm->context.user_psize == MMU_PAGE_64K)
...
@@ -24,6 +24,7 @@
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/tlb.h>
+#include <asm/spu.h>

 #include <linux/sysctl.h>
@@ -513,6 +514,9 @@ int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
    if ((addr + len) > 0x100000000UL)
        err = open_high_hpage_areas(current->mm,
                    HTLB_AREA_MASK(addr, len));
+#ifdef CONFIG_SPE_BASE
+    spu_flush_all_slbs(current->mm);
+#endif
    if (err) {
        printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
            " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
...
@@ -38,8 +38,61 @@
 const struct spu_management_ops *spu_management_ops;
 const struct spu_priv1_ops *spu_priv1_ops;

+static struct list_head spu_list[MAX_NUMNODES];
+static LIST_HEAD(spu_full_list);
+static DEFINE_MUTEX(spu_mutex);
+static spinlock_t spu_list_lock = SPIN_LOCK_UNLOCKED;
+
 EXPORT_SYMBOL_GPL(spu_priv1_ops);

+void spu_invalidate_slbs(struct spu *spu)
+{
+    struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+    if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
+        out_be64(&priv2->slb_invalidate_all_W, 0UL);
+}
+EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
+
+/* This is called by the MM core when a segment size is changed, to
+ * request a flush of all the SPEs using a given mm
+ */
+void spu_flush_all_slbs(struct mm_struct *mm)
+{
+    struct spu *spu;
+    unsigned long flags;
+
+    spin_lock_irqsave(&spu_list_lock, flags);
+    list_for_each_entry(spu, &spu_full_list, full_list) {
+        if (spu->mm == mm)
+            spu_invalidate_slbs(spu);
+    }
+    spin_unlock_irqrestore(&spu_list_lock, flags);
+}
+
+/* The hack below stinks... try to do something better one of
+ * these days... Does it even work properly with NR_CPUS == 1 ?
+ */
+static inline void mm_needs_global_tlbie(struct mm_struct *mm)
+{
+    int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
+
+    /* Global TLBIE broadcast required with SPEs. */
+    __cpus_setall(&mm->cpu_vm_mask, nr);
+}
+
+void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&spu_list_lock, flags);
+    spu->mm = mm;
+    spin_unlock_irqrestore(&spu_list_lock, flags);
+    if (mm)
+        mm_needs_global_tlbie(mm);
+}
+EXPORT_SYMBOL_GPL(spu_associate_mm);
+
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
    pr_debug("%s\n", __FUNCTION__);
@@ -74,6 +127,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
    struct spu_priv2 __iomem *priv2 = spu->priv2;
    struct mm_struct *mm = spu->mm;
    u64 esid, vsid, llp;
+    int psize;

    pr_debug("%s\n", __FUNCTION__);
@@ -90,22 +144,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
    case USER_REGION_ID:
 #ifdef CONFIG_HUGETLB_PAGE
        if (in_hugepage_area(mm->context, ea))
-            llp = mmu_psize_defs[mmu_huge_psize].sllp;
+            psize = mmu_huge_psize;
        else
 #endif
-            llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+            psize = mm->context.user_psize;
        vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
-            SLB_VSID_USER | llp;
+            SLB_VSID_USER;
        break;
    case VMALLOC_REGION_ID:
-        llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+        if (ea < VMALLOC_END)
+            psize = mmu_vmalloc_psize;
+        else
+            psize = mmu_io_psize;
        vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
-            SLB_VSID_KERNEL | llp;
+            SLB_VSID_KERNEL;
        break;
    case KERNEL_REGION_ID:
-        llp = mmu_psize_defs[mmu_linear_psize].sllp;
+        psize = mmu_linear_psize;
        vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
-            SLB_VSID_KERNEL | llp;
+            SLB_VSID_KERNEL;
        break;
    default:
        /* Future: support kernel segments so that drivers
@@ -114,9 +171,10 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
        pr_debug("invalid region access at %016lx\n", ea);
        return 1;
    }
+    llp = mmu_psize_defs[psize].sllp;

    out_be64(&priv2->slb_index_W, spu->slb_replace);
-    out_be64(&priv2->slb_vsid_RW, vsid);
+    out_be64(&priv2->slb_vsid_RW, vsid | llp);
    out_be64(&priv2->slb_esid_RW, esid);
    spu->slb_replace++;
@@ -330,10 +388,6 @@ static void spu_free_irqs(struct spu *spu)
        free_irq(spu->irqs[2], spu);
 }

-static struct list_head spu_list[MAX_NUMNODES];
-static LIST_HEAD(spu_full_list);
-static DEFINE_MUTEX(spu_mutex);
-
 static void spu_init_channels(struct spu *spu)
 {
    static const struct {
@@ -593,6 +647,7 @@ static int __init create_spu(void *data)
    struct spu *spu;
    int ret;
    static int number;
+    unsigned long flags;

    ret = -ENOMEM;
    spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -620,8 +675,10 @@ static int __init create_spu(void *data)
        goto out_free_irqs;

    mutex_lock(&spu_mutex);
+    spin_lock_irqsave(&spu_list_lock, flags);
    list_add(&spu->list, &spu_list[spu->node]);
    list_add(&spu->full_list, &spu_full_list);
+    spin_unlock_irqrestore(&spu_list_lock, flags);
    mutex_unlock(&spu_mutex);

    goto out;
...
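A word on the mm_needs_global_tlbie() hack carried over into this file: the powerpc hash flush code chooses between the CPU-local tlbiel instruction and the broadcast tlbie by inspecting mm->cpu_vm_mask, and an SPE's MFC translations are only reached by the broadcast form. A condensed sketch of the test being defeated (modelled on the tlb flush path of this era; an illustration, not a literal quote of kernel code):

/* Sketch: the flush path treats an mm as "local" when cpu_vm_mask
 * names only the current CPU, and then uses tlbiel, which an SPE's
 * MFC never observes. By setting more bits than there are CPUs,
 * mm_needs_global_tlbie() makes this test fail for any mm with an
 * SPE attached, forcing the broadcast tlbie instead. */
static int tlb_flush_is_local(struct mm_struct *mm)
{
    cpumask_t tmp = cpumask_of_cpu(smp_processor_id());

    return cpus_equal(mm->cpu_vm_mask, tmp);
}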
@@ -63,8 +63,8 @@ static ssize_t
 spufs_mem_read(struct file *file, char __user *buffer,
            size_t size, loff_t *pos)
 {
-    int ret;
    struct spu_context *ctx = file->private_data;
+    ssize_t ret;

    spu_acquire(ctx);
    ret = __spufs_mem_read(ctx, buffer, size, pos);
@@ -74,25 +74,29 @@ spufs_mem_read(struct file *file, char __user *buffer,
 static ssize_t
 spufs_mem_write(struct file *file, const char __user *buffer,
-            size_t size, loff_t *pos)
+            size_t size, loff_t *ppos)
 {
    struct spu_context *ctx = file->private_data;
    char *local_store;
+    loff_t pos = *ppos;
    int ret;

-    size = min_t(ssize_t, LS_SIZE - *pos, size);
-    if (size <= 0)
+    if (pos < 0)
+        return -EINVAL;
+    if (pos > LS_SIZE)
        return -EFBIG;
-    *pos += size;
+    if (size > LS_SIZE - pos)
+        size = LS_SIZE - pos;

    spu_acquire(ctx);
    local_store = ctx->ops->get_ls(ctx);
-    ret = copy_from_user(local_store + *pos - size,
-            buffer, size) ? -EFAULT : size;
+    ret = copy_from_user(local_store + pos, buffer, size);
    spu_release(ctx);
-    return ret;
+
+    if (ret)
+        return -EFAULT;
+
+    *ppos = pos + size;
+    return size;
 }

 static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
...
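This hunk is the memory-corruption fix from the merge summary. Reading the diff, the old code updated the shared *pos before the copy and recomputed the destination from it, so a racing writer on the same struct file could shift the offset between those steps and push the copy outside the local store. The new code snapshots the offset once and range-checks it up front. A minimal self-contained sketch of that bounds logic (LS_SIZE is the 256 KB local-store size; the function name and bare errno values here are illustrative):

#include <stddef.h>

#define LS_SIZE 0x40000   /* 256 KB of SPU local store */

/* Returns the number of bytes that may be copied at offset pos,
 * or a negative errno-style value. Mirrors the fixed kernel logic:
 * snapshot the offset, validate it, then clamp the size. */
static long ls_write_clamp(long long pos, size_t size)
{
    if (pos < 0)
        return -22;   /* -EINVAL */
    if (pos > LS_SIZE)
        return -27;   /* -EFBIG */
    if (size > (size_t)(LS_SIZE - pos))
        size = LS_SIZE - pos;
    return (long)size;
}

For example, ls_write_clamp(0x3ff00, 0x200) yields 0x100: the write is truncated at the end of the local store rather than spilling past it.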
@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
    int ret;
    unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;

-    ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
+    ret = spu_acquire_runnable(ctx, 0);
    if (ret)
        return ret;
@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
        spu_release(ctx);
        ret = spu_setup_isolated(ctx);
        if (!ret)
-            ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
+            ret = spu_acquire_runnable(ctx, 0);
    }

    /* if userspace has set the runcntrl register (eg, to issue an
...
@@ -127,14 +127,6 @@ static void spu_remove_from_active_list(struct spu *spu)
    mutex_unlock(&spu_prio->active_mutex[node]);
 }

-static inline void mm_needs_global_tlbie(struct mm_struct *mm)
-{
-    int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
-
-    /* Global TLBIE broadcast required with SPEs. */
-    __cpus_setall(&mm->cpu_vm_mask, nr);
-}
-
 static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

 static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
@@ -167,8 +159,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
    ctx->spu = spu;
    ctx->ops = &spu_hw_ops;
    spu->pid = current->pid;
-    spu->mm = ctx->owner;
-    mm_needs_global_tlbie(spu->mm);
+    spu_associate_mm(spu, ctx->owner);
    spu->ibox_callback = spufs_ibox_callback;
    spu->wbox_callback = spufs_wbox_callback;
    spu->stop_callback = spufs_stop_callback;
@@ -205,7 +196,7 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
    spu->stop_callback = NULL;
    spu->mfc_callback = NULL;
    spu->dma_callback = NULL;
-    spu->mm = NULL;
+    spu_associate_mm(spu, NULL);
    spu->pid = 0;
    ctx->ops = &spu_backing_ops;
    ctx->spu = NULL;
@@ -263,7 +254,6 @@ static void spu_prio_wait(struct spu_context *ctx)
 {
    DEFINE_WAIT(wait);

-    set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
    prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
    if (!signal_pending(current)) {
        mutex_unlock(&ctx->state_mutex);
@@ -272,7 +262,6 @@ static void spu_prio_wait(struct spu_context *ctx)
    }
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&ctx->stop_wq, &wait);
-    clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }

 /**
@@ -292,7 +281,7 @@ static void spu_reschedule(struct spu *spu)
    best = sched_find_first_bit(spu_prio->bitmap);
    if (best < MAX_PRIO) {
        struct spu_context *ctx = spu_grab_context(best);
-        if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
+        if (ctx)
            wake_up(&ctx->stop_wq);
    }
    spin_unlock(&spu_prio->runq_lock);
@@ -414,8 +403,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
        }
        spu_add_to_rq(ctx);
-        if (!(flags & SPU_ACTIVATE_NOWAKE))
-            spu_prio_wait(ctx);
+        spu_prio_wait(ctx);
        spu_del_from_rq(ctx);
    } while (!signal_pending(current));
...
@@ -41,7 +41,7 @@ struct spu_gang;

 /* ctx->sched_flags */
 enum {
-    SPU_SCHED_WAKE = 0,
+    SPU_SCHED_WAKE = 0, /* currently unused */
 };

 struct spu_context {
@@ -191,9 +191,7 @@ void spu_forget(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-enum {
-    SPU_ACTIVATE_NOWAKE = 1,
-};
+
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
...
@@ -468,26 +468,6 @@ static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
            MFC_CNTL_PURGE_DMA_COMPLETE);
 }

-static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
-{
-    struct spu_priv2 __iomem *priv2 = spu->priv2;
-    int i;
-
-    /* Save, Step 29:
-     *     If MFC_SR1[R]='1', save SLBs in CSA.
-     */
-    if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
-        csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
-        for (i = 0; i < 8; i++) {
-            out_be64(&priv2->slb_index_W, i);
-            eieio();
-            csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
-            csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
-            eieio();
-        }
-    }
-}
-
 static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
 {
    /* Save, Step 30:
@@ -708,20 +688,6 @@ static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
    out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
 }

-static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
-{
-    struct spu_priv2 __iomem *priv2 = spu->priv2;
-
-    /* Save, Step 45:
-     * Restore, Step 19:
-     *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
-     */
-    if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
-        out_be64(&priv2->slb_invalidate_all_W, 0UL);
-        eieio();
-    }
-}
-
 static inline void get_kernel_slb(u64 ea, u64 slb[2])
 {
    u64 llp;
@@ -765,7 +731,7 @@ static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
     *     MFC_SR1[R]=1 (in other words, assume that
     *     translation is desired by OS environment).
     */
-    invalidate_slbs(csa, spu);
+    spu_invalidate_slbs(spu);
    get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
    get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
    load_mfc_slb(spu, code_slb, 0);
@@ -1718,27 +1684,6 @@ static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
    }
 }

-static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
-{
-    struct spu_priv2 __iomem *priv2 = spu->priv2;
-    int i;
-
-    /* Restore, Step 68:
-     *     If MFC_SR1[R]='1', restore SLBs from CSA.
-     */
-    if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) {
-        for (i = 0; i < 8; i++) {
-            out_be64(&priv2->slb_index_W, i);
-            eieio();
-            out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]);
-            out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]);
-            eieio();
-        }
-        out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W);
-        eieio();
-    }
-}
-
 static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
 {
    /* Restore, Step 69:
@@ -1875,7 +1820,6 @@ static void save_csa(struct spu_state *prev, struct spu *spu)
    set_mfc_tclass_id(prev, spu);    /* Step 26. */
    purge_mfc_queue(prev, spu);      /* Step 27. */
    wait_purge_complete(prev, spu);  /* Step 28. */
-    save_mfc_slbs(prev, spu);        /* Step 29. */
    setup_mfc_sr1(prev, spu);        /* Step 30. */
    save_spu_npc(prev, spu);         /* Step 31. */
    save_spu_privcntl(prev, spu);    /* Step 32. */
@@ -1987,7 +1931,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
    reset_spu_privcntl(prev, spu);   /* Step 16. */
    reset_spu_lslr(prev, spu);       /* Step 17. */
    setup_mfc_sr1(prev, spu);        /* Step 18. */
-    invalidate_slbs(prev, spu);      /* Step 19. */
+    spu_invalidate_slbs(spu);        /* Step 19. */
    reset_ch_part1(prev, spu);       /* Step 20. */
    reset_ch_part2(prev, spu);       /* Step 21. */
    enable_interrupts(prev, spu);    /* Step 22. */
@@ -2055,7 +1999,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu)
    restore_spu_mb(next, spu);       /* Step 65. */
    check_ppu_mb_stat(next, spu);    /* Step 66. */
    check_ppuint_mb_stat(next, spu); /* Step 67. */
-    restore_mfc_slbs(next, spu);     /* Step 68. */
+    spu_invalidate_slbs(spu);        /* Modified Step 68. */
    restore_mfc_sr1(next, spu);      /* Step 69. */
    restore_other_spu_access(next, spu); /* Step 70. */
    restore_spu_runcntl(next, spu);  /* Step 71. */
...
@@ -294,7 +294,7 @@ static int __init ps3_mm_add_memory(void)
    unsigned long nr_pages;

    if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
-        return 0;
+        return -ENODEV;

    BUG_ON(!mem_init_done);
...
@@ -172,7 +172,7 @@ int __init ps3_system_bus_init(void)
    int result;

    if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
-        return 0;
+        return -ENODEV;

    result = bus_register(&ps3_system_bus_type);
    BUG_ON(result);
...
@@ -24,6 +24,8 @@
 #include <linux/reboot.h>
 #include <linux/kernel.h>
 #include <linux/ioctl.h>
+
+#include <asm/firmware.h>
 #include <asm/lv1call.h>
 #include <asm/ps3av.h>
 #include <asm/ps3.h>
@@ -947,7 +949,12 @@ static struct ps3_vuart_port_driver ps3av_driver = {
 static int ps3av_module_init(void)
 {
-    int error = ps3_vuart_port_driver_register(&ps3av_driver);
+    int error;
+
+    if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+        return -ENODEV;
+
+    error = ps3_vuart_port_driver_register(&ps3av_driver);
    if (error) {
        printk(KERN_ERR
            "%s: ps3_vuart_port_driver_register failed %d\n",
...
@@ -22,7 +22,10 @@
 #include <linux/module.h>
 #include <linux/workqueue.h>
 #include <linux/reboot.h>
+
+#include <asm/firmware.h>
 #include <asm/ps3.h>
+
 #include "vuart.h"

 MODULE_AUTHOR("Sony Corporation");
@@ -598,6 +601,9 @@ static struct ps3_vuart_port_driver ps3_sys_manager = {
 static int __init ps3_sys_manager_init(void)
 {
+    if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+        return -ENODEV;
+
    return ps3_vuart_port_driver_register(&ps3_sys_manager);
 }
...
@@ -1031,7 +1031,7 @@ int __init ps3_vuart_bus_init(void)
    pr_debug("%s:%d:\n", __func__, __LINE__);

    if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
-        return 0;
+        return -ENODEV;

    init_MUTEX(&vuart_bus_priv.probe_mutex);
    result = bus_register(&ps3_vuart_bus);
...
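All of the PS3 hunks above apply one idiom from the "always make sure we're running on a PS3" commit: an initcall that can be built into a multiplatform kernel must check for the LV1 hypervisor first and fail with -ENODEV, rather than returning 0 and leaving a half-initialized driver registered on other hardware. A hypothetical driver init showing the shape (names invented for illustration):

static int __init my_ps3_widget_init(void)
{
    /* Not under the PS3 LV1 hypervisor: report -ENODEV so the
     * initcall visibly fails instead of pretending to succeed. */
    if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
        return -ENODEV;

    return my_ps3_widget_register();   /* hypothetical register step */
}
module_init(my_ps3_widget_init);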
@@ -165,6 +165,13 @@ int spu_irq_class_0_bottom(struct spu *spu);
 int spu_irq_class_1_bottom(struct spu *spu);
 void spu_irq_setaffinity(struct spu *spu, int cpu);

+extern void spu_invalidate_slbs(struct spu *spu);
+extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
+
+/* Calls from the memory management to the SPU */
+struct mm_struct;
+extern void spu_flush_all_slbs(struct mm_struct *mm);
+
 /* system callbacks from the SPU */
 struct spu_syscall_block {
    u64 nr_ret;
...
@@ -221,8 +221,6 @@ struct spu_priv2_collapsed {
  * @spu_chnlcnt_RW: Array of saved channel counts.
  * @spu_chnldata_RW: Array of saved channel data.
  * @suspend_time: Time stamp when decrementer disabled.
- * @slb_esid_RW: Array of saved SLB esid entries.
- * @slb_vsid_RW: Array of saved SLB vsid entries.
  *
  * Structure representing the whole of the SPU
  * context save area (CSA). This struct contains
@@ -245,8 +243,6 @@ struct spu_state {
    u32 spu_mailbox_data[4];
    u32 pu_mailbox_data[1];
    unsigned long suspend_time;
-    u64 slb_esid_RW[8];
-    u64 slb_vsid_RW[8];
    spinlock_t register_lock;
 };
...
@@ -304,5 +304,6 @@ SYSCALL_SPU(fchmodat)
 SYSCALL_SPU(faccessat)
 COMPAT_SYS_SPU(get_robust_list)
 COMPAT_SYS_SPU(set_robust_list)
-COMPAT_SYS(move_pages)
+COMPAT_SYS_SPU(move_pages)
 SYSCALL_SPU(getcpu)
+COMPAT_SYS(epoll_pwait)
...
@@ -324,10 +324,12 @@
 #define __NR_get_robust_list	299
 #define __NR_set_robust_list	300
 #define __NR_move_pages		301
+#define __NR_getcpu		302
+#define __NR_epoll_pwait	303

 #ifdef __KERNEL__

-#define __NR_syscalls		302
+#define __NR_syscalls		304

 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
...
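With __NR_getcpu and __NR_epoll_pwait allocated and __NR_syscalls bumped to cover them, the new entries can be smoke-tested from userspace by number. A sketch using raw syscall(2) with the powerpc value from the hunk above (illustrative; 302 is only meaningful for this ABI):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_getcpu 302   /* __NR_getcpu on powerpc, per the hunk above */

int main(void)
{
    unsigned int cpu = 0, node = 0;

    /* getcpu(cpu, node, tcache); the cache argument may be NULL */
    if (syscall(NR_getcpu, &cpu, &node, NULL) == 0)
        printf("running on cpu %u, node %u\n", cpu, node);
    else
        perror("getcpu");
    return 0;
}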