Commit 22da3176 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Fixes for the SLB shadow buffer code
  [POWERPC] Fix a compile warning in powermac/feature.c
  [POWERPC] Fix a compile warning in pci_32.c
  [POWERPC] Fix parse_drconf_memory() for 64-bit start addresses
  [POWERPC] Fix num_cpus calculation in smp_call_function_map()
  [POWERPC] ps3: Fix section mismatch in ps3/setup.c
  [POWERPC] spufs: Fix affinity after introduction of node_allowed() calls
  [POWERPC] Fix special PTE code for secondary hash bucket
  [POWERPC] Expand RPN field to 34 bits when using 64k pages
parents 3e847423 67439b76
@@ -389,8 +389,11 @@ BEGIN_FTR_SECTION
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
+	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
+	eieio
 	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
+	eieio
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
...
@@ -581,8 +581,11 @@ pcibios_assign_resources(void)
 		if ((r->flags & IORESOURCE_UNSET) && r->end &&
 		    (!ppc_md.pcibios_enable_device_hook ||
 		     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
+			int rc;
+
 			r->flags &= ~IORESOURCE_UNSET;
-			pci_assign_resource(dev, idx);
+			rc = pci_assign_resource(dev, idx);
+			BUG_ON(rc);
 		}
 	}
...
@@ -212,11 +212,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
 	atomic_set(&data.finished, 0);
 
 	spin_lock(&call_lock);
-	/* Must grab online cpu count with preempt disabled, otherwise
-	 * it can change. */
-	num_cpus = num_online_cpus() - 1;
-	if (!num_cpus)
-		goto done;
 
 	/* remove 'self' from the map */
 	if (cpu_isset(smp_processor_id(), map))
@@ -224,7 +219,9 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
 
 	/* sanity check the map, remove any non-online processors. */
 	cpus_and(map, map, cpu_online_map);
-	if (cpus_empty(map))
+
+	num_cpus = cpus_weight(map);
+	if (!num_cpus)
 		goto done;
 
 	call_data = &data;
...
@@ -472,10 +472,12 @@ _GLOBAL(htab_call_hpte_insert1)
 	/* Now try secondary slot */
 
 	/* real page number in r5, PTE RPN value + index */
-	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+	andis.	r0,r31,_PAGE_4K_PFN@h
+	srdi	r5,r31,PTE_RPN_SHIFT
+	bne-	3f
 	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
 	add	r5,r5,r25
-	sldi	r5,r5,HW_PAGE_SHIFT
+3:	sldi	r5,r5,HW_PAGE_SHIFT
 
 	/* Calculate secondary group hash */
 	andc	r0,r27,r28
...
@@ -759,7 +759,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			    mmu_psize_defs[mmu_vmalloc_psize].sllp) {
 				get_paca()->vmalloc_sllp =
 					mmu_psize_defs[mmu_vmalloc_psize].sllp;
-				slb_flush_and_rebolt();
+				slb_vmalloc_update();
 			}
 #endif /* CONFIG_PPC_64K_PAGES */
...
@@ -307,9 +307,9 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	const unsigned int *lm, *dm, *aa;
 	unsigned int ls, ld, la;
 	unsigned int n, aam, aalen;
-	unsigned long lmb_size, size;
+	unsigned long lmb_size, size, start;
 	int nid, default_nid = 0;
-	unsigned int start, ai, flags;
+	unsigned int ai, flags;
 
 	lm = of_get_property(memory, "ibm,lmb-size", &ls);
 	dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
...
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
@@ -61,11 +62,11 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
@@ -76,8 +77,7 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
+	slb_shadow_update(ea, flags, entry);
 
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@ void slb_flush_and_rebolt(void)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+	slb_shadow_update(get_paca()->kstack, lflags, 2);
 
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -123,6 +122,15 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_vmalloc_update(void)
+{
+	unsigned long vflags;
+
+	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_flush_and_rebolt();
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
...
@@ -351,7 +351,8 @@ static void aff_set_ref_point_location(struct spu_gang *gang)
 			lowest_offset = ctx->aff_offset;
 	}
 
-	gang->aff_ref_spu = aff_ref_location(ctx, mem_aff, gs, lowest_offset);
+	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
+					     lowest_offset);
 }
 
 static struct spu *ctx_location(struct spu *ref, int offset, int node)
...
@@ -826,13 +826,15 @@ core99_ata100_enable(struct device_node *node, long value)
 
 	if (value) {
 		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
-			pdev = pci_find_slot(pbus, pid);
+			pdev = pci_get_bus_and_slot(pbus, pid);
 		if (pdev == NULL)
 			return 0;
 		rc = pci_enable_device(pdev);
+		if (rc == 0)
+			pci_set_master(pdev);
+		pci_dev_put(pdev);
 		if (rc)
 			return rc;
-		pci_set_master(pdev);
 	}
 	return 0;
 }
...
@@ -109,7 +109,7 @@ static void ps3_panic(char *str)
 
 #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
     defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
-static void prealloc(struct ps3_prealloc *p)
+static void __init prealloc(struct ps3_prealloc *p)
 {
 	if (!p->size)
 		return;
...
@@ -262,6 +262,7 @@ extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
+extern void slb_vmalloc_update(void);
 
 #endif /* __ASSEMBLY__ */
 
 /*
...
@@ -49,12 +49,10 @@
 
 /* Shift to put page number into pte.
  *
- * That gives us a max RPN of 32 bits, which means a max of 48 bits
- * of addressable physical space.
- * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
- * 32 makes PTEs more readable for debugging for now :)
+ * That gives us a max RPN of 34 bits, which means a max of 50 bits
+ * of addressable physical space, or 46 bits for the special 4k PFNs.
  */
-#define PTE_RPN_SHIFT	(32)
+#define PTE_RPN_SHIFT	(30)
 
 #define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
 #define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
...
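For reference, the arithmetic behind the updated comment in the hunk above (a reader's note, not part of the commit): with PTE_RPN_SHIFT = 30 the RPN field spans PTE bits 30..63, i.e. 64 - 30 = 34 bits. Counting 64k (2^16-byte) pages, that reaches 34 + 16 = 50 bits of physical address; for the special _PAGE_4K_PFN case the field holds a 4k (2^12-byte) PFN instead, giving 34 + 12 = 46 bits. The old value of 32 gave only 32 + 16 = 48 bits, as the removed comment noted.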