Commit d6d46551 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Don't use alloc_bootmem() in init_IRQ() path
  powerpc: Fix spin_event_timeout() to be robust over context switches
  powerpc: Use pr_devel() in do_dcache_icache_coherency()
  powerpc/cell: Use pr_devel() in axon_msi.c
  powerpc: Use pr_devel() in arch/powerpc/mm/gup.c
  powerpc: Cleanup & use pr_devel() in arch/powerpc/mm/slb.c
  powerpc/perf_counter: Remove duplicated #include
  powerpc: Use pr_devel() in arch/powerpc/mm/mmu_context_nohash.c
  powerpc/pseries: Use pr_devel() in xics.c
  powerpc: Remove unnecessary semicolons
  powerpc/pseries: Use pr_devel() in pseries LPAR HPTE routines
  powerpc/44x: Fix build error with -Werror for Warp platform
  powerpc/4xx: Have Warp take advantage of GPIO LEDs default-state = keep
  powerpc/44x: Update Warp defconfig
parents b43f3cbd ea96025a
......@@ -261,10 +261,11 @@
compatible = "gpio-leds";
green {
gpios = <&GPIO1 0 0>;
default-state = "on";
default-state = "keep";
};
red {
gpios = <&GPIO1 1 0>;
default-state = "keep";
};
};
......
This diff is collapsed.
......@@ -63,6 +63,8 @@ extern void udelay(unsigned long usecs);
udelay(delay); \
else \
cpu_relax(); \
if (!__ret) \
__ret = (condition); \
__ret; \
})
......
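The delay.h hunk above is the spin_event_timeout() fix: the macro busy-waits on a condition with a timeout and a per-iteration delay, but if the polling thread is preempted and only runs again after the deadline has passed, the old code could report a timeout even though the event had actually occurred while it was descheduled. Re-reading the condition once more after the loop closes that window. A minimal user-space sketch of the same pattern, in case the macro fragment above is hard to follow out of context (spin_until() and its arguments are illustrative, not kernel API):

#include <stdbool.h>
#include <time.h>

/*
 * Poll cond() until it returns true or timeout_ns elapses.  The final
 * re-check mirrors the kernel fix: if this thread was descheduled past
 * the deadline, the event may still have happened in the meantime, so
 * don't report a spurious timeout.
 */
static bool spin_until(bool (*cond)(void), long long timeout_ns)
{
	struct timespec start, now;
	bool ok = false;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!(ok = cond())) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000000LL +
		    (now.tv_nsec - start.tv_nsec) > timeout_ns)
			break;
	}
	if (!ok)
		ok = cond();	/* one last look after the loop */
	return ok;
}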
......@@ -10,7 +10,6 @@
*/
#include <linux/string.h>
#include <linux/perf_counter.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
......
......@@ -10,7 +10,6 @@
*/
#include <linux/string.h>
#include <linux/perf_counter.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
......
......@@ -159,7 +159,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
int psize;
#endif
pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
start &= PAGE_MASK;
addr = start;
......@@ -170,7 +170,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start, len)))
goto slow_irqon;
pr_debug(" aligned: %lx .. %lx\n", start, end);
pr_devel(" aligned: %lx .. %lx\n", start, end);
#ifdef CONFIG_HUGETLB_PAGE
/* We bail out on slice boundary crossing when hugetlb is
......@@ -234,7 +234,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
do {
VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
ptep = huge_pte_offset(mm, a);
pr_debug(" %016lx: huge ptep %p\n", a, ptep);
pr_devel(" %016lx: huge ptep %p\n", a, ptep);
if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
&nr))
goto slow;
......@@ -249,7 +249,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
#ifdef CONFIG_PPC64
VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
#endif
pr_debug(" %016lx: normal pgd %p\n", addr,
pr_devel(" %016lx: normal pgd %p\n", addr,
(void *)pgd_val(pgd));
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
......@@ -269,7 +269,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
slow:
local_irq_enable();
slow_irqon:
pr_debug(" slow path ! nr = %d\n", nr);
pr_devel(" slow path ! nr = %d\n", nr);
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
......
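This file, and most of the remaining hunks in this merge, convert pr_debug() calls to pr_devel(). The difference: with CONFIG_DYNAMIC_DEBUG enabled, every pr_debug() call site is compiled into the kernel so it can be switched on at runtime, which adds code and format strings to hot paths such as get_user_pages_fast(), the SLB setup code and the LPAR hash-table insert path. pr_devel() is compiled out entirely unless DEBUG is defined in that source file, which makes it the better fit for developer-only tracing. A hedged sketch of its definition, paraphrased rather than quoted from <linux/kernel.h>:

/* Approximate shape only; see include/linux/kernel.h for the real macro. */
#ifdef DEBUG
#define pr_devel(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...) \
	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif

A file that wants these messages simply adds "#define DEBUG" before its includes; everyone else pays nothing for them.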
......@@ -89,7 +89,7 @@ static unsigned int steal_context_smp(unsigned int id)
id = first_context;
continue;
}
pr_debug("[%d] steal context %d from mm @%p\n",
pr_devel("[%d] steal context %d from mm @%p\n",
smp_processor_id(), id, mm);
/* Mark this mm as having no context anymore */
......@@ -126,7 +126,7 @@ static unsigned int steal_context_up(unsigned int id)
/* Pick up the victim mm */
mm = context_mm[id];
pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
......@@ -180,7 +180,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
spin_lock(&context_lock);
#ifndef DEBUG_STEAL_ONLY
pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
cpu, next, next->context.active, next->context.id);
#endif
......@@ -189,7 +189,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
next->context.active++;
if (prev) {
#ifndef DEBUG_STEAL_ONLY
pr_debug(" old context %p active was: %d\n",
pr_devel(" old context %p active was: %d\n",
prev, prev->context.active);
#endif
WARN_ON(prev->context.active < 1);
......@@ -236,7 +236,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
next->context.id = id;
#ifndef DEBUG_STEAL_ONLY
pr_debug("[%d] picked up new id %d, nrf is now %d\n",
pr_devel("[%d] picked up new id %d, nrf is now %d\n",
cpu, id, nr_free_contexts);
#endif
......@@ -247,7 +247,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
* local TLB for it and unmark it before we use it
*/
if (test_bit(id, stale_map[cpu])) {
pr_debug("[%d] flushing stale context %d for mm @%p !\n",
pr_devel("[%d] flushing stale context %d for mm @%p !\n",
cpu, id, next);
local_flush_tlb_mm(next);
......@@ -314,13 +314,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
break;
......
......@@ -129,12 +129,12 @@ static pte_t do_dcache_icache_coherency(pte_t pte)
page = pfn_to_page(pfn);
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
pr_debug("do_dcache_icache_coherency... flushing\n");
pr_devel("do_dcache_icache_coherency... flushing\n");
flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
}
else
pr_debug("do_dcache_icache_coherency... already clean\n");
pr_devel("do_dcache_icache_coherency... already clean\n");
return __pte(pte_val(pte) | _PAGE_HWEXEC);
}
......
......@@ -14,8 +14,6 @@
* 2 of the License, or (at your option) any later version.
*/
#undef DEBUG
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
......@@ -27,11 +25,6 @@
#include <linux/compiler.h>
#include <asm/udbg.h>
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG pr_debug
#endif
extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);
......@@ -285,13 +278,13 @@ void slb_initialize(void)
patch_slb_encoding(slb_compare_rr_to_size,
mmu_slb_size);
DBG("SLB: linear LLP = %04lx\n", linear_llp);
DBG("SLB: io LLP = %04lx\n", io_llp);
pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
pr_devel("SLB: io LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
patch_slb_encoding(slb_miss_kernel_load_vmemmap,
SLB_VSID_KERNEL | vmemmap_llp);
DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
}
......
......@@ -72,7 +72,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/
if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
psize = get_slice_psize(mm, addr);;
psize = get_slice_psize(mm, addr);
#else
BUG();
psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
......
......@@ -185,7 +185,7 @@ struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
goto fail;
if (shdr_str.sh_type != SHT_STRTAB)
goto fail;;
goto fail;
for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
if (copy_from_user(&sym, spu_elf_start +
......
......@@ -64,8 +64,6 @@ define_machine(warp) {
};
static u32 post_info;
static int __init warp_post_info(void)
{
struct device_node *np;
......@@ -87,10 +85,9 @@ static int __init warp_post_info(void)
iounmap(fpga);
if (post1 || post2) {
if (post1 || post2)
printk(KERN_INFO "Warp POST %08x %08x\n", post1, post2);
post_info = 1;
} else
else
printk(KERN_INFO "Warp POST OK\n");
return 0;
......@@ -166,6 +163,9 @@ static irqreturn_t temp_isr(int irq, void *context)
value ^= 1;
mdelay(500);
}
/* Not reached */
return IRQ_HANDLED;
}
static int pika_setup_leds(void)
......@@ -179,15 +179,10 @@ static int pika_setup_leds(void)
}
for_each_child_of_node(np, child)
if (strcmp(child->name, "green") == 0) {
if (strcmp(child->name, "green") == 0)
green_led = of_get_gpio(child, 0);
/* Turn back on the green LED */
gpio_set_value(green_led, 1);
} else if (strcmp(child->name, "red") == 0) {
else if (strcmp(child->name, "red") == 0)
red_led = of_get_gpio(child, 0);
/* Set based on post */
gpio_set_value(red_led, post_info);
}
of_node_put(np);
......
......@@ -147,7 +147,7 @@ int __init pq2ads_pci_init_irq(void)
goto out;
}
priv = alloc_bootmem(sizeof(struct pq2ads_pci_pic));
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
of_node_put(np);
ret = -ENOMEM;
......
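For context on the alloc_bootmem() removals (here and in the ipic, mpic, qe_ic and uic hunks further down): alloc_bootmem() is only meant for the very early boot phase before the page and slab allocators are initialized, and it panics rather than returning NULL on failure. By the time init_IRQ() runs on these platforms the slab allocator is available, so kzalloc(..., GFP_KERNEL) is the right call; it also returns zeroed memory, which is why the explicit memset() calls in the later hunks can simply be dropped. A hedged sketch of the conversion pattern (struct foo_pic and foo_pic_alloc() are placeholder names, not symbols from this diff):

#include <linux/slab.h>

struct foo_pic {
	int nr_irqs;
	/* ... controller state ... */
};

static struct foo_pic *foo_pic_alloc(void)
{
	struct foo_pic *pic;

	/*
	 * kzalloc() instead of alloc_bootmem(): slab is up by the time the
	 * interrupt controllers are set up, the memory comes back zeroed,
	 * and failure is reported as NULL instead of a boot-time panic.
	 */
	pic = kzalloc(sizeof(*pic), GFP_KERNEL);
	if (!pic)
		return NULL;

	return pic;
}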
......@@ -85,7 +85,7 @@ static inline void axon_msi_debug_setup(struct device_node *dn,
static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
dcr_write(msic->dcr_host, dcr_n, val);
}
......@@ -98,7 +98,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
int retry = 0;
write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);
/* write_offset doesn't wrap properly, so we have to mask it */
write_offset &= MSIC_FIFO_SIZE_MASK;
......@@ -108,7 +108,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
msi = le32_to_cpu(msic->fifo_virt[idx]);
msi &= 0xFFFF;
pr_debug("axon_msi: woff %x roff %x msi %x\n",
pr_devel("axon_msi: woff %x roff %x msi %x\n",
write_offset, msic->read_offset, msi);
if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
......@@ -123,12 +123,12 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
*/
udelay(1);
retry++;
pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
continue;
}
if (retry) {
pr_debug("axon_msi: late irq 0x%x, retry %d\n",
pr_devel("axon_msi: late irq 0x%x, retry %d\n",
msi, retry);
retry = 0;
}
......@@ -332,7 +332,7 @@ static int axon_msi_shutdown(struct of_device *device)
struct axon_msic *msic = dev_get_drvdata(&device->dev);
u32 tmp;
pr_debug("axon_msi: disabling %s\n",
pr_devel("axon_msi: disabling %s\n",
msic->irq_host->of_node->full_name);
tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
......@@ -349,7 +349,7 @@ static int axon_msi_probe(struct of_device *device,
unsigned int virq;
int dcr_base, dcr_len;
pr_debug("axon_msi: setting up dn %s\n", dn->full_name);
pr_devel("axon_msi: setting up dn %s\n", dn->full_name);
msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
if (!msic) {
......@@ -403,7 +403,7 @@ static int axon_msi_probe(struct of_device *device,
set_irq_data(virq, msic);
set_irq_chained_handler(virq, axon_msi_cascade);
pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq);
pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
/* Enable the MSIC hardware */
msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
......@@ -484,13 +484,13 @@ void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
if (addr == OF_BAD_ADDR) {
pr_debug("axon_msi: couldn't translate reg property\n");
pr_devel("axon_msi: couldn't translate reg property\n");
return;
}
msic->trigger = ioremap(addr, 0x4);
if (!msic->trigger) {
pr_debug("axon_msi: ioremap failed\n");
pr_devel("axon_msi: ioremap failed\n");
return;
}
......@@ -498,7 +498,7 @@ void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
msic, &fops_msic)) {
pr_debug("axon_msi: debugfs_create_file failed!\n");
pr_devel("axon_msi: debugfs_create_file failed!\n");
return;
}
}
......
......@@ -251,7 +251,7 @@ static void g5_pfunc_switch_volt(int speed_mode)
static struct pmf_function *pfunc_cpu_setfreq_high;
static struct pmf_function *pfunc_cpu_setfreq_low;
static struct pmf_function *pfunc_cpu_getfreq;
static struct pmf_function *pfunc_slewing_done;;
static struct pmf_function *pfunc_slewing_done;
static int g5_pfunc_switch_freq(int speed_mode)
{
......
......@@ -609,7 +609,7 @@ static int pmacpic_find_viaint(void)
np = of_find_node_by_name(NULL, "via-pmu");
if (np == NULL)
goto not_found;
viaint = irq_of_parse_and_map(np, 0);;
viaint = irq_of_parse_and_map(np, 0);
not_found:
#endif /* CONFIG_ADB_PMU */
......
......@@ -284,7 +284,6 @@ static int ps3_sb_free_mmio_region(struct ps3_mmio_region *r)
int result;
dump_mmio_region(r);
;
result = lv1_unmap_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
r->lpar_addr);
......
......@@ -286,7 +286,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long hpte_v, hpte_r;
if (!(vflags & HPTE_V_BOLTED))
pr_debug("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
......@@ -294,7 +294,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
hpte_r = hpte_encode_r(pa, psize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
pr_debug(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
/* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */
......@@ -311,7 +311,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
if (unlikely(lpar_rc == H_PTEG_FULL)) {
if (!(vflags & HPTE_V_BOLTED))
pr_debug(" full\n");
pr_devel(" full\n");
return -1;
}
......@@ -322,11 +322,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
*/
if (unlikely(lpar_rc != H_SUCCESS)) {
if (!(vflags & HPTE_V_BOLTED))
pr_debug(" lpar err %lu\n", lpar_rc);
pr_devel(" lpar err %lu\n", lpar_rc);
return -2;
}
if (!(vflags & HPTE_V_BOLTED))
pr_debug(" -> slot: %lu\n", slot & 7);
pr_devel(" -> slot: %lu\n", slot & 7);
/* Because of iSeries, we have to pass down the secondary
* bucket bit here as well
......@@ -418,17 +418,17 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
want_v = hpte_encode_avpn(va, psize, ssize);
pr_debug(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
want_v, slot, flags, psize);
lpar_rc = plpar_pte_protect(flags, slot, want_v);
if (lpar_rc == H_NOT_FOUND) {
pr_debug("not found !\n");
pr_devel("not found !\n");
return -1;
}
pr_debug("ok\n");
pr_devel("ok\n");
BUG_ON(lpar_rc != H_SUCCESS);
......@@ -503,7 +503,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
unsigned long lpar_rc;
unsigned long dummy1, dummy2;
pr_debug(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
slot, va, psize, local);
want_v = hpte_encode_avpn(va, psize, ssize);
......
......@@ -190,10 +190,10 @@ static void xics_unmask_irq(unsigned int virq)
int call_status;
int server;
pr_debug("xics: unmask virq %d\n", virq);
pr_devel("xics: unmask virq %d\n", virq);
irq = (unsigned int)irq_map[virq].hwirq;
pr_debug(" -> map to hwirq 0x%x\n", irq);
pr_devel(" -> map to hwirq 0x%x\n", irq);
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
......@@ -252,7 +252,7 @@ static void xics_mask_irq(unsigned int virq)
{
unsigned int irq;
pr_debug("xics: mask virq %d\n", virq);
pr_devel("xics: mask virq %d\n", virq);
irq = (unsigned int)irq_map[virq].hwirq;
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
......@@ -414,7 +414,7 @@ static int xics_host_match(struct irq_host *h, struct device_node *node)
static int xics_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
/* Insert the interrupt mapping into the radix tree for fast lookup */
irq_radix_revmap_insert(xics_host, virq, hw);
......
......@@ -965,7 +965,7 @@ static inline void fsl_rio_info(struct device *dev, u32 ccsr)
break;
default:
str = "Unknown";
break;;
break;
}
dev_info(dev, "Hardware port width: %s\n", str);
......
......@@ -728,12 +728,10 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
if (ret)
return NULL;
ipic = alloc_bootmem(sizeof(struct ipic));
ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
if (ipic == NULL)
return NULL;
memset(ipic, 0, sizeof(struct ipic));
ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
NR_IPIC_INTS,
&ipic_host_ops, 0);
......
......@@ -508,9 +508,8 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
/* Allocate fixups array */
mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL);
BUG_ON(mpic->fixups == NULL);
memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));
/* Init spinlock */
spin_lock_init(&mpic->fixup_lock);
......@@ -1109,9 +1108,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,
psize /= 4;
bits = intvec_top + 1;
mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long);
mpic->protected = alloc_bootmem(mapsize);
mpic->protected = kzalloc(mapsize, GFP_KERNEL);
BUG_ON(mpic->protected == NULL);
memset(mpic->protected, 0, mapsize);
for (i = 0; i < psize; i++) {
if (psrc[i] > intvec_top)
continue;
......@@ -1353,7 +1351,8 @@ void __init mpic_init(struct mpic *mpic)
#ifdef CONFIG_PM
/* allocate memory to save mpic state */
mpic->save_data = alloc_bootmem(mpic->num_sources * sizeof(struct mpic_irq_save));
mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
GFP_KERNEL);
BUG_ON(mpic->save_data == NULL);
#endif
}
......
......@@ -1531,7 +1531,7 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
*/
/* Calculate window size */
sa = (0xffffffffffffffffull << ilog2(ep_size));;
sa = (0xffffffffffffffffull << ilog2(ep_size));
/* Setup BAR0 */
out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
......@@ -1550,7 +1550,7 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
} else {
/* Calculate window size */
sa = (0xffffffffffffffffull << ilog2(size));;
sa = (0xffffffffffffffffull << ilog2(size));
if (res->flags & IORESOURCE_PREFETCH)
sa |= 0x8;
......
......@@ -333,12 +333,10 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
if (ret)
return;
qe_ic = alloc_bootmem(sizeof(struct qe_ic));
qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
if (qe_ic == NULL)
return;
memset(qe_ic, 0, sizeof(struct qe_ic));
qe_ic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
NR_QE_IC_INTS, &qe_ic_host_ops, 0);
if (qe_ic->irqhost == NULL)
......
......@@ -258,11 +258,10 @@ static struct uic * __init uic_init_one(struct device_node *node)
BUG_ON(! of_device_is_compatible(node, "ibm,uic"));
uic = alloc_bootmem(sizeof(*uic));
uic = kzalloc(sizeof(*uic), GFP_KERNEL);
if (! uic)
return NULL; /* FIXME: panic? */
memset(uic, 0, sizeof(*uic));
spin_lock_init(&uic->lock);
indexp = of_get_property(node, "cell-index", &len);
if (!indexp || (len != sizeof(u32))) {
......