Commit a1042aa2 authored by Paul Mundt

sh: check for existing mappings for bolted PMB entries.

When entries are being bolted unconditionally it's possible that the boot
loader has established mappings that are within range that we don't want
to clobber. Perform some basic validation to ensure that the new mapping
is out of range before allowing the entry setup to take place.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 6eb3c735
...@@ -128,13 +128,67 @@ static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot) ...@@ -128,13 +128,67 @@ static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
return pmb_flags; return pmb_flags;
} }
static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{ {
return (b->vpn == (a->vpn + a->size)) && return (b->vpn == (a->vpn + a->size)) &&
(b->ppn == (a->ppn + a->size)) && (b->ppn == (a->ppn + a->size)) &&
(b->flags == a->flags); (b->flags == a->flags);
} }
/*
 * pmb_mapping_exists() - check whether the requested VPN/PPN range is
 * already covered by an established PMB mapping, walking any chained
 * (compound) entries when a single entry is not large enough.
 */
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	bool found = false;
	int idx;

	read_lock(&pmb_rwlock);

	for (idx = 0; idx < ARRAY_SIZE(pmb_entry_list); idx++) {
		struct pmb_entry *head, *link;
		unsigned long covered;

		if (!test_bit(idx, pmb_map))
			continue;

		head = &pmb_entry_list[idx];

		/*
		 * Both the VPN and the PPN must fall inside this entry
		 * for it to be a candidate.
		 */
		if (vaddr < head->vpn || vaddr >= head->vpn + head->size)
			continue;
		if (phys < head->ppn || phys >= head->ppn + head->size)
			continue;

		/* A single entry that is big enough settles it outright. */
		if (size <= head->size) {
			found = true;
			break;
		}

		/*
		 * Otherwise accumulate the sizes of any entries chained
		 * off this one and re-test the range requirement.
		 */
		covered = head->size;
		for (link = head->link; link; link = link->link)
			covered += link->size;

		if (size <= covered) {
			found = true;
			break;
		}
	}

	read_unlock(&pmb_rwlock);

	return found;
}
static bool pmb_size_valid(unsigned long size) static bool pmb_size_valid(unsigned long size)
{ {
int i; int i;
...@@ -272,64 +326,62 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, ...@@ -272,64 +326,62 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
unsigned long size, pgprot_t prot) unsigned long size, pgprot_t prot)
{ {
struct pmb_entry *pmbp, *pmbe; struct pmb_entry *pmbp, *pmbe;
unsigned long pmb_flags; unsigned long flags, pmb_flags;
int i, mapped; int i, mapped;
if (!pmb_addr_valid(vaddr, size)) if (!pmb_addr_valid(vaddr, size))
return -EFAULT; return -EFAULT;
if (pmb_mapping_exists(vaddr, phys, size))
return 0;
pmb_flags = pgprot_to_pmb_flags(prot); pmb_flags = pgprot_to_pmb_flags(prot);
pmbp = NULL; pmbp = NULL;
again: do {
for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) { for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
unsigned long flags; if (size < pmb_sizes[i].size)
continue;
if (size < pmb_sizes[i].size)
continue; pmbe = pmb_alloc(vaddr, phys, pmb_flags |
pmb_sizes[i].flag, PMB_NO_ENTRY);
pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, if (IS_ERR(pmbe)) {
PMB_NO_ENTRY); pmb_unmap_entry(pmbp, mapped);
if (IS_ERR(pmbe)) { return PTR_ERR(pmbe);
pmb_unmap_entry(pmbp, mapped); }
return PTR_ERR(pmbe);
}
spin_lock_irqsave(&pmbe->lock, flags);
pmbe->size = pmb_sizes[i].size; spin_lock_irqsave(&pmbe->lock, flags);
__set_pmb_entry(pmbe); pmbe->size = pmb_sizes[i].size;
phys += pmbe->size; __set_pmb_entry(pmbe);
vaddr += pmbe->size;
size -= pmbe->size;
/* phys += pmbe->size;
* Link adjacent entries that span multiple PMB entries vaddr += pmbe->size;
* for easier tear-down. size -= pmbe->size;
*/
if (likely(pmbp)) {
spin_lock(&pmbp->lock);
pmbp->link = pmbe;
spin_unlock(&pmbp->lock);
}
pmbp = pmbe; /*
* Link adjacent entries that span multiple PMB
* entries for easier tear-down.
*/
if (likely(pmbp)) {
spin_lock(&pmbp->lock);
pmbp->link = pmbe;
spin_unlock(&pmbp->lock);
}
/* pmbp = pmbe;
* Instead of trying smaller sizes on every iteration
* (even if we succeed in allocating space), try using
* pmb_sizes[i].size again.
*/
i--;
mapped++;
spin_unlock_irqrestore(&pmbe->lock, flags); /*
} * Instead of trying smaller sizes on every
* iteration (even if we succeed in allocating
* space), try using pmb_sizes[i].size again.
*/
i--;
mapped++;
if (size >= SZ_16M) spin_unlock_irqrestore(&pmbe->lock, flags);
goto again; }
} while (size >= SZ_16M);
return 0; return 0;
} }
...@@ -374,7 +426,7 @@ void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, ...@@ -374,7 +426,7 @@ void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
orig_addr = vaddr = (unsigned long)area->addr; orig_addr = vaddr = (unsigned long)area->addr;
ret = pmb_bolt_mapping(vaddr, phys, size, prot); ret = pmb_bolt_mapping(vaddr, phys, size, prot);
if (ret != 0) if (unlikely(ret != 0))
return ERR_PTR(ret); return ERR_PTR(ret);
return (void __iomem *)(offset + (char *)orig_addr); return (void __iomem *)(offset + (char *)orig_addr);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment