Commit 14778d90 authored by David S. Miller

[SPARC]: Respect vm_page_prot in io_remap_page_range().

Make sure the callers do a pgprot_noncached() on
vma->vm_page_prot.

Pointed out by Hugh Dickins.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e952f31b
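
For reference, the caller-side pattern this commit enforces looks roughly like the sketch below. It is not taken from the patch; the my_dev_mmap handler and the my_dev_phys register base are hypothetical names used only for illustration. The point is that a driver's mmap handler runs vma->vm_page_prot through pgprot_noncached() before handing it to io_remap_pfn_range(), mirroring the flash_mmap() and pci_mmap_page_range() hunks below.

/*
 * Minimal sketch of a caller, assuming a hypothetical device whose
 * registers start at physical address my_dev_phys.  The key line is
 * the pgprot_noncached() call on vma->vm_page_prot, which lets the
 * MMU-specific uncached bits reach the PTEs that io_remap_pfn_range()
 * installs.
 */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* This is an I/O mapping; keep it out of core dumps and swap. */
	vma->vm_flags |= VM_IO | VM_RESERVED;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       my_dev_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
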
@@ -76,7 +76,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
vma->vm_pgoff = (offset >> PAGE_SHIFT) |
((unsigned long)space << 28UL);
prot = __pgprot(pg_iobits);
offset -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
@@ -22,8 +22,6 @@ struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;
unsigned int pg_iobits;
extern void ld_mmu_sun4c(void);
extern void ld_mmu_srmmu(void);
@@ -2130,6 +2130,13 @@ static unsigned long srmmu_pte_to_pgoff(pte_t pte)
return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}
static pgprot_t srmmu_pgprot_noncached(pgprot_t prot)
{
prot &= ~__pgprot(SRMMU_CACHE);
return prot;
}
/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
@@ -2150,9 +2157,9 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
/* Functions */
BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM);
#ifndef CONFIG_SMP
BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
@@ -1589,7 +1589,10 @@ static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
{
unsigned long page_entry;
unsigned long page_entry, pg_iobits;
pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
@@ -2134,6 +2137,13 @@ void __init sun4c_paging_init(void)
printk("SUN4C: %d mmu entries for the kernel\n", cnt);
}
static pgprot_t sun4c_pgprot_noncached(pgprot_t prot)
{
prot |= __pgprot(_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE);
return prot;
}
/* Load up routines and constants for sun4c mmu */
void __init ld_mmu_sun4c(void)
{
@@ -2156,10 +2166,9 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
page_kernel = pgprot_val(SUN4C_PAGE_KERNEL);
pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
/* Functions */
BTFIXUPSET_CALL(pgprot_noncached, sun4c_pgprot_noncached, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
@@ -656,6 +656,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
__pci_mmap_set_flags(dev, vma, mmap_state);
__pci_mmap_set_pgprot(dev, vma, mmap_state);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
vma->vm_end - vma->vm_start,
@@ -663,7 +664,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
if (ret)
return ret;
vma->vm_flags |= VM_IO;
return 0;
}
@@ -140,7 +140,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
vma->vm_pgoff = phys_base >> PAGE_SHIFT;
prot = __pgprot(pg_iobits);
offset -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
@@ -619,6 +619,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
#endif
offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
@@ -71,9 +71,8 @@ flash_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)
size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));
pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
pgprot_val(vma->vm_page_prot) |= _PAGE_E;
vma->vm_flags |= (VM_SHM | VM_LOCKED);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot))
return -EAGAIN;
@@ -1169,11 +1169,6 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
vma->vm_pgoff = off >> PAGE_SHIFT;
/* This is an IO map - tell maydump to skip this VMA */
vma->vm_flags |= VM_IO | VM_RESERVED;
#if defined(__sparc_v9__)
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
#else
#if defined(__mc68000__)
#if defined(CONFIG_SUN3)
pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
@@ -1195,7 +1190,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
#elif defined(__i386__) || defined(__x86_64__)
if (boot_cpu_data.x86 > 3)
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
#elif defined(__mips__)
#elif defined(__mips__) || defined(__sparc_v9__)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#elif defined(__hppa__)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
@@ -1212,7 +1207,6 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
#endif /* !__sparc_v9__ */
return 0;
#endif /* !sparc32 */
}
@@ -58,6 +58,8 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
/* To stop the swapper from even considering these pages */
vma->vm_flags |= (VM_IO | VM_RESERVED);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* Each page, see which map applies */
for (page = 0; page < size; ){
map_size = 0;
@@ -269,11 +269,14 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
BTFIXUPDEF_INT(pte_modify_mask)
static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
@@ -309,9 +312,6 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
#define pte_unmap(pte) do{}while(0)
#define pte_unmap_nested(pte) do{}while(0)
/* The permissions for pgprot_val to make a page mapped on the obio space */
extern unsigned int pg_iobits;
/* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
* hook is made available.