Commit 44620a65 authored by Hari Kanigeri's avatar Hari Kanigeri

plug dmm with proc module

this patch plugs the dmm module with Proc4430 driver
and procmgr module. This provides both Map and Unmap
functionality.
Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
parent 2a1b3fca
......@@ -116,6 +116,11 @@
#define DSP_MAPVMALLOCADDR 0x00000080
#define PG_MASK(pg_size) (~((pg_size)-1))
#define PG_ALIGN_LOW(addr, pg_size) ((addr) & PG_MASK(pg_size))
#define PG_ALIGN_HIGH(addr, pg_size) (((addr)+(pg_size)-1) & PG_MASK(pg_size))
struct mmu_entry {
u32 ul_phy_addr ;
u32 ul_virt_addr ;
......@@ -153,5 +158,6 @@ void dbg_print_ptes(bool ashow_inv_entries, bool ashow_repeat_entries);
int ducati_setup(void);
void ducati_destroy(void);
u32 get_ducati_virt_mem();
int ducati_mem_map(u32 va, u32 da, u32 num_bytes, u32 map_attr);
int ducati_mem_unmap(u32 da, u32 num_bytes);
#endif /* _DDUCATIMMU_ENABLER_H_*/
......@@ -112,7 +112,7 @@ extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
u32 page_size,
struct hw_mmu_map_attrs_t *map_attrs);
extern hw_status hw_mmu_pt_clear(const u32 pg_tbl_va,
extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
u32 pg_size,
u32 virtual_addr);
......
......@@ -225,7 +225,7 @@ func_exit:
* Purpose:
*Free a chunk of reserved DSP/IVA address space.
*/
int dmm_unreserve_memory(u32 rsv_addr)
int dmm_unreserve_memory(u32 rsv_addr, u32 *psize)
{
struct map_page *chunk;
int status = 0;
......@@ -239,6 +239,7 @@ int dmm_unreserve_memory(u32 rsv_addr)
WARN_ON(status < 0);
if (status == 0) {
chunk->b_reserved = false;
*psize = chunk->region_size * PAGE_SIZE;
/* NOTE: We do NOT coalesce free regions here.
* Free regions are coalesced in get_region(), as it traverses
*the whole mapping table
......
......@@ -32,7 +32,7 @@
int dmm_reserve_memory(u32 size, u32 *p_rsv_addr);
int dmm_unreserve_memory(u32 rsv_addr);
int dmm_unreserve_memory(u32 rsv_addr, u32 *psize);
void dmm_destroy(void);
......
......@@ -22,6 +22,40 @@
#include <linux/io.h>
#include <linux/module.h>
#include <asm/page.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/autoconf.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <linux/io.h>
#include <linux/syscalls.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <syslink/ducatienabler.h>
......@@ -43,9 +77,8 @@
*/
#define DUCATI_BASEIMAGE_PHYSICAL_ADDRESS 0x83600000
#define PG_MASK(pg_size) (~((pg_size)-1))
#define PG_ALIGN_LOW(addr, pg_size) ((addr) & PG_MASK(pg_size))
#define PG_ALIGN_HIGH(addr, pg_size) (((addr)+(pg_size)-1) & PG_MASK(pg_size))
#define phys_to_page(phys) pfn_to_page((phys) >> PAGE_SHIFT)
/* Attributes used to manage the DSP MMU page tables */
struct pg_table_attrs {
......@@ -94,6 +127,13 @@ static u32 base_ducati_l2_mmu;
static u32 shm_phys_addr;
static u32 shm_virt_addr;
/*
 * Report a fatal page-accounting error and halt.
 *
 * Called when a physical page that is being mapped or unmapped turns out
 * to have a zero reference count (i.e. nobody owns it, so touching it
 * would corrupt memory). Logs the offending physical address and the
 * current process, then BUG()s — this is unrecoverable.
 *
 * pa: physical address of the bad page
 * pg: corresponding struct page (currently only pa is printed)
 */
static void bad_page_dump(u32 pa, struct page *pg)
{
pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
pr_emerg("Bad page state in process '%s'\n", current->comm);
BUG();
}
/*============================================
* Print the DSP MMU Table Entries
*/
......@@ -344,6 +384,224 @@ static int pte_update(u32 pa, u32 va, u32 size,
return status;
}
/*
 * ======== ducati_mem_unmap ========
 * Invalidate the PTEs for the DSP VA block to be unmapped.
 *
 * PTEs of a mapped memory block are contiguous in any page table,
 * so instead of looking up the PTE address for every 4K block we
 * clear consecutive PTEs until all the bytes are unmapped.
 *
 * da:        slave (Ducati) virtual address the block was mapped at
 * num_bytes: size of the block in bytes
 *
 * Returns 0 on success, -EFAULT if an invalid or misaligned PTE is
 * encountered. The Ducati MMU TLB is flushed on every exit path so no
 * stale translations survive.
 */
int ducati_mem_unmap(u32 da, u32 num_bytes)
{
	u32 L1_base_va;
	u32 L2_base_va;
	u32 L2_base_pa;
	u32 L2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;
	u32 rem_bytes_l2;
	u32 vaCurr;
	struct page *pg = NULL;
	int status = 0;
	u32 temp;
	u32 pAddr;
	u32 numof4Kpages = 0;

	/* original DPRINTK referenced 'hDevContext', which is not a
	 * parameter of this function and did not compile */
	DPRINTK("> ducati_mem_unmap da %x, NumBytes %x\n", da, num_bytes);
	vaCurr = da;
	rem_bytes = num_bytes;
	rem_bytes_l2 = 0;
	L1_base_va = p_pt_attrs->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
	while (rem_bytes) {
		u32 vaCurrOrig = vaCurr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
		pte_val = *(u32 *)pte_addr_l1;
		pte_size = hw_mmu_pte_sizel1(pte_val);
		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
			/*
			 * Get the L2 table PA from the L1 PTE and find the
			 * corresponding L2 VA. The coarse descriptor carries
			 * the table base in its upper bits (see
			 * hw_mmu_pte_set, which stores physical_addr &
			 * MMU_PAGE_TABLE_MASK), so mask off the low bits —
			 * the original code wrongly reused
			 * hw_mmu_pte_sizel1() here, yielding the descriptor
			 * size instead of the table base.
			 */
			L2_base_pa = pte_val & ~(HW_MMU_COARSE_PAGE_SIZE - 1);
			L2_base_va = L2_base_pa - p_pt_attrs->l2_base_pa
					+ p_pt_attrs->l2_base_va;
			L2_page_num = (L2_base_pa - p_pt_attrs->l2_base_pa) /
					HW_MMU_COARSE_PAGE_SIZE;
			/*
			 * Find the L2 PTE address from which we will start
			 * clearing, the number of PTEs to be cleared on this
			 * page, and the size of VA space that needs to be
			 * cleared on this L2 page
			 */
			pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
			pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
			pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) /
					sizeof(u32);
			if (rem_bytes < (pte_count * PAGE_SIZE))
				pte_count = rem_bytes / PAGE_SIZE;
			rem_bytes_l2 = pte_count * PAGE_SIZE;
			DPRINTK("ducati_mem_unmap L2_base_pa %x, "
				"L2_base_va %x pte_addr_l2 %x,"
				"rem_bytes_l2 %x\n", L2_base_pa, L2_base_va,
				pte_addr_l2, rem_bytes_l2);
			/*
			 * Unmap the VA space on this L2 PT. A quicker way
			 * would be to clear pte_count entries starting from
			 * pte_addr_l2. However, below code checks that we
			 * don't clear invalid entries or less than 64KB for
			 * a 64KB entry. Similar checking is done for L1 PTEs
			 * too below.
			 */
			while (rem_bytes_l2) {
				pte_val = *(u32 *)pte_addr_l2;
				pte_size = hw_mmu_pte_sizel2(pte_val);
				/*
				 * Invalid entry, short remainder, or vaCurr
				 * not aligned to pte_size: bail out. The
				 * original code only set status here without
				 * leaving the loop, spinning forever since
				 * rem_bytes_l2 never changed.
				 */
				if ((pte_size == 0) ||
						(rem_bytes_l2 < pte_size) ||
						(vaCurr & (pte_size - 1))) {
					status = -EFAULT;
					goto EXIT_LOOP;
				}
				/* Release the physical pages backing the
				 * entry; a 64KB large page spans 16 4KB
				 * frames */
				pAddr = (pte_val & ~(pte_size - 1));
				if (pte_size == HW_PAGE_SIZE_64KB)
					numof4Kpages = 16;
				else
					numof4Kpages = 1;
				temp = 0;
				while (temp++ < numof4Kpages) {
					/*
					 * Test pAddr here — the original
					 * tested 'patemp', which stayed 0,
					 * so the mapped pages were never
					 * dirtied/released (page leak).
					 */
					if (pfn_valid(__phys_to_pfn(pAddr))) {
						pg = phys_to_page(pAddr);
						if (page_count(pg) < 1)
							bad_page_dump(pAddr,
									pg);
						SetPageDirty(pg);
						page_cache_release(pg);
					}
					pAddr += HW_PAGE_SIZE_4KB;
				}
				if (hw_mmu_pte_clear(pte_addr_l2, vaCurr,
						pte_size) != RET_OK) {
					status = -EFAULT;
					goto EXIT_LOOP;
				}
				rem_bytes_l2 -= pte_size;
				vaCurr += pte_size;
				pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
			}
			p_pt_attrs->pg_info[L2_page_num].num_entries -=
					pte_count;
			if (p_pt_attrs->pg_info[L2_page_num].num_entries
					== 0) {
				/*
				 * No live entries left on this L2 PT:
				 * clear the L1 PTE pointing to it.
				 */
				if (hw_mmu_pte_clear(L1_base_va, vaCurrOrig,
						HW_MMU_COARSE_PAGE_SIZE)
						!= RET_OK) {
					status = -EFAULT;
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PAGE_SIZE;
			DPRINTK("ducati_mem_unmap L2_page_num %x, "
				"num_entries %x, pte_count %x, status: 0x%x\n",
				L2_page_num,
				p_pt_attrs->pg_info[L2_page_num].num_entries,
				pte_count, status);
		} else if ((pte_size != 0) && (rem_bytes >= pte_size) &&
				!(vaCurr & (pte_size - 1))) {
			/* L1 section entry: pte_size = 1 MB or 16 MB */
			if (pte_size == HW_PAGE_SIZE_1MB)
				numof4Kpages = 256;
			else
				numof4Kpages = 4096;
			temp = 0;
			/* Release the physical pages backing the section */
			pAddr = (pte_val & ~(pte_size - 1));
			while (temp++ < numof4Kpages) {
				/* guard with pfn_valid, consistent with the
				 * L2 path, before touching struct page */
				if (pfn_valid(__phys_to_pfn(pAddr))) {
					pg = phys_to_page(pAddr);
					if (page_count(pg) < 1)
						bad_page_dump(pAddr, pg);
					SetPageDirty(pg);
					page_cache_release(pg);
				}
				pAddr += HW_PAGE_SIZE_4KB;
			}
			if (hw_mmu_pte_clear(L1_base_va, vaCurr,
					pte_size) != RET_OK) {
				status = -EFAULT;
				goto EXIT_LOOP;
			}
			rem_bytes -= pte_size;
			vaCurr += pte_size;
		} else {
			/*
			 * Invalid/misaligned L1 entry. Bail out — the
			 * original fell through with rem_bytes unchanged,
			 * looping forever.
			 */
			status = -EFAULT;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	hw_mmu_tlb_flushAll(base_ducati_l2_mmu);
	DPRINTK("ducati_mem_unmap vaCurr %x, pte_addr_l1 %x "
		"pte_addr_l2 %x\n", vaCurr, pte_addr_l1, pte_addr_l2);
	DPRINTK("< ducati_mem_unmap status %x rem_bytes %x, "
		"rem_bytes_l2 %x\n", status, rem_bytes, rem_bytes_l2);
	return status;
}
/*
 * ======== user_va2pa ========
 * Purpose:
 * This function walks through the Linux page tables to convert a userland
 * virtual address to a physical address.
 *
 * mm:      memory descriptor whose page tables are walked
 * address: userland virtual address to translate
 *
 * Returns the page-aligned physical address, or 0 if no present
 * translation exists. Caller must hold mm->mmap_sem (or otherwise pin
 * the mapping) so the walk is stable — TODO confirm against callers.
 */
static u32 user_va2pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
		pmd = pmd_offset(pgd, address);
		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
			ptep = pte_offset_map(pmd, address);
			if (ptep) {
				pte = *ptep;
				/* pte_offset_map() must be paired with
				 * pte_unmap(); the original leaked the
				 * kmap/preempt reference */
				pte_unmap(ptep);
				if (pte_present(pte))
					/* use pte_val(): pte_t may be a
					 * struct, so 'pte & PAGE_MASK'
					 * does not compile portably */
					return pte_val(pte) & PAGE_MASK;
			}
		}
	}
	return 0;
}
/*============================================
* This function maps MPU buffer to the DSP address space. It performs
......@@ -352,15 +610,25 @@ static int pte_update(u32 pa, u32 va, u32 size,
* All address & size arguments are assumed to be page aligned (in proc.c)
*
*/
static int ducati_mem_map(u32 ul_mpu_addr, u32 ul_virt_addr,
int ducati_mem_map(u32 mpu_addr, u32 ul_virt_addr,
u32 num_bytes, u32 map_attr)
{
u32 attrs;
int status = 0;
struct hw_mmu_map_attrs_t hw_attrs;
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
struct task_struct *curr_task = current;
u32 write = 0;
u32 va = 0;
u32 pa = 0;
int pg_i = 0;
int pg_num = 0;
struct page *mappedPage, *pg;
int num_usr_pages = 0;
DPRINTK("> WMD_BRD_MemMap pa %x, va %x, "
"size %x, map_attr %x\n", ul_mpu_addr, ul_virt_addr,
"size %x, map_attr %x\n", mpu_addr, ul_virt_addr,
num_bytes, map_attr);
if (num_bytes == 0)
return -EINVAL;
......@@ -399,13 +667,117 @@ static int ducati_mem_map(u32 ul_mpu_addr, u32 ul_virt_addr,
return -EINVAL;
}
}
status = pte_update(ul_mpu_addr, ul_virt_addr, num_bytes, &hw_attrs);
/*
* Do OS-specific user-va to pa translation.
* Combine physically contiguous regions to reduce TLBs.
* Pass the translated pa to PteUpdate.
*/
if ((attrs & DSP_MAPPHYSICALADDR)) {
status = pte_update(mpu_addr, ul_virt_addr, num_bytes,
&hw_attrs);
goto func_cont;
}
/*
* Important Note: mpu_addr is mapped from user application process
* to current process - it must lie completely within the current
* virtual memory address space in order to be of use to us here!
*/
down_read(&mm->mmap_sem);
vma = find_vma(mm, mpu_addr);
/*
* It is observed that under some circumstances, the user buffer is
* spread across several VMAs. So loop through and check if the entire
* user buffer is covered
*/
while ((vma) && (mpu_addr + num_bytes > vma->vm_end)) {
/* jump to the next VMA region */
vma = find_vma(mm, vma->vm_end + 1);
}
if (!vma) {
status = -EINVAL;
up_read(&mm->mmap_sem);
goto func_cont;
}
if (vma->vm_flags & VM_IO) {
num_usr_pages = num_bytes / PAGE_SIZE;
va = mpu_addr;
/* Get the physical addresses for user buffer */
for (pg_i = 0; pg_i < num_usr_pages; pg_i++) {
pa = user_va2pa(mm, va);
if (!pa) {
status = -EFAULT;
pr_err("DSPBRIDGE: VM_IO mapping physical"
"address is invalid\n");
break;
}
if (pfn_valid(__phys_to_pfn(pa))) {
pg = phys_to_page(pa);
get_page(pg);
if (page_count(pg) < 1) {
pr_err("Bad page in VM_IO buffer\n");
bad_page_dump(pa, pg);
}
}
status = pte_set(pa, va, HW_PAGE_SIZE_4KB, &hw_attrs);
if (WARN_ON(status < 0))
break;
va += HW_PAGE_SIZE_4KB;
va += HW_PAGE_SIZE_4KB;
pa += HW_PAGE_SIZE_4KB;
}
} else {
num_usr_pages = num_bytes / PAGE_SIZE;
if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
write = 1;
for (pg_i = 0; pg_i < num_usr_pages; pg_i++) {
pg_num = get_user_pages(curr_task, mm, mpu_addr, 1,
write, 1, &mappedPage, NULL);
if (pg_num > 0) {
if (page_count(mappedPage) < 1) {
pr_err("Bad page count after doing"
"get_user_pages on"
"user buffer\n");
bad_page_dump(page_to_phys(mappedPage),
mappedPage);
}
status = pte_set(page_to_phys(mappedPage), va,
HW_PAGE_SIZE_4KB, &hw_attrs);
if (WARN_ON(status < 0))
break;
va += HW_PAGE_SIZE_4KB;
mpu_addr += HW_PAGE_SIZE_4KB;
} else {
pr_err("DSPBRIDGE: get_user_pages FAILED,"
"MPU addr = 0x%x,"
"vma->vm_flags = 0x%lx,"
"get_user_pages Err"
"Value = %d, Buffer"
"size=0x%x\n", mpu_addr,
vma->vm_flags, pg_num,
num_bytes);
status = -EFAULT;
break;
}
}
}
up_read(&mm->mmap_sem);
func_cont:
/* Don't propogate Linux or HW status to upper layers */
if (status < 0) {
/*
* Roll out the mapped pages incase it failed in middle of
* mapping
*/
if (pg_i)
ducati_mem_unmap(ul_virt_addr, (pg_i * PAGE_SIZE));
}
/* In any case, flush the TLB
* This is called from here instead from pte_update to avoid unnecessary
* repetition while mapping non-contiguous physical regions of a virtual
* region */
hw_mmu_tlb_flushAll(base_ducati_l2_mmu);
WARN_ON(status < 0);
DPRINTK("< WMD_BRD_MemMap status %x\n", status);
return status;
}
......@@ -568,8 +940,10 @@ static int add_entry_ext(u32 *phys_addr, u32 *dsp_addr,
u32 entry_size = 0;
int status = 0;
u32 page_size = HW_PAGE_SIZE_1MB;
u32 flags = DSP_MAPELEMSIZE32;
u32 flags = 0;
flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
DSP_MAPPHYSICALADDR);
while ((mapped_size < size) && (status == 0)) {
/* get_mmu_entry_size fills the size_tlb and entry_size
......
......@@ -104,17 +104,7 @@ enum hw_mmu_pgsiz_t {
* Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
* RETURNS:
*
* Type : hw_status
* Description : RET_OK -- No errors occured
* RET_BAD_NULL_PARAM -- A Pointer Paramater was set to NULL
*
* PURPOSE: : Flush the TLB entry pointed by the lock counter register
* even if this entry is set protected
*
* METHOD: : Check the Input parameter and Flush a single entry in the TLB.
-------------------------------------------------------------------------
*/
......@@ -147,15 +137,6 @@ static hw_status mmu_flsh_entry(const u32 base_address);
* Type : const u32
* Description : virtual Address
*
* RETURNS:
*
* Type : hw_status
* Description : RET_OK -- No errors occured
* RET_BAD_NULL_PARAM -- A Pointer Paramater was set to NULL
* RET_PARAM_OUT_OF_RANGE -- Input Parameter out of Range
*
* PURPOSE: : Set MMU_CAM reg
*
* METHOD: : Check the Input parameters and set the CAM entry.
-------------------------------------------------------------------------
*/
......@@ -193,13 +174,7 @@ static hw_status mme_set_cam_entry(const u32 base_address,
* Type : hw_mmu_mixed_size_t
* Description : Element Size to follow CPU or TLB
*
* RETURNS:
*
* Type : hw_status
* Description : RET_OK -- No errors occured
* RET_BAD_NULL_PARAM -- A Pointer Paramater was set to NULL
* RET_PARAM_OUT_OF_RANGE -- Input Parameter out of Range
*
* PURPOSE: : Set MMU_CAM reg
*
* METHOD: : Check the Input parameters and set the RAM entry.
......@@ -431,9 +406,9 @@ hw_status hw_mmu_tlb_flush(const u32 base_address,
/* Generate the 20-bit tag from virtual address */
virt_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
mme_set_cam_entry (base_address, pg_sizeBits, 0, 0, virt_addr_tag);
mme_set_cam_entry(base_address, pg_sizeBits, 0, 0, virt_addr_tag);
mmu_flsh_entry (base_address);
mmu_flsh_entry(base_address);
return status;
}
......@@ -502,7 +477,8 @@ hw_status hw_mmu_tlb_add(const u32 base_address,
/* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
mmu_lck_crnt_vctmwite32(base_address, entryNum);
/* Enable loading of an entry in TLB by writing 1 into LD_TLB_REG register */
/* Enable loading of an entry in TLB by writing 1 into LD_TLB_REG
register */
mmu_ld_tlbwrt_reg32(base_address, MMU_LOAD_TLB);
......@@ -527,7 +503,8 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
switch (page_size) {
case HW_PAGE_SIZE_4KB:
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr & MMU_SMALL_PAGE_MASK);
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr &
MMU_SMALL_PAGE_MASK);
pte_val = ((physical_addr & MMU_SMALL_PAGE_MASK) |
(map_attrs->endianism << 9) |
(map_attrs->element_size << 4) |
......@@ -537,7 +514,8 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
case HW_PAGE_SIZE_64KB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr & MMU_LARGE_PAGE_MASK);
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr &
MMU_LARGE_PAGE_MASK);
pte_val = ((physical_addr & MMU_LARGE_PAGE_MASK) |
(map_attrs->endianism << 9) |
(map_attrs->element_size << 4) |
......@@ -546,7 +524,8 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
break;
case HW_PAGE_SIZE_1MB:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr & MMU_SECTION_ADDR_MASK);
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
MMU_SECTION_ADDR_MASK);
pte_val = ((((physical_addr & MMU_SECTION_ADDR_MASK) |
(map_attrs->endianism << 15) |
(map_attrs->element_size << 10) |
......@@ -557,7 +536,8 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
case HW_PAGE_SIZE_16MB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr & MMU_SSECTION_ADDR_MASK);
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
MMU_SSECTION_ADDR_MASK);
pte_val = (((physical_addr & MMU_SSECTION_ADDR_MASK) |
(map_attrs->endianism << 15) |
(map_attrs->element_size << 10) |
......@@ -567,7 +547,8 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
break;
case HW_MMU_COARSE_PAGE_SIZE:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr & MMU_SECTION_ADDR_MASK);
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
MMU_SECTION_ADDR_MASK);
pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
break;
......@@ -583,7 +564,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
}
EXPORT_SYMBOL(hw_mmu_pte_set);
hw_status hw_mmu_pt_clear(const u32 pg_tbl_va,
hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
u32 virtual_addr,
u32 pg_size)
{
......@@ -624,7 +605,7 @@ hw_status hw_mmu_pt_clear(const u32 pg_tbl_va,
return status;
}
EXPORT_SYMBOL(hw_mmu_pt_clear);
EXPORT_SYMBOL(hw_mmu_pte_clear);
/* ============================================================================
* LOCAL FUNCTIONS
......@@ -653,7 +634,7 @@ static hw_status mmu_flsh_entry(const u32 base_address)
EXPORT_SYMBOL(mmu_flsh_entry);
/*
-----------------------------------------------------------------------------
NAME : mme_set_cam_entry -
NAME : mme_set_cam_entry
-----------------------------------------------------
*/
static hw_status mme_set_cam_entry(const u32 base_address,
......@@ -670,7 +651,8 @@ static hw_status mme_set_cam_entry(const u32 base_address,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
mmuCamReg = (virt_addr_tag << 12);
mmuCamReg = (mmuCamReg) | (page_size) | (valid_bit << 2) | (preserve_bit << 3);
mmuCamReg = (mmuCamReg) | (page_size) | (valid_bit << 2)
| (preserve_bit << 3);
/* write values to register */
MMUMMU_CAMWriteRegister32(base_address, mmuCamReg);
......@@ -679,8 +661,8 @@ static hw_status mme_set_cam_entry(const u32 base_address,
}
EXPORT_SYMBOL(mme_set_cam_entry);
/*
-----------------------------------------------------------------------------
NAME : mmu_set_ram_entry -
----------------------------------------------------
NAME : mmu_set_ram_entry
-----------------------------------------------------
*/
static hw_status mmu_set_ram_entry(const u32 base_address,
......@@ -695,12 +677,14 @@ static hw_status mmu_set_ram_entry(const u32 base_address,
/*Check the input Parameters*/
CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
CHECK_INPUT_RANGE_MIN0(element_size, MMU_ELEMENTSIZE_MAX, RET_PARAM_OUT_OF_RANGE,
CHECK_INPUT_RANGE_MIN0(element_size, MMU_ELEMENTSIZE_MAX,
RET_PARAM_OUT_OF_RANGE,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
mmuRamReg = (physical_addr & MMU_ADDR_MASK);
mmuRamReg = (mmuRamReg) | ((endianism << 9) | (element_size << 7) | (mixedSize << 6));
mmuRamReg = (mmuRamReg) | ((endianism << 9) | (element_size << 7)
| (mixedSize << 6));
/* write values to register */
MMUMMU_RAMWriteRegister32(base_address, mmuRamReg);
......@@ -737,11 +721,9 @@ long hw_mmu_tlb_dump(const u32 base_address, bool shw_inv_entries)
if ((cam & 0x4) != 0) {
printk(KERN_ALERT "TLB Entry [0x%x]: VA = 0x%x PA = 0x%x\
Protected = 0x%x\n)",
i,
(cam & MMU_ADDR_MASK),
(ram & MMU_ADDR_MASK),
printk(KERN_ALERT "TLB Entry [0x%x]: VA = 0x%x PA = 0x%x"
"Protected = 0x%x\n)",
i, (cam & MMU_ADDR_MASK), (ram & MMU_ADDR_MASK),
(cam & 0x8) ? 1 : 0);
} else if (shw_inv_entries != false) {
......@@ -781,13 +763,3 @@ u32 hw_mmu_pte_phyaddr(u32 pte_val, u32 pte_size)
return ret_val;
}
EXPORT_SYMBOL(hw_mmu_pte_phyaddr);
......@@ -26,7 +26,7 @@
#include "../procmgr.h"
#include "../procmgr_drvdefs.h"
#include "proc4430.h"
#include "dmm4430.h"
#include <syslink/multiproc.h>
#include <syslink/ducatienabler.h>
......@@ -229,6 +229,7 @@ void *proc4430_create(u16 proc_id, const struct proc4430_params *params)
handle->proc_fxn_table.translateAddr =
&proc4430_translate_addr;
handle->proc_fxn_table.map = &proc4430_map;
handle->proc_fxn_table.unmap = &proc4430_unmap;
handle->state = PROC_MGR_STATE_UNKNOWN;
handle->object = vmalloc
(sizeof(struct proc4430_object));
......@@ -475,9 +476,48 @@ int proc4430_translate_addr(void *handle,
* function also maps the specified address to slave MMU space.
*/
/*
 * Map the given master (MPU) buffer into the slave (Ducati) MMU.
 *
 * Reserves a chunk of slave virtual address space via DMM, programs the
 * Ducati page tables for the page-aligned region, and returns the slave
 * address corresponding to proc_addr.
 *
 * handle:      ProcMgr processor handle (unused here)
 * proc_addr:   master-side address of the buffer
 * size:        size of the buffer in bytes
 * mapped_addr: out — slave address the buffer is mapped at
 * mapped_size: out — actual (page-aligned) mapped size in bytes
 * map_attribs: DSP_MAP* attribute flags passed through to the mapper
 *
 * Returns 0 on success or a negative error code.
 */
int proc4430_map(void *handle, u32 proc_addr,
		u32 size, u32 *mapped_addr, u32 *mapped_size, u32 map_attribs)
{
	int retval;
	u32 da_align;
	u32 da;
	u32 va_align;
	u32 size_align;

	/* The original ignored this return value and went on to map with
	 * an uninitialized 'da' on failure */
	retval = dmm_reserve_memory(size, &da);
	if (WARN_ON(retval < 0))
		return retval;

	/* Calculate the page-aligned DA, VA and size */
	da_align = PG_ALIGN_LOW((u32)da, PAGE_SIZE);
	va_align = PG_ALIGN_LOW(proc_addr, PAGE_SIZE);
	size_align = PG_ALIGN_HIGH(size + (u32)proc_addr - va_align,
					PAGE_SIZE);
	retval = ducati_mem_map(va_align, da_align, size_align, map_attribs);
	if (WARN_ON(retval < 0))
		return retval;

	/* Mapped address = MSB of DA | LSB of VA */
	*mapped_addr = (da_align | (proc_addr & (PAGE_SIZE - 1)));
	/* The original never wrote *mapped_size, so callers copied
	 * garbage back to user space */
	*mapped_size = size_align;
	return 0;
}
/*=================================================
 * Function to unmap slave address from host address space
 *
 * Unmaps the previously mapped slave address: releases the DMM
 * reservation (which reports the reserved size) and then clears the
 * corresponding Ducati MMU entries.
 *
 * handle:      ProcMgr processor handle (unused here)
 * mapped_addr: slave address returned earlier by proc4430_map
 *
 * Returns 0 on success or a negative error code.
 */
int proc4430_unmap(void *handle, u32 mapped_addr)
{
	/* u32, not int: dmm_unreserve_memory takes a u32 * and the
	 * original's 'int size_align' produced an incompatible-pointer
	 * argument */
	u32 da_align;
	u32 size_align;
	int ret_val;

	da_align = PG_ALIGN_LOW(mapped_addr, PAGE_SIZE);
	ret_val = dmm_unreserve_memory(da_align, &size_align);
	if (WARN_ON(ret_val < 0))
		goto error_exit;
	ret_val = ducati_mem_unmap(da_align, size_align);
	if (WARN_ON(ret_val < 0))
		goto error_exit;
	return 0;

error_exit:
	printk(KERN_WARNING "proc4430_unmap failed !!!!\n");
	return ret_val;
}
......@@ -101,7 +101,10 @@ int proc4430_translate_addr(void *handle, void **dst_addr,
/* Function to map slave address to host address space */
int proc4430_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
u32 *mapped_size);
u32 *mapped_size, u32 map_attribs);
/* Function to unmap the slave address to host address space */
int proc4430_unmap(void *handle, u32 mapped_addr);
/* =================================================
* APIs
......
......@@ -118,7 +118,13 @@ typedef int (*processor_translate_addr_fxn) (void *handle, void **dst_addr,
* address space
*/
typedef int (*processor_map_fxn) (void *handle, u32 proc_addr, u32 size,
u32 *mapped_addr, u32 *mapped_size);
u32 *mapped_addr, u32 *mapped_size, u32 map_attribs);
/*
 *Function pointer type for the function to unmap address from slave
 * address space
 */
typedef int (*processor_unmap_fxn) (void *handle, u32 mapped_addr);
/* =============================
* Function table interface
......@@ -146,6 +152,8 @@ struct processor_fxn_table {
/* Function to translate between address ranges */
processor_map_fxn map;
/* Function to map slave addresses to master address space */
processor_unmap_fxn unmap;
/* Function to unmap slave addresses to master address space */
};
/* =============================
......@@ -163,7 +171,7 @@ struct processor_object {
/* State of the slave processor */
enum proc_mgr_boot_mode boot_mode;
/* Boot mode for the slave processor. */
void * object;
void *object;
/* Pointer to Processor-specific object. */
u16 proc_id;
/* Processor ID addressed by this Processor instance. */
......
......@@ -315,7 +315,7 @@ inline int processor_translate_addr(void *handle, void **dst_addr,
* and returns the mapped address and size.
*/
inline int processor_map(void *handle, u32 proc_addr, u32 size,
u32 *mapped_addr, u32 *mapped_size)
u32 *mapped_addr, u32 *mapped_size, u32 map_attribs)
{
int retval = 0;
struct processor_object *proc_handle =
......@@ -329,10 +329,24 @@ inline int processor_map(void *handle, u32 proc_addr, u32 size,
BUG_ON(proc_handle->proc_fxn_table.map == NULL);
retval = proc_handle->proc_fxn_table.map(handle, proc_addr,
size, mapped_addr, mapped_size);
size, mapped_addr, mapped_size, map_attribs);
return retval;
}
/*
 * Function to unmap an address from the slave address space.
 *
 * Dispatches to the processor-specific unmap implementation through the
 * instance's function table.
 *
 * handle:      processor_object handle
 * mapped_addr: slave address previously returned by processor_map
 *
 * Returns the implementation's status code.
 */
inline int processor_unmap(void *handle, u32 mapped_addr)
{
	int retval = 0;
	struct processor_object *proc_handle =
			(struct processor_object *)handle;

	/* Guard the dispatch like processor_map does */
	BUG_ON(proc_handle->proc_fxn_table.unmap == NULL);
	retval = proc_handle->proc_fxn_table.unmap(handle, mapped_addr);
	return retval;
}
/*
* Function that registers for notification when the slave
......
......@@ -64,7 +64,9 @@ int processor_translate_addr(void *handle, void **dst_addr,
/* Function to map address to slave address space */
int processor_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
u32 *mapped_size);
u32 *mapped_size, u32 map_attribs);
/* Function to unmap address to slave address space */
int processor_unmap(void *handle, u32 mapped_addr);
/* Function that registers for notification when the slave processor
* transitions to any of the states specified.
......
......@@ -650,7 +650,7 @@ EXPORT_SYMBOL(proc_mgr_translate_addr);
*
*/
int proc_mgr_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
u32 *mapped_size, enum proc_mgr_map_type type)
u32 *mapped_size, u32 map_attribs)
{
int retval = 0;
struct proc_mgr_object *proc_mgr_handle =
......@@ -664,13 +664,35 @@ int proc_mgr_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
/* Map to host address space. */
retval = processor_map(proc_mgr_handle->proc_handle, proc_addr,
size, mapped_addr, mapped_size);
size, mapped_addr, mapped_size, map_attribs);
WARN_ON(retval < 0);
mutex_unlock(proc_mgr_obj_state.gate_handle);
return retval;;
}
EXPORT_SYMBOL(proc_mgr_map);
/*============================================
 * Function to unmap address from slave address space.
 *
 * This function unmaps the provided slave address from the host
 * address space, serialized under the module gate.
 *
 * handle:      proc_mgr_object handle
 * mapped_addr: slave address previously returned by proc_mgr_map
 *
 * Returns the status from the processor-specific unmap.
 */
int proc_mgr_unmap(void *handle, u32 mapped_addr)
{
	int retval = 0;
	struct proc_mgr_object *proc_mgr_handle =
			(struct proc_mgr_object *)handle;

	WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
	/* Unmap from host address space. */
	retval = processor_unmap(proc_mgr_handle->proc_handle, mapped_addr);
	WARN_ON(retval < 0);
	mutex_unlock(proc_mgr_obj_state.gate_handle);
	return retval;
}
EXPORT_SYMBOL(proc_mgr_unmap);
/*=================================
* Function that registers for notification when the slave
* processor transitions to any of the states specified.
......
......@@ -165,10 +165,10 @@ typedef int (*proc_mgr_callback_fxn)(u16 proc_id, void *handle,
enum proc_mgr_state from_state, enum proc_mgr_state to_state);
/* Function to get the default configuration for the ProcMgr module. */
void proc_mgr_get_config(struct proc_mgr_config*cfg);
void proc_mgr_get_config(struct proc_mgr_config *cfg);
/* Function to setup the ProcMgr module. */
int proc_mgr_setup(struct proc_mgr_config*cfg);
int proc_mgr_setup(struct proc_mgr_config *cfg);
/* Function to destroy the ProcMgr module. */
int proc_mgr_destroy(void);
......@@ -240,7 +240,10 @@ int proc_mgr_translate_addr(void *handle, void **dst_addr,
/* Function that maps the specified slave address to master address space. */
int proc_mgr_map(void *handle, u32 proc_addr, u32 size,
u32 *mappedAddr, u32 *mapped_size, enum proc_mgr_map_type type);
u32 *mappedAddr, u32 *mapped_size, u32 map_attribs);
/* Function that unmaps the specified slave address to master address space. */
int proc_mgr_unmap(void *handle, u32 mapped_addr);
/* Function that registers for notification when the slave processor
* transitions to any of the states specified.
......
......@@ -74,10 +74,10 @@ static void __exit proc_mgr_drv_finalize_module(void);
/*
* name DriverOps
*
* desc Function to invoke the APIs through ioctl.
* desc Function to invoke the APIs through ioctl
*
*/
static struct file_operations procmgr_fops = {
static const struct file_operations procmgr_fops = {
.open = proc_mgr_drv_open,
.ioctl = proc_mgr_drv_ioctl,
.release = proc_mgr_drv_release,
......@@ -498,7 +498,7 @@ static int proc_mgr_drv_ioctl(struct inode *inode, struct file *filp,
src_args.proc_addr, src_args.size,
&(src_args.mapped_addr),
&(src_args.mapped_size),
src_args.type);
src_args.map_attribs);
if (WARN_ON(retval < 0))
goto func_exit;
retval = copy_to_user((void *)(args),
......@@ -508,6 +508,21 @@ static int proc_mgr_drv_ioctl(struct inode *inode, struct file *filp,
}
break;
case CMD_PROCMGR_UNMAP:
{
struct proc_mgr_cmd_args_unmap src_args;
/* Copy the full args from user-side. */
retval = copy_from_user((void *)&src_args,
(const void *)(args),
sizeof(struct proc_mgr_cmd_args_unmap));
if (WARN_ON(retval < 0))
goto func_exit;
retval = proc_mgr_unmap(src_args.handle,
(src_args.mapped_addr));
WARN_ON(retval < 0);
}
case CMD_PROCMGR_REGISTERNOTIFY:
{
struct proc_mgr_cmd_args_register_notify src_args;
......@@ -555,8 +570,7 @@ func_exit:
/* Set the retval and copy the common args to user-side. */
command_args.api_status = retval;
retval = copy_to_user((void *)cmd_args,
(const void *) &command_args,
sizeof(struct proc_mgr_cmd_args));
(const void *)&command_args, sizeof(struct proc_mgr_cmd_args));
WARN_ON(retval < 0);
return retval;
......
......@@ -171,6 +171,12 @@ struct proc_mgr_cmd_args {
*/
#define CMD_PROCMGR_GETPROCINFO (PROCMGR_BASE_CMD + 26)
/*
* Command for ProcMgr_unmap
*/
#define CMD_PROCMGR_UNMAP (PROCMGR_BASE_CMD + 27)
/* ----------------------------------------------------------------------------
* Command arguments for ProcMgr
......@@ -460,10 +466,22 @@ struct proc_mgr_cmd_args_map {
/*Return parameter: Mapped address in host address space */
u32 mapped_size;
/*Return parameter: Mapped size */
enum proc_mgr_map_type type;
u32 map_attribs;
/*Type of mapping. */
};
/*
 * Command arguments for ProcMgr_unmap
 */
struct proc_mgr_cmd_args_unmap {
struct proc_mgr_cmd_args commond_args;
/*Common command args */
void *handle;
/*Handle to the ProcMgr object */
u32 mapped_addr;
/* Mapped address in host address space */
};
/*
* Command arguments for ProcMgr_registerNotify
*/
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment