Commit 0ebc126d authored by Hari Kanigeri

SYSLINK: proc - adapt proc module to iommu module

This patch migrates the proc module from its hand-built Ducati MMU page-table management (local L1/L2 tables and hw_mmu_* register programming) to the OMAP iommu module: the iommu handle is obtained with iommu_get("ducati"), mappings are programmed with iopgtable_store_entry(), torn down with iopgtable_clear_entry(), and the handle is released with iommu_put().
Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
parent 483294fb
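Illustration only, not part of this commit: a minimal sketch of the new mapping path, built from the calls visible in the diff below (iommu_get, iopgtable_store_entry, struct iotlb_entry). The example_* name, the choice of a single 4 KB page, and the attribute values are hypothetical; the caller is assumed to hold the handle returned by iommu_get("ducati"), as ducati_setup() obtains it further down.

/* Illustration only -- not part of this commit. */
#include <linux/types.h>
#include <linux/err.h>
#include <mach/iommu.h>
#include "../../../arch/arm/plat-omap/iopgtable.h"

static int example_map_one_4k_page(struct iommu *obj, u32 da, u32 pa)
{
	struct iotlb_entry e;

	e.da = da;				/* Ducati-side (device) address */
	e.pa = pa;				/* physical address backing it */
	e.pgsz = MMU_CAM_PGSZ_4K;		/* one 4 KB page */
	e.prsvd = MMU_CAM_P;			/* preserved entry, as pte_set() sets it */
	e.valid = MMU_CAM_V;
	e.endian = MMU_RAM_ENDIAN_LITTLE;
	e.elsz = MMU_RAM_ELSZ_32;
	e.mixed = MMU_RAM_MIXED;		/* CPU element size (HW_MMU_CPUES case) */

	/* One call replaces the hand-built L1/L2 page-table updates */
	return iopgtable_store_entry(obj, &e);
}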
@@ -56,7 +56,8 @@
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <mach/iommu.h>
#include "../../../arch/arm/plat-omap/iopgtable.h"
#include <syslink/ducatienabler.h>
@@ -66,9 +67,6 @@
#define DPRINTK(fmt, args...)
#endif
#define base_ducati_l2_mmuPhys 0x55082000
/*
* Macro to define the physical memory address for the
* Ducati Base image. The 74 MB memory is preallocated
@@ -80,38 +78,6 @@
#define phys_to_page(phys) pfn_to_page((phys) >> PAGE_SHIFT)
/* Attributes used to manage the DSP MMU page tables */
struct pg_table_attrs {
struct sync_cs_object *hcs_object;/* Critical section object handle */
u32 l1_base_pa; /* Physical address of the L1 PT */
u32 l1_base_va; /* Virtual address of the L1 PT */
u32 l1_size; /* Size of the L1 PT */
u32 l1_tbl_alloc_pa;
/* Physical address of Allocated mem for L1 table. May not be aligned */
u32 l1_tbl_alloc_va;
/* Virtual address of Allocated mem for L1 table. May not be aligned */
u32 l1_tbl_alloc_sz;
/* Size of consistent memory allocated for L1 table.
* May not be aligned */
u32 l2_base_pa; /* Physical address of the L2 PT */
u32 l2_base_va; /* Virtual address of the L2 PT */
u32 l2_size; /* Size of the L2 PT */
u32 l2_tbl_alloc_pa;
/* Physical address of Allocated mem for L2 table. May not be aligned */
u32 l2_tbl_alloc_va;
/* Virtual address of Allocated mem for L2 table. May not be aligned */
u32 ls_tbl_alloc_sz;
/* Size of consistent memory allocated for L2 table.
* May not be aligned */
u32 l2_num_pages; /* Number of allocated L2 PT */
struct page_info *pg_info;
};
/* Attributes of L2 page tables for DSP MMU.*/
struct page_info {
/* Number of valid PTEs in the L2 PT*/
u32 num_entries;
};
enum pagetype {
SECTION = 0,
@@ -120,13 +86,12 @@ enum pagetype {
SUPER_SECTION = 3
};
static struct pg_table_attrs *p_pt_attrs;
static u32 mmu_index_next;
static u32 base_ducati_l2_mmu;
static u32 shm_phys_addr;
static u32 shm_virt_addr;
struct iommu *ducati_iommu_ptr;
static void bad_page_dump(u32 pa, struct page *pg)
{
pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
@@ -134,208 +99,69 @@ static void bad_page_dump(u32 pa, struct page *pg)
BUG();
}
/*============================================
* Print the DSP MMU Table Entries
*/
void dbg_print_ptes(bool ashow_inv_entries, bool ashow_repeat_entries)
{
u32 pte_val;
u32 pte_size;
u32 last_sect = 0;
u32 this_sect = 0;
u32 cur_l1_entry;
u32 cur_l2_entry;
u32 pg_tbl_va;
u32 l1_base_va;
u32 l2_base_va = 0;
u32 l2_base_pa = 0;
l1_base_va = p_pt_attrs->l1_base_va;
pg_tbl_va = l1_base_va;
DPRINTK("\n*** Currently programmed PTEs : Max possible L1 Entries"
"[%d] ***\n", (p_pt_attrs->l1_size / sizeof(u32)));
/* Walk all L1 entries, dump out info. Dive into L2 if necessary */
for (cur_l1_entry = 0; cur_l1_entry <
(p_pt_attrs->l1_size / sizeof(u32)); cur_l1_entry++) {
/*pte_val = pL1PgTbl[cur_l1_entry];*/
pte_val = *((u32 *)(pg_tbl_va + (cur_l1_entry * sizeof(u32))));
pte_size = hw_mmu_pte_sizel1(pte_val);
if (pte_size == HW_PAGE_SIZE_16MB) {
this_sect = hw_mmu_pte_phyaddr(pte_val, pte_size);
if (this_sect != last_sect) {
last_sect = this_sect;
DPRINTK("PTE L1 [16 MB] -> VA = "
"0x%x PA = 0x%x\n",
cur_l1_entry << 24, this_sect);
} else if (ashow_repeat_entries != false)
DPRINTK(" {REPEAT}\n");
} else if (pte_size == HW_PAGE_SIZE_1MB) {
this_sect = hw_mmu_pte_phyaddr(pte_val, pte_size);
if (this_sect != last_sect) {
last_sect = this_sect;
DPRINTK("PTE L1 [1 MB ] -> VA = "
"0x%x PA = 0x%x\n",
cur_l1_entry << 20, this_sect);
} else if (ashow_repeat_entries != false)
DPRINTK(" {REPEAT}\n");
} else if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
/* Get the L2 data for this */
DPRINTK("PTE L1 [L2 ] -> VA = "
"0x%x\n", cur_l1_entry << 20);
/* Get the L2 PA from the L1 PTE, and find corresponding L2 VA*/
l2_base_pa = hw_mmu_pte_coarsel1(pte_val);
l2_base_va = l2_base_pa - p_pt_attrs->l2_base_pa +
p_pt_attrs->l2_base_va;
for (cur_l2_entry = 0;
cur_l2_entry < (HW_MMU_COARSE_PAGE_SIZE / sizeof(u32));
cur_l2_entry++) {
pte_val = *((u32 *)(l2_base_va +
(cur_l2_entry * sizeof(u32))));
pte_size = hw_mmu_pte_sizel2(pte_val);
if ((pte_size == HW_PAGE_SIZE_64KB) ||
(pte_size == HW_PAGE_SIZE_4KB)) {
this_sect = hw_mmu_pte_phyaddr
(pte_val, pte_size);
if (this_sect != last_sect) {
last_sect = this_sect;
DPRINTK("PTE L2 [%s KB] ->"
"VA = 0x%x PA = 0x%x\n",
(pte_size ==
HW_PAGE_SIZE_64KB) ?
"64" : "4",
((cur_l1_entry << 20)
| (cur_l2_entry << 12)),
this_sect);
} else if (ashow_repeat_entries
!= false)
DPRINTK("{REPEAT}");
} else if (ashow_inv_entries != false) {
DPRINTK("PTE L2 [INVALID] -> VA = "
"0x%x",
((cur_l1_entry << 20) |
(cur_l2_entry << 12)));
continue;
}
}
} else if (ashow_inv_entries != false) {
/* Entry is invalid (not set), skip it */
DPRINTK("PTE L1 [INVALID] -> VA = 0x%x",
cur_l1_entry << 20);
continue;
}
}
/* Dump the TLB entries as well */
DPRINTK("\n*** Currently programmed TLBs ***\n");
hw_mmu_tlb_dump(base_ducati_l2_mmu, false);
DPRINTK("*** DSP MMU DUMP COMPLETED ***\n");
}
/*============================================
* This function calculates PTE address (MPU virtual) to be updated
* It also manages the L2 page tables
*/
static int pte_set(u32 pa, u32 va, u32 size, struct hw_mmu_map_attrs_t *attrs)
{
u32 i;
u32 pte_val;
u32 pte_addr_l1;
u32 pte_size;
u32 pg_tbl_va; /* Base address of the PT that will be updated */
u32 l1_base_va;
/* Compiler warns that the next three variables might be used
* uninitialized in this function. Doesn't seem so. Working around,
* anyways. */
u32 l2_base_va = 0;
u32 l2_base_pa = 0;
u32 l2_page_num = 0;
struct pg_table_attrs *pt = p_pt_attrs;
int status = 0;
DPRINTK("> pte_set ppg_table_attrs %x, pa %x, va %x, "
"size %x, attrs %x\n", (u32)pt, pa, va, size, (u32)attrs);
l1_base_va = pt->l1_base_va;
pg_tbl_va = l1_base_va;
if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) {
/* Find whether the L1 PTE points to a valid L2 PT */
pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
pte_val = *(u32 *)pte_addr_l1;
pte_size = hw_mmu_pte_sizel1(pte_val);
} else {
return -EINVAL;
}
/* FIX ME */
/* TODO: ADD synchronization element */
/* sync_enter_cs(pt->hcs_object);*/
if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
/* Get the L2 PA from the L1 PTE, and find
* corresponding L2 VA */
l2_base_pa = hw_mmu_pte_coarsel1(pte_val);
l2_base_va = l2_base_pa - pt->l2_base_pa +
pt->l2_base_va;
l2_page_num = (l2_base_pa - pt->l2_base_pa) /
HW_MMU_COARSE_PAGE_SIZE;
} else if (pte_size == 0) {
/* L1 PTE is invalid. Allocate a L2 PT and
* point the L1 PTE to it */
/* Find a free L2 PT. */
for (i = 0; (i < pt->l2_num_pages) &&
(pt->pg_info[i].num_entries != 0); i++)
;
if (i < pt->l2_num_pages) {
l2_page_num = i;
l2_base_pa = pt->l2_base_pa + (l2_page_num *
HW_MMU_COARSE_PAGE_SIZE);
l2_base_va = pt->l2_base_va + (l2_page_num *
HW_MMU_COARSE_PAGE_SIZE);
/* Endianness attributes are ignored for
* HW_MMU_COARSE_PAGE_SIZE */
status =
hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
HW_MMU_COARSE_PAGE_SIZE, attrs);
} else {
status = -ENOMEM;
}
} else {
/* Found valid L1 PTE of another size.
* Should not overwrite it. */
status = -EINVAL;
struct iotlb_entry tlb_entry;
switch (size) {
case HW_PAGE_SIZE_16MB:
tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
break;
case HW_PAGE_SIZE_1MB:
tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
break;
case HW_PAGE_SIZE_64KB:
tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
break;
case HW_PAGE_SIZE_4KB:
tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
break;
}
if (status == 0) {
pg_tbl_va = l2_base_va;
if (size == HW_PAGE_SIZE_64KB)
pt->pg_info[l2_page_num].num_entries += 16;
else
pt->pg_info[l2_page_num].num_entries++;
DPRINTK("L2 BaseVa %x, BasePa %x, "
"PageNum %x num_entries %x\n", l2_base_va,
l2_base_pa, l2_page_num,
pt->pg_info[l2_page_num].num_entries);
tlb_entry.prsvd = MMU_CAM_P;
tlb_entry.valid = MMU_CAM_V;
switch (attrs->element_size) {
case HW_ELEM_SIZE_8BIT:
tlb_entry.elsz = MMU_RAM_ELSZ_8;
break;
case HW_ELEM_SIZE_16BIT:
tlb_entry.elsz = MMU_RAM_ELSZ_16;
break;
case HW_ELEM_SIZE_32BIT:
tlb_entry.elsz = MMU_RAM_ELSZ_32;
break;
case HW_ELEM_SIZE_64BIT:
tlb_entry.elsz = 0x3; /* No translation */
break;
}
/* sync_leave_cs(pt->hcs_object);*/
switch (attrs->endianism) {
case HW_LITTLE_ENDIAN:
tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
break;
case HW_BIG_ENDIAN:
tlb_entry.endian = MMU_RAM_ENDIAN_BIG;
break;
}
if (status == 0) {
DPRINTK("PTE pg_tbl_va %x, pa %x, va %x, size %x\n",
pg_tbl_va, pa, va, size);
DPRINTK("PTE endianism %x, element_size %x, "
"mixedSize %x\n", attrs->endianism,
attrs->element_size, attrs->mixedSize);
status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
if (status == RET_OK)
status = 0;
switch (attrs->mixedSize) {
case HW_MMU_TLBES:
tlb_entry.mixed = 0;
break;
case HW_MMU_CPUES:
tlb_entry.mixed = MMU_RAM_MIXED;
break;
}
DPRINTK("< pte_set status %x\n", status);
return status;
tlb_entry.da = va;
tlb_entry.pa = pa;
DPRINTK("pte set ducati_iommu_ptr = 0x%x, tlb_entry = 0x%x \n",
ducati_iommu_ptr, tlb_entry);
if (iopgtable_store_entry(ducati_iommu_ptr, &tlb_entry))
goto error_exit;
return 0;
error_exit:
printk(KERN_ERR "pte set failure \n");
return -EFAULT;
}
@@ -394,187 +220,57 @@ static int pte_update(u32 pa, u32 va, u32 size,
*/
int ducati_mem_unmap(u32 da, u32 num_bytes)
{
u32 L1_base_va;
u32 L2_base_va;
u32 L2_base_pa;
u32 L2_page_num;
u32 pte_val;
u32 pte_size;
u32 pte_count;
u32 pte_addr_l1;
u32 pte_addr_l2 = 0;
u32 rem_bytes;
u32 rem_bytes_l2;
u32 vaCurr;
u32 bytes;
struct page *pg = NULL;
int status = 0;
u32 temp;
u32 patemp = 0;
u32 pAddr;
u32 numof4Kpages = 0;
DPRINTK("> ducati_mem_unmap da 0x%x, "
"NumBytes 0x%x\n", da, num_bytes);
vaCurr = da;
rem_bytes = num_bytes;
rem_bytes_l2 = 0;
L1_base_va = p_pt_attrs->l1_base_va;
pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
while (rem_bytes) {
u32 vaCurrOrig = vaCurr;
/* Find whether the L1 PTE points to a valid L2 PT */
pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
pte_val = *(u32 *)pte_addr_l1;
pte_size = hw_mmu_pte_sizel1(pte_val);
if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
/*
* Get the L2 PA from the L1 PTE, and find
* corresponding L2 VA
*/
L2_base_pa = hw_mmu_pte_coarsel1(pte_val);
L2_base_va = L2_base_pa - p_pt_attrs->l2_base_pa
+ p_pt_attrs->l2_base_va;
L2_page_num = (L2_base_pa - p_pt_attrs->l2_base_pa) /
HW_MMU_COARSE_PAGE_SIZE;
/*
* Find the L2 PTE address from which we will start
* clearing, the number of PTEs to be cleared on this
* page, and the size of VA space that needs to be
* cleared on this L2 page
*/
pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) /
sizeof(u32);
if (rem_bytes < (pte_count * PAGE_SIZE))
pte_count = rem_bytes / PAGE_SIZE;
rem_bytes_l2 = pte_count * PAGE_SIZE;
DPRINTK("ducati_mem_unmap L2_base_pa %x, "
"L2_base_va %x pte_addr_l2 %x,"
"rem_bytes_l2 %x\n", L2_base_pa, L2_base_va,
pte_addr_l2, rem_bytes_l2);
/*
* Unmap the VA space on this L2 PT. A quicker way
* would be to clear pte_count entries starting from
* pte_addr_l2. However, below code checks that we don't
* clear invalid entries or less than 64KB for a 64KB
* entry. Similar checking is done for L1 PTEs too
* below
*/
while (rem_bytes_l2) {
pte_val = *(u32 *)pte_addr_l2;
pte_size = hw_mmu_pte_sizel2(pte_val);
/* vaCurr aligned to pte_size? */
if ((pte_size != 0) && (rem_bytes_l2
>= pte_size) &&
!(vaCurr & (pte_size - 1))) {
/* Collect Physical addresses from VA */
pAddr = (pte_val & ~(pte_size - 1));
if (pte_size == HW_PAGE_SIZE_64KB)
numof4Kpages = 16;
else
numof4Kpages = 1;
temp = 0;
while (temp++ < numof4Kpages) {
if (pfn_valid
(__phys_to_pfn
(patemp))) {
pg = phys_to_page
(pAddr);
if (page_count
(pg) < 1) {
bad_page_dump
(pAddr, pg);
}
SetPageDirty(pg);
page_cache_release(pg);
}
pAddr += HW_PAGE_SIZE_4KB;
}
if (hw_mmu_pte_clear(pte_addr_l2,
vaCurr, pte_size) == RET_OK) {
rem_bytes_l2 -= pte_size;
vaCurr += pte_size;
pte_addr_l2 += (pte_size >> 12)
* sizeof(u32);
int temp = 0;
u32 nent;
u32 phyaddress;
s32 numofBytes = num_bytes;
while (num_bytes > 0) {
u32 *iopgd = iopgd_offset(ducati_iommu_ptr, da);
if (*iopgd & IOPGD_TABLE) {
u32 *iopte = iopte_offset(iopgd, da);
if (*iopte & IOPTE_LARGE) {
nent = 16;
/* rewind to the 1st entry */
iopte = (u32 *)((u32)iopte & IOLARGE_MASK);
} else
nent = 1;
phyaddress = (*iopte) & IOPAGE_MASK;
} else {
status = -EFAULT;
goto EXIT_LOOP;
}
if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
nent = 4096;
/* rewind to the 1st entry */
iopgd = (u32 *)((u32)iopgd & IOSUPER_MASK);
} else
status = -EFAULT;
}
if (rem_bytes_l2 != 0) {
status = -EFAULT;
goto EXIT_LOOP;
}
p_pt_attrs->pg_info[L2_page_num].num_entries -=
pte_count;
if (p_pt_attrs->pg_info[L2_page_num].num_entries
== 0) {
/*
* Clear the L1 PTE pointing to the
* L2 PT
*/
if (RET_OK != hw_mmu_pte_clear(L1_base_va,
vaCurrOrig, HW_MMU_COARSE_PAGE_SIZE)) {
status = -EFAULT;
goto EXIT_LOOP;
nent = 256;
phyaddress = (*iopgd) & IOPGD_MASK;
}
for (temp = 0; temp < nent; temp++) {
if (pfn_valid(__phys_to_pfn(phyaddress))) {
pg = phys_to_page(phyaddress);
if (page_count(pg) < 1) {
pr_info("DSPBRIDGE:UNMAP function: "
"COUNT 0 FOR PA 0x%x,"
" size = 0x%x\n",
phyaddress, numofBytes);
bad_page_dump(phyaddress, pg);
}
rem_bytes -= pte_count * PAGE_SIZE;
DPRINTK("ducati_mem_unmap L2_page_num %x, "
"num_entries %x, pte_count %x, status: 0x%x\n",
L2_page_num,
p_pt_attrs->pg_info[L2_page_num].num_entries,
pte_count, status);
} else
/* vaCurr aligned to pte_size? */
/* pte_size = 1 MB or 16 MB */
if ((pte_size != 0) && (rem_bytes >= pte_size) &&
!(vaCurr & (pte_size - 1))) {
if (pte_size == HW_PAGE_SIZE_1MB)
numof4Kpages = 256;
else
numof4Kpages = 4096;
temp = 0;
/* Collect Physical addresses from VA */
pAddr = (pte_val & ~(pte_size - 1));
while (temp++ < numof4Kpages) {
pg = phys_to_page(pAddr);
if (page_count(pg) < 1)
bad_page_dump(pAddr, pg);
SetPageDirty(pg);
page_cache_release(pg);
pAddr += HW_PAGE_SIZE_4KB;
}
if (hw_mmu_pte_clear(L1_base_va, vaCurr,
pte_size) == RET_OK) {
rem_bytes -= pte_size;
vaCurr += pte_size;
} else {
status = -EFAULT;
goto EXIT_LOOP;
phyaddress += HW_PAGE_SIZE_4KB;
}
} else {
status = -EFAULT;
bytes = iopgtable_clear_entry(ducati_iommu_ptr, da);
num_bytes -= bytes;
da += bytes;
}
}
/*
* It is better to flush the TLB here, so that any stale old entries
* get flushed
*/
EXIT_LOOP:
hw_mmu_tlb_flushAll(base_ducati_l2_mmu);
DPRINTK("ducati_mem_unmap vaCurr %x, pte_addr_l1 %x "
"pte_addr_l2 %x\n", vaCurr, pte_addr_l1, pte_addr_l2);
DPRINTK("< ducati_mem_unmap status %x rem_bytes %x, "
"rem_bytes_l2 %x\n", status, rem_bytes, rem_bytes_l2);
return status;
}
return 0;
}
/*
* ======== user_va2pa ========
* Purpose:
@@ -602,7 +298,6 @@ static u32 user_va2pa(struct mm_struct *mm, u32 address)
return 0;
}
/*============================================
* This function maps MPU buffer to the DSP address space. It performs
* linear to physical address translation if required. It translates each
@@ -770,11 +465,6 @@ func_cont:
if (pg_i)
ducati_mem_unmap(ul_virt_addr, (pg_i * PAGE_SIZE));
}
/* In any case, flush the TLB
* This is called from here instead from pte_update to avoid unnecessary
* repetition while mapping non-contiguous physical regions of a virtual
* region */
hw_mmu_tlb_flushAll(base_ducati_l2_mmu);
WARN_ON(status < 0);
DPRINTK("< WMD_BRD_MemMap status %x\n", status);
return status;
@@ -884,9 +574,10 @@ static int add_dsp_mmu_entry(u32 *phys_addr, u32 *dsp_addr,
u32 entry_size = 0;
int status = 0;
u32 page_size = HW_PAGE_SIZE_1MB;
struct hw_mmu_map_attrs_t map_attrs = { HW_LITTLE_ENDIAN,
HW_ELEM_SIZE_16BIT,
HW_MMU_CPUES };
u32 flags = 0;
flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
DSP_MAPPHYSICALADDR);
DPRINTK("Entered add_dsp_mmu_entry phys_addr = "
"0x%x, dsp_addr = 0x%x,size = 0x%x\n",
@@ -905,21 +596,15 @@
else if (size_tlb == SMALL_PAGE)
page_size = HW_PAGE_SIZE_4KB;
if (status == 0) {
hw_mmu_tlb_add((base_ducati_l2_mmu),
*phys_addr, *dsp_addr,
page_size, mmu_index_next++,
&map_attrs, HW_SET, HW_SET);
ducati_mem_map(*phys_addr,
*dsp_addr, page_size, flags);
mapped_size += entry_size;
*phys_addr += entry_size;
*dsp_addr += entry_size;
if (mmu_index_next > 32) {
status = -EINVAL;
break;
}
}
}
DPRINTK("Exited add_dsp_mmu_entryphys_addr = \
0x%x, dsp_addr = 0x%x\n",
DPRINTK("Exited add_dsp_mmu_entryphys_addr = "
"0x%x, dsp_addr = 0x%x\n",
*phys_addr, *dsp_addr);
return status;
}
@@ -979,8 +664,6 @@ static int add_entry_ext(u32 *phys_addr, u32 *dsp_addr,
int ducati_mmu_init(u32 a_phy_addr)
{
int ret_val = 0;
u32 ducati_mmu_linear_addr = base_ducati_l2_mmu;
u32 reg_value = 0;
u32 phys_addr = 0;
u32 num_l4_entries;
u32 i = 0;
@@ -994,14 +677,6 @@ int ducati_mmu_init(u32 a_phy_addr)
num_l3_mem_entries = sizeof(l3_memory_regions) /
sizeof(struct memory_entry);
DPRINTK("\n Programming Ducati MMU using linear address [0x%x]",
ducati_mmu_linear_addr);
/* Disable the MMU & TWL */
hw_mmu_disable(base_ducati_l2_mmu);
hw_mmu_twl_disable(base_ducati_l2_mmu);
mmu_index_next = 0;
phys_addr = a_phy_addr;
DPRINTK("Value before calling add_dsp_mmu_entry phys_addr = 0x%x,"
"ducati_boot_addr = 0x%x\n",
@@ -1015,12 +690,6 @@
"ducati_boot_addr = 0x%x\n",
phys_addr, ducati_boot_addr);
/* Lock the base counter*/
hw_mmu_numlocked_set(ducati_mmu_linear_addr,
mmu_index_next);
hw_mmu_victim_numset(ducati_mmu_linear_addr,
mmu_index_next);
printk(KERN_ALERT " Programming Ducati memory regions\n");
printk(KERN_ALERT "=========================================\n");
for (i = 0; i < num_l3_mem_entries; i++) {
@@ -1069,26 +738,6 @@
goto error_exit;
}
}
/* Set the TTB to point to the L1 page table's physical address */
hw_mmu_ttbset(ducati_mmu_linear_addr, p_pt_attrs->l1_base_pa);
/* Enable the TWL */
hw_mmu_twl_enable(ducati_mmu_linear_addr);
hw_mmu_enable(ducati_mmu_linear_addr);
/* MMU Debug Statements */
reg_value = *((REG u32 *)(ducati_mmu_linear_addr + 0x40));
DPRINTK(" Ducati TWL Status [0x%x]\n", reg_value);
reg_value = *((REG u32 *)(ducati_mmu_linear_addr + 0x4C));
DPRINTK(" Ducati TTB Address [0x%x]\n", reg_value);
reg_value = *((REG u32 *)(ducati_mmu_linear_addr + 0x44));
DPRINTK(" Ducati MMU Status [0x%x]\n", reg_value);
/* Dump the MMU Entries */
dbg_print_ptes(false, false);
DPRINTK(" Programmed Ducati BootVectors 0x0 to first page at [0x%x]",
a_phy_addr);
@@ -1100,124 +749,6 @@ error_exit:
return ret_val;
}
/*========================================
* This sets up the Ducati processor MMU Page tables
*
*/
int init_mmu_page_attribs(u32 l1_size, u32 l1_allign, u32 ls_num_of_pages)
{
u32 pg_tbl_pa;
u32 pg_tbl_va;
u32 align_size;
int status = 0;
base_ducati_l2_mmu = (u32)ioremap(base_ducati_l2_mmuPhys, 0x4000);
p_pt_attrs = kmalloc(sizeof(struct pg_table_attrs), GFP_ATOMIC);
if (p_pt_attrs)
memset(p_pt_attrs, 0, sizeof(struct pg_table_attrs));
else {
status = -ENOMEM;
goto error_exit;
}
p_pt_attrs->l1_size = l1_size;
align_size = p_pt_attrs->l1_size;
/* Align sizes are expected to be power of 2 */
/* we like to get aligned on L1 table size */
pg_tbl_va = (u32)__get_dma_pages(GFP_KERNEL,
get_order(p_pt_attrs->l1_size));
if (pg_tbl_va == (u32)NULL) {
DPRINTK("dma_alloc_coherent failed 0x%x\n", pg_tbl_va);
status = -ENOMEM;
goto error_exit;
}
pg_tbl_pa = __pa(pg_tbl_va);
/* Check if the PA is aligned for us */
if ((pg_tbl_pa) & (align_size-1)) {
/* PA not aligned to page table size ,*/
/* try with more allocation and align */
free_pages(pg_tbl_va, get_order(p_pt_attrs->l1_size));
/* we like to get aligned on L1 table size */
pg_tbl_va = (u32)__get_dma_pages(GFP_KERNEL,
get_order(p_pt_attrs->l1_size * 2));
if (pg_tbl_va == (u32)NULL) {
DPRINTK("__get_dma_pages failed 0x%x\n", pg_tbl_va);
status = -ENOMEM;
goto error_exit;
}
pg_tbl_pa = __pa(pg_tbl_va);
/* We should be able to get aligned table now */
p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
p_pt_attrs->l1_tbl_alloc_sz = p_pt_attrs->l1_size * 2;
/* Align the PA to the next 'align' boundary */
p_pt_attrs->l1_base_pa = ((pg_tbl_pa) + (align_size-1)) &
(~(align_size-1));
p_pt_attrs->l1_base_va = pg_tbl_va + (p_pt_attrs->l1_base_pa -
pg_tbl_pa);
} else {
/* We got aligned PA, cool */
p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
p_pt_attrs->l1_tbl_alloc_sz = p_pt_attrs->l1_size;
p_pt_attrs->l1_base_pa = pg_tbl_pa;
p_pt_attrs->l1_base_va = pg_tbl_va;
}
if (p_pt_attrs->l1_base_va)
memset((u8 *)p_pt_attrs->l1_base_va, 0x00, p_pt_attrs->l1_size);
p_pt_attrs->l2_num_pages = ls_num_of_pages;
p_pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
p_pt_attrs->l2_num_pages;
align_size = 4; /* Make it u32 aligned */
/* we like to get aligned on L1 table size */
pg_tbl_va = (u32)__get_dma_pages(GFP_KERNEL,
get_order(p_pt_attrs->l2_size));
if (pg_tbl_va == (u32)NULL) {
DPRINTK("dma_alloc_coherent failed 0x%x\n", pg_tbl_va);
status = -ENOMEM;
goto error_exit;
}
pg_tbl_pa = __pa(pg_tbl_va);
p_pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
p_pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
p_pt_attrs->ls_tbl_alloc_sz = p_pt_attrs->l2_size;
p_pt_attrs->l2_base_pa = pg_tbl_pa;
p_pt_attrs->l2_base_va = pg_tbl_va;
if (p_pt_attrs->l2_base_va)
memset((u8 *)p_pt_attrs->l2_base_va, 0x00, p_pt_attrs->l2_size);
p_pt_attrs->pg_info = kmalloc(sizeof(struct page_info), GFP_ATOMIC);
if (p_pt_attrs->pg_info)
memset(p_pt_attrs->pg_info, 0, sizeof(struct page_info));
else {
DPRINTK("memory allocation fails for p_pt_attrs->pg_info ");
status = -ENOMEM;
goto error_exit;
}
DPRINTK("L1 pa %x, va %x, size %x\n L2 pa %x, va "
"%x, size %x\n", p_pt_attrs->l1_base_pa,
p_pt_attrs->l1_base_va, p_pt_attrs->l1_size,
p_pt_attrs->l2_base_pa, p_pt_attrs->l2_base_va,
p_pt_attrs->l2_size);
DPRINTK("p_pt_attrs %x L2 NumPages %x pg_info %x\n",
(u32)p_pt_attrs, p_pt_attrs->l2_num_pages,
(u32)p_pt_attrs->pg_info);
return 0;
error_exit:
kfree(p_pt_attrs->pg_info);
if (p_pt_attrs->l1_base_va) {
free_pages(p_pt_attrs->l1_base_va,
get_order(p_pt_attrs->l1_tbl_alloc_sz));
}
if (p_pt_attrs->l2_base_va) {
free_pages(p_pt_attrs->l2_base_va,
get_order(p_pt_attrs->ls_tbl_alloc_sz));
}
WARN_ON(1);
printk(KERN_ALERT "init_mmu_page_attribs FAILED !!!!!\n");
return status;
}
/*========================================
* This sets up the Ducati processor
*
@@ -1225,7 +756,12 @@
int ducati_setup(void)
{
int ret_val = 0;
ret_val = init_mmu_page_attribs(0x8000, 14, 128);
ducati_iommu_ptr = iommu_get("ducati");
if (IS_ERR(ducati_iommu_ptr)) {
pr_err("Error iommu_get\n");
return -EFAULT;
}
if (WARN_ON(ret_val < 0))
goto error_exit;
ret_val = ducati_mmu_init(DUCATI_BASEIMAGE_PHYSICAL_ADDRESS);
@@ -1246,17 +782,9 @@
*/
void ducati_destroy(void)
{
DPRINTK(" Freeing memory allocated in mmu_de_init\n");
if (p_pt_attrs->l2_tbl_alloc_va) {
free_pages(p_pt_attrs->l2_tbl_alloc_va,
get_order(p_pt_attrs->ls_tbl_alloc_sz));
}
if (p_pt_attrs->l1_tbl_alloc_va) {
free_pages(p_pt_attrs->l1_tbl_alloc_va,
get_order(p_pt_attrs->l1_tbl_alloc_sz));
}
if (p_pt_attrs)
kfree((void *)p_pt_attrs);
DPRINTK(" Release IOMMU pointer\n");
iommu_put(ducati_iommu_ptr);
ducati_iommu_ptr = NULL;
return;
}
EXPORT_SYMBOL(ducati_destroy);
@@ -1271,3 +799,5 @@ u32 get_ducati_virt_mem()
return shm_virt_addr;
}
EXPORT_SYMBOL(get_ducati_virt_mem);
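Illustration only, not part of this commit: a minimal lifecycle sketch under the same assumptions, mirroring how the patched code attaches in ducati_setup(), tears mappings down in ducati_mem_unmap() via iopgtable_clear_entry(), and releases the handle in ducati_destroy(). The ducati_example_* names are hypothetical.

/* Illustration only -- not part of this commit. */
#include <linux/types.h>
#include <linux/err.h>
#include <mach/iommu.h>
#include "../../../arch/arm/plat-omap/iopgtable.h"

static struct iommu *ducati_example_iommu;

static int ducati_example_attach(void)
{
	/* Same name that ducati_setup() passes to iommu_get() above */
	ducati_example_iommu = iommu_get("ducati");
	if (IS_ERR(ducati_example_iommu))
		return PTR_ERR(ducati_example_iommu);
	return 0;
}

static void ducati_example_unmap(u32 da, u32 num_bytes)
{
	s32 remaining = num_bytes;

	/* iopgtable_clear_entry() returns how many bytes it unmapped */
	while (remaining > 0) {
		u32 bytes = iopgtable_clear_entry(ducati_example_iommu, da);
		if (!bytes)
			break;	/* nothing was mapped at this address */
		da += bytes;
		remaining -= bytes;
	}
}

static void ducati_example_detach(void)
{
	iommu_put(ducati_example_iommu);
	ducati_example_iommu = NULL;
}

The patched ducati_mem_unmap() above does more than this sketch: it first walks the page directory with iopgd_offset()/iopte_offset() to locate the backing physical pages and sanity-check their reference counts before iopgtable_clear_entry() tears each mapping down.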