Commit 3a05a606 authored by Toshihiro Kobayashi, committed by Paul Mundt

ARM: OMAP: DSP gateway updates.

- Add api_ck control needed for McBSP initialization.
- Bug fix for process list handling.
- Conversion from internal pool allocators to mempool API.
Signed-off-by: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
Signed-off-by: Paul Mundt <paul.mundt@nokia.com>
parent 7a0d12e1
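
Note on the mempool conversion: the 2.6-era mempool API used below keeps a guaranteed reserve of elements around caller-supplied allocation hooks, created once at reserve time and drained on teardown. A minimal sketch of that create/alloc/free/destroy pattern, assuming fixed-order DMA pages as the backing store (the demo_* names are illustrative only, not part of this commit):

#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* allocation hooks: mempool hands back the pool_data pointer (here, the page order) */
static void *demo_pool_alloc(unsigned int __nocast gfp, void *order)
{
        return (void *)__get_dma_pages(gfp, (unsigned int)order);
}

static void demo_pool_free(void *buf, void *order)
{
        free_pages((unsigned long)buf, (unsigned int)order);
}

static mempool_t *demo_pool;

static int demo_reserve(void)
{
        /* pre-allocate four order-4 (64kB with 4kB pages) blocks up front */
        demo_pool = mempool_create(4, demo_pool_alloc, demo_pool_free,
                                   (void *)4);
        return demo_pool ? 0 : -ENOMEM;
}

static void demo_release(void)
{
        void *buf;

        /* mempool_alloc() falls back to demo_pool_alloc() once the reserve is empty */
        buf = mempool_alloc(demo_pool, GFP_KERNEL);
        if (buf)
                mempool_free(buf, demo_pool);

        mempool_destroy(demo_pool);
        demo_pool = NULL;
}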
@@ -280,14 +280,25 @@ static int __init omap_dsp_init(void)
                return PTR_ERR(api_ck_handle);
        }

+       /* This is needed for McBSP init, released in late_initcall */
+       clk_enable(api_ck_handle);
+
        __dsp_enable();
        mpui_byteswap_off();
        mpui_wordswap_on();
        tc_wordswap();

        init_done = 1;
+       printk(KERN_INFO "omap_dsp_init() done\n");
+       return 0;
+}
+
+static int dsp_late_init(void)
+{
+       clk_disable(api_ck_handle);
        return 0;
 }
+late_initcall(dsp_late_init);

 static void dsp_cpustat_update(void)
 {
@@ -7,6 +7,9 @@
  *
  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
  *
+ * Conversion to mempool API and ARM MMU section mapping
+ * by Paul Mundt <paul.mundt@nokia.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -33,6 +36,7 @@
 #include <linux/fb.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/mempool.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <asm/uaccess.h>
@@ -118,6 +122,20 @@ static void *dspvect_page;
 static unsigned long dsp_fault_adr;
 static struct mem_sync_struct mem_sync;

+static void *mempool_alloc_from_pool(mempool_t *pool,
+                                     unsigned int __nocast gfp_mask)
+{
+       spin_lock_irq(&pool->lock);
+       if (likely(pool->curr_nr)) {
+               void *element = pool->elements[--pool->curr_nr];
+               spin_unlock_irq(&pool->lock);
+               return element;
+       }
+       spin_unlock_irq(&pool->lock);
+
+       return mempool_alloc(pool, gfp_mask);
+}
+
 static __inline__ unsigned long lineup_offset(unsigned long adr,
                                               unsigned long ref,
                                               unsigned long mask)
@@ -171,60 +189,35 @@ int dsp_mem_sync_config(struct mem_sync_struct *sync)
        return 0;
 }

-/*
- * kmem_reserve(), kmem_release():
- * reserve or release kernel memory for exmap().
- *
- * exmap() might request consecutive 1MB or 64kB,
- * but it will be difficult after memory pages are fragmented.
- * So, user can reserve such memory blocks in the early phase
- * through kmem_reserve().
- */
-struct kmem_pool {
-       struct semaphore sem;
-       unsigned long buf[16];
-       int count;
-};
-
-#define KMEM_POOL_INIT(name) \
-{ \
-       .sem = __SEMAPHORE_INIT((name).sem, 1), \
-}
-#define DECLARE_KMEM_POOL(name) \
-       struct kmem_pool name = KMEM_POOL_INIT(name)
-
-DECLARE_KMEM_POOL(kmem_pool_1M);
-DECLARE_KMEM_POOL(kmem_pool_64K);
+static mempool_t *kmem_pool_1M;
+static mempool_t *kmem_pool_64K;
+
+static void *dsp_pool_alloc(unsigned int __nocast gfp, void *order)
+{
+       return (void *)__get_dma_pages(gfp, (unsigned int)order);
+}
+
+static void dsp_pool_free(void *buf, void *order)
+{
+       free_pages((unsigned long)buf, (unsigned int)order);
+}

 static void dsp_kmem_release(void)
 {
-       int i;
-
-       down(&kmem_pool_1M.sem);
-       for (i = 0; i < kmem_pool_1M.count; i++) {
-               if (kmem_pool_1M.buf[i])
-                       free_pages(kmem_pool_1M.buf[i], ORDER_1MB);
+       if (kmem_pool_64K) {
+               mempool_destroy(kmem_pool_64K);
+               kmem_pool_64K = NULL;
        }
-       kmem_pool_1M.count = 0;
-       up(&kmem_pool_1M.sem);

-       down(&kmem_pool_64K.sem);
-       for (i = 0; i < kmem_pool_64K.count; i++) {
-               if (kmem_pool_64K.buf[i])
-                       free_pages(kmem_pool_64K.buf[i], ORDER_64KB);
+       if (kmem_pool_1M) {
+               mempool_destroy(kmem_pool_1M);
+               kmem_pool_1M = NULL;
        }
-       kmem_pool_64K.count = 0;
-       up(&kmem_pool_1M.sem);
 }

 static int dsp_kmem_reserve(unsigned long size)
 {
-       unsigned long buf;
-       unsigned int order;
-       unsigned long unit;
-       unsigned long _size;
-       struct kmem_pool *pool;
-       int i;
+       unsigned long len = size;

        /* alignment check */
        if (!is_aligned(size, SZ_64KB)) {
@@ -239,115 +232,109 @@ static int dsp_kmem_reserve(unsigned long size)
                return -EINVAL;
        }

-       for (_size = size; _size; _size -= unit) {
-               if (_size >= SZ_1MB) {
-                       unit = SZ_1MB;
-                       order = ORDER_1MB;
-                       pool = &kmem_pool_1M;
-               } else {
-                       unit = SZ_64KB;
-                       order = ORDER_64KB;
-                       pool = &kmem_pool_64K;
-               }
-
-               buf = __get_dma_pages(GFP_KERNEL, order);
-               if (!buf)
-                       return size - _size;
-               down(&pool->sem);
-               for (i = 0; i < 16; i++) {
-                       if (!pool->buf[i]) {
-                               pool->buf[i] = buf;
-                               pool->count++;
-                               buf = 0;
-                               break;
-                       }
-               }
-               up(&pool->sem);
-               if (buf) {      /* pool is full */
-                       free_pages(buf, order);
-                       return size - _size;
-               }
-       }
-
-       return size;
-}
-
-static unsigned long dsp_mem_get_dma_pages(unsigned int order)
-{
-       struct kmem_pool *pool;
-       unsigned long buf = 0;
-       int i;
-
-       switch (order) {
-       case ORDER_1MB:
-               pool = &kmem_pool_1M;
-               break;
-       case ORDER_64KB:
-               pool = &kmem_pool_64K;
-               break;
-       default:
-               pool = NULL;
-       }
-
-       if (pool) {
-               down(&pool->sem);
-               for (i = 0; i < pool->count; i++) {
-                       if (pool->buf[i]) {
-                               buf = pool->buf[i];
-                               pool->buf[i] = 0;
-                               break;
-                       }
-               }
-               up(&pool->sem);
-               if (buf)
-                       return buf;
-       }
-
-       /* other size or not found in pool */
-       return __get_dma_pages(GFP_KERNEL, order);
-}
+       if (size >= SZ_1MB) {
+               int nr = size >> 20;
+
+               if (likely(!kmem_pool_1M))
+                       kmem_pool_1M = mempool_create(nr,
+                                                     dsp_pool_alloc,
+                                                     dsp_pool_free,
+                                                     (void *)ORDER_1MB);
+               else
+                       mempool_resize(kmem_pool_1M,
+                                      kmem_pool_1M->min_nr + nr,
+                                      GFP_KERNEL);
+
+               size &= ~(0xf << 20);
+       }
+
+       if (size >= SZ_64KB) {
+               int nr = size >> 16;
+
+               if (likely(!kmem_pool_64K))
+                       kmem_pool_64K = mempool_create(nr,
+                                                      dsp_pool_alloc,
+                                                      dsp_pool_free,
+                                                      (void *)ORDER_64KB);
+               else
+                       mempool_resize(kmem_pool_64K,
+                                      kmem_pool_64K->min_nr + nr,
+                                      GFP_KERNEL);
+
+               size &= ~(0xf << 16);
+       }
+
+       if (size)
+               len -= size;
+
+       return len;
+}

 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
 {
-       struct kmem_pool *pool;
        struct page *page, *ps, *pe;
-       int i;

        ps = virt_to_page(buf);
        pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
-       for (page = ps; page < pe; page++) {
+
+       for (page = ps; page < pe; page++)
                ClearPageReserved(page);
-       }

-       /*
-        * return buffer to kmem_pool or paging system
-        */
-       switch (order) {
-       case ORDER_1MB:
-               pool = &kmem_pool_1M;
-               break;
-       case ORDER_64KB:
-               pool = &kmem_pool_64K;
-               break;
-       default:
-               pool = NULL;
-       }
-
-       if (pool) {
-               down(&pool->sem);
-               for (i = 0; i < pool->count; i++) {
-                       if (!pool->buf[i]) {
-                               pool->buf[i] = buf;
-                               buf = 0;
-                       }
-               }
-               up(&pool->sem);
-       }
-
-       /* other size or pool is filled */
-       if (buf)
-               free_pages(buf, order);
+       if (buf) {
+               if ((order == ORDER_64KB) && likely(kmem_pool_64K))
+                       mempool_free((void *)buf, kmem_pool_64K);
+               else if ((order == ORDER_1MB) && likely(kmem_pool_1M))
+                       mempool_free((void *)buf, kmem_pool_1M);
+               else
+                       free_pages(buf, order);
+       }
+}
+
+static inline void
+exmap_alloc_pte(unsigned long virt, unsigned long phys, pgprot_t prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = pgd_offset_k(virt);
+       pud = pud_offset(pgd, virt);
+       pmd = pmd_offset(pud, virt);
+
+       if (pmd_none(*pmd)) {
+               pte = pte_alloc_one_kernel(&init_mm, 0);
+               if (!pte)
+                       return;
+
+               /* note: two PMDs will be set */
+               pmd_populate_kernel(&init_mm, pmd, pte);
+       }
+
+       pte = pte_offset_kernel(pmd, virt);
+       set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
+}
+
+static inline int
+exmap_alloc_sect(unsigned long virt, unsigned long phys, int prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pgd = pgd_offset_k(virt);
+       pud = pud_alloc(&init_mm, pgd, virt);
+       pmd = pmd_alloc(&init_mm, pud, virt);
+
+       if (virt & (1 << 20))
+               pmd++;
+
+       if (!pmd_none(*pmd))
+               /* No good, fall back on smaller mappings. */
+               return -EINVAL;
+
+       *pmd = __pmd(phys | prot);
+       flush_pmd_entry(pmd);
+
+       return 0;
 }

 /*
@@ -357,59 +344,130 @@ static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
                            unsigned long size)
 {
        long off;
-       unsigned long sz_left;
-       pmd_t *pmdp;
-       pte_t *ptep;
-       int prot_pmd, prot_pte;
+       pgprot_t prot_pte;
+       int prot_sect;

        printk(KERN_DEBUG
               "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
               virt, phys, size);

-       prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
-       prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;
+       prot_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+                           L_PTE_DIRTY | L_PTE_WRITE);

-       pmdp = pmd_offset(pgd_offset_k(virt), virt);
-       if (pmd_none(*pmdp)) {
-               ptep = pte_alloc_one_kernel(&init_mm, 0);
-               if (ptep == NULL)
-                       return -ENOMEM;
-               /* note: two PMDs will be set */
-               pmd_populate_kernel(&init_mm, pmdp, ptep);
-       }
+       prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+                   PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
+
+       if (cpu_architecture() <= CPU_ARCH_ARMv5)
+               prot_sect |= PMD_BIT4;

        off = phys - virt;
-       for (sz_left = size;
-            sz_left >= PAGE_SIZE;
-            sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
-               ptep = pte_offset_kernel(pmdp, virt);
-               set_pte(ptep, __pte((virt + off) | prot_pte));
+
+       while ((virt & 0xfffff || (virt + off) & 0xfffff) &&
+              size >= PAGE_SIZE) {
+               exmap_alloc_pte(virt, virt + off, prot_pte);
+
+               virt += PAGE_SIZE;
+               size -= PAGE_SIZE;
        }
-       if (sz_left)
-               BUG();
+
+       /* XXX: Not yet.. confuses dspfb -- PFM. */
+#if 0
+       while (size >= (PGDIR_SIZE / 2)) {
+               if (exmap_alloc_sect(virt, virt + off, prot_sect) < 0)
+                       break;
+
+               virt += (PGDIR_SIZE / 2);
+               size -= (PGDIR_SIZE / 2);
+       }
+#endif
+
+       while (size >= PAGE_SIZE) {
+               exmap_alloc_pte(virt, virt + off, prot_pte);
+
+               virt += PAGE_SIZE;
+               size -= PAGE_SIZE;
+       }
+
+       BUG_ON(size);

        return 0;
 }

+static inline void
+exmap_clear_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+{
+       pte_t *pte;
+
+       pte = pte_offset_map(pmd, addr);
+
+       do {
+               if (pte_none(*pte))
+                       continue;
+
+               pte_clear(&init_mm, addr, pte);
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       pte_unmap(pte - 1);
+}
+
+static inline void
+exmap_clear_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
+{
+       pmd_t *pmd;
+       unsigned long next;
+
+       pmd = pmd_offset(pud, addr);
+
+       do {
+               next = pmd_addr_end(addr, end);
+
+               if (addr & (1 << 20))
+                       pmd++;
+
+               if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
+                       *pmd = __pmd(0);
+                       clean_pmd_entry(pmd);
+                       continue;
+               }
+
+               if (pmd_none_or_clear_bad(pmd))
+                       continue;
+
+               exmap_clear_pte_range(pmd, addr, next);
+       } while (pmd++, addr = next, addr != end);
+}
+
+static inline void
+exmap_clear_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+       pud_t *pud;
+       unsigned long next;
+
+       pud = pud_offset(pgd, addr);
+
+       do {
+               next = pud_addr_end(addr, end);
+
+               if (pud_none_or_clear_bad(pud))
+                       continue;
+
+               exmap_clear_pmd_range(pud, addr, next);
+       } while (pud++, addr = next, addr != end);
+}
+
 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
 {
-       unsigned long sz_left;
-       pmd_t *pmdp;
-       pte_t *ptep;
+       unsigned long next, end;
+       pgd_t *pgd;

        printk(KERN_DEBUG
               "omapdsp: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
               virt, size);

-       for (sz_left = size;
-            sz_left >= PAGE_SIZE;
-            sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
-               pmdp = pmd_offset(pgd_offset_k(virt), virt);
-               ptep = pte_offset_kernel(pmdp, virt);
-               pte_clear(&init_mm, virt, ptep);
-       }
-       if (sz_left)
-               BUG();
+       pgd = pgd_offset_k(virt);
+       end = virt + size;
+
+       do {
+               next = pgd_addr_end(virt, end);
+
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+
+               exmap_clear_pud_range(pgd, virt, next);
+       } while (pgd++, virt = next, virt != end);
 }

 static int exmap_valid(void *vadr, size_t len)
@@ -848,41 +906,41 @@ found_free:
            is_aligned(_dspadr, SZ_1MB)) {
                unit = SZ_1MB;
                slst = DSPMMU_CAM_L_SLST_1MB;
-               order = ORDER_1MB;
        } else if ((_size >= SZ_64KB) &&
                   (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
                   is_aligned(_dspadr, SZ_64KB)) {
                unit = SZ_64KB;
                slst = DSPMMU_CAM_L_SLST_64KB;
-               order = ORDER_64KB;
-       } else /* if (_size >= SZ_4KB) */ {
+       } else {
                unit = SZ_4KB;
                slst = DSPMMU_CAM_L_SLST_4KB;
-               order = ORDER_4KB;
        }
-#if 0   /* 1KB is not enabled */
-       else if (_size >= SZ_1KB) {
-               unit = SZ_1KB;
-               slst = DSPMMU_CAM_L_SLST_1KB;
-               order = XXX;
-       }
-#endif
+
+       order = get_order(unit);

        /* buffer allocation */
        if (type == EXMAP_TYPE_MEM) {
                struct page *page, *ps, *pe;

-               buf = (void *)dsp_mem_get_dma_pages(order);
-               if (buf == NULL) {
-                       status = -ENOMEM;
-                       goto fail;
+               if ((order == ORDER_1MB) && likely(kmem_pool_1M))
+                       buf = mempool_alloc_from_pool(kmem_pool_1M,
+                                                     GFP_KERNEL);
+               else if ((order == ORDER_64KB) && likely(kmem_pool_64K))
+                       buf = mempool_alloc_from_pool(kmem_pool_64K,
+                                                     GFP_KERNEL);
+               else {
+                       buf = (void *)__get_dma_pages(GFP_KERNEL, order);
+                       if (buf == NULL) {
+                               status = -ENOMEM;
+                               goto fail;
+                       }
                }
+
                /* mark the pages as reserved; this is needed for mmap */
                ps = virt_to_page(buf);
                pe = virt_to_page(buf + unit);
-               for (page = ps; page < pe; page++) {
+
+               for (page = ps; page < pe; page++)
                        SetPageReserved(page);
-               }

                _padr = __pa(buf);
        }
@@ -1569,13 +1627,13 @@ static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);
 static ssize_t kmem_pool_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
-       int count_1M, count_64K, total;
+       int nr_1M, nr_64K, total;

-       count_1M = kmem_pool_1M.count;
-       count_64K = kmem_pool_64K.count;
-       total = count_1M * SZ_1MB + count_64K * SZ_64KB;
+       nr_1M = kmem_pool_1M->min_nr;
+       nr_64K = kmem_pool_64K->min_nr;
+       total = nr_1M * SZ_1MB + nr_64K * SZ_64KB;

-       return sprintf(buf, "0x%x %d %d\n", total, count_1M, count_64K);
+       return sprintf(buf, "0x%x %d %d\n", total, nr_1M, nr_64K);
 }

 static struct device_attribute dev_attr_kmem_pool = __ATTR_RO(kmem_pool);
@@ -26,20 +26,18 @@

 struct proc_list {
        struct list_head list_head;
-       struct task_struct *tsk;
+       pid_t pid;
        unsigned int cnt;
 };

 static __inline__ void proc_list_add(struct list_head *list,
                                      struct task_struct *tsk)
 {
-       struct list_head *ptr;
        struct proc_list *pl;
        struct proc_list *new;

-       list_for_each(ptr, list) {
-               pl = list_entry(ptr, struct proc_list, list_head);
-               if (pl->tsk == tsk) {
+       list_for_each_entry(pl, list, list_head) {
+               if (pl->pid == tsk->pid) {
                        /*
                         * this process has opened DSP devices multiple times
                         */
@@ -49,7 +47,7 @@ static __inline__ void proc_list_add(struct list_head *list,
        }

        new = kmalloc(sizeof(struct proc_list), GFP_KERNEL);
-       new->tsk = tsk;
+       new->pid = tsk->pid;
        new->cnt = 1;
        list_add_tail(&new->list_head, list);
 }
@@ -57,12 +55,10 @@ static __inline__ void proc_list_add(struct list_head *list,
 static __inline__ void proc_list_del(struct list_head *list,
                                      struct task_struct *tsk)
 {
-       struct list_head *ptr;
-       struct proc_list *pl;
+       struct proc_list *pl, *next;

-       list_for_each(ptr, list) {
-               pl = list_entry(ptr, struct proc_list, list_head);
-               if (pl->tsk == tsk) {
+       list_for_each_entry(pl, list, list_head) {
+               if (pl->pid == tsk->pid) {
                        if (--pl->cnt == 0) {
                                list_del(&pl->list_head);
                                kfree(pl);
@@ -70,6 +66,23 @@ static __inline__ void proc_list_del(struct list_head *list,
                        return;
                }
        }
+
+       /*
+        * the corresponding pid wasn't found in the list
+        * -- this means the caller of proc_list_del is different from
+        * the proc_list_add's caller. in this case, the parent is
+        * cleaning up the context of a killed child.
+        * let's delete the exiting task from the list.
+        */
+       /* need to lock tasklist_lock before calling find_task_by_pid_type. */
+       read_lock(&tasklist_lock);
+       list_for_each_entry_safe(pl, next, list, list_head) {
+               if (find_task_by_pid_type(PIDTYPE_PID, pl->pid) == NULL) {
+                       list_del(&pl->list_head);
+                       kfree(pl);
+               }
+       }
+       read_unlock(&tasklist_lock);
 }

 static __inline__ void proc_list_flush(struct list_head *list)
@@ -273,6 +273,26 @@ static __inline__ int down_tasksem_interruptible(struct taskdev *dev,
        return ret;
 }

+static void proclist_send_sigbus(struct list_head *list)
+{
+       siginfo_t info;
+       struct proc_list *pl;
+       struct task_struct *tsk;
+
+       info.si_signo = SIGBUS;
+       info.si_errno = 0;
+       info.si_code = SI_KERNEL;
+       info._sifields._sigfault._addr = NULL;
+
+       /* need to lock tasklist_lock before calling find_task_by_pid_type. */
+       read_lock(&tasklist_lock);
+       list_for_each_entry(pl, list, list_head) {
+               if ((tsk = find_task_by_pid_type(PIDTYPE_PID, pl->pid)) != NULL)
+                       send_sig_info(SIGBUS, &info, tsk);
+       }
+       read_unlock(&tasklist_lock);
+}
+
 static int dsp_task_flush_buf(struct dsptask *task)
 {
        unsigned short ttyp = task->ttyp;
@@ -1666,22 +1686,11 @@ static int dsp_rmdev_minor(unsigned char minor)
        case OMAP_DSP_DEVSTATE_ATTACHED:
                /* task is working. kill it. */
-               {
-                       siginfo_t info;
-                       struct proc_list *pl;
-
-                       dev->state = OMAP_DSP_DEVSTATE_KILLING;
-                       info.si_signo = SIGBUS;
-                       info.si_errno = 0;
-                       info.si_code = SI_KERNEL;
-                       info._sifields._sigfault._addr = NULL;
-                       list_for_each_entry(pl, &dev->proc_list, list_head) {
-                               send_sig_info(SIGBUS, &info, pl->tsk);
-                       }
-                       spin_unlock(&dev->state_lock);
-                       dsp_tdel_bh(minor, OMAP_DSP_MBCMD_TDEL_KILL);
-                       goto invalidate;
-               }
+               dev->state = OMAP_DSP_DEVSTATE_KILLING;
+               proclist_send_sigbus(&dev->proc_list);
+               spin_unlock(&dev->state_lock);
+               dsp_tdel_bh(minor, OMAP_DSP_MBCMD_TDEL_KILL);
+               goto invalidate;

        case OMAP_DSP_DEVSTATE_ADDREQ:
                /* open() is waiting. drain it. */
@@ -2003,8 +2012,6 @@ int dsp_tdel(unsigned char minor)
 int dsp_tkill(unsigned char minor)
 {
        struct taskdev *dev;
-       siginfo_t info;
-       struct proc_list *pl;

        if ((minor >= TASKDEV_MAX) || ((dev = taskdev[minor]) == NULL)) {
                printk(KERN_ERR
@@ -2020,13 +2027,7 @@ int dsp_tkill(unsigned char minor)
                return -EINVAL;
        }
        dev->state = OMAP_DSP_DEVSTATE_KILLING;
-       info.si_signo = SIGBUS;
-       info.si_errno = 0;
-       info.si_code = SI_KERNEL;
-       info._sifields._sigfault._addr = NULL;
-       list_for_each_entry(pl, &dev->proc_list, list_head) {
-               send_sig_info(SIGBUS, &info, pl->tsk);
-       }
+       proclist_send_sigbus(&dev->proc_list);
        spin_unlock(&dev->state_lock);

        return dsp_tdel_bh(minor, OMAP_DSP_MBCMD_TDEL_KILL);
@@ -2437,22 +2438,14 @@ void mbx1_tdel(struct mbcmd *mb)
 void mbx1_err_fatal(unsigned char tid)
 {
        struct dsptask *task = dsptask[tid];
-       struct proc_list *pl;
-       siginfo_t info;

        if ((tid >= TASKDEV_MAX) || (task == NULL)) {
                printk(KERN_ERR "mbx: FATAL ERR with illegal tid! %d\n", tid);
                return;
        }

-       info.si_signo = SIGBUS;
-       info.si_errno = 0;
-       info.si_code = SI_KERNEL;
-       info._sifields._sigfault._addr = NULL;
        spin_lock(&task->dev->state_lock);
-       list_for_each_entry(pl, &task->dev->proc_list, list_head) {
-               send_sig_info(SIGBUS, &info, pl->tsk);
-       }
+       proclist_send_sigbus(&task->dev->proc_list);
        spin_unlock(&task->dev->state_lock);
 }
@@ -2656,7 +2649,7 @@ static ssize_t proc_list_show(struct device *d, struct device_attribute *attr,
        dev = to_taskdev(d);
        spin_lock(&dev->state_lock);
        list_for_each_entry(pl, &dev->proc_list, list_head) {
-               len += sprintf(buf + len, "%d\n", pl->tsk->pid);
+               len += sprintf(buf + len, "%d\n", pl->pid);
        }
        spin_unlock(&dev->state_lock);
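
Note on the process list fix: storing a pid_t instead of a task_struct pointer means a task that dies while registered can no longer leave a dangling pointer in the list; the pid is resolved back to a task only when needed, under tasklist_lock. A minimal sketch of that lookup pattern (demo_pid_alive is a hypothetical helper, not part of this commit):

#include <linux/sched.h>

static int demo_pid_alive(pid_t pid)
{
        struct task_struct *tsk;
        int alive;

        /* find_task_by_pid_type() must be called with tasklist_lock held */
        read_lock(&tasklist_lock);
        tsk = find_task_by_pid_type(PIDTYPE_PID, pid);
        alive = (tsk != NULL);
        read_unlock(&tasklist_lock);

        return alive;
}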