Commit 62ef2b07 authored by David Brownell, committed by Kevin Hilman

EDMA: split channel/slot resource management

EDMA interface update for channel and parameter RAM slot alloc/free.
This is the biggest of these changes, since it's non-cosmetic.

 - Stop talking about "master" and "slave"!  Instead, use the notions
   exposed by the hardware:  a DMA "channel", and a PaRAM slot.  This
   is a general doc/comment update, and affects calling conventions.

 - Split davinci_request_dma() into two simpler routines:
     * edma_alloc_channel() with three fewer parameters
     * edma_alloc_slot() with just one parameter (may be a wildcard)
   The test for successful returns is "value < 0", not "value != 0";
   non-negative values are the returned channel or slot number.

 - Split davinci_free_dma() into two routines, both of which update
   the now-free parameter RAM slot to hold a dummy transfer.
     * void edma_free_channel(unsigned channel)
     * void edma_free_slot(unsigned slot);

 - Fill all PaRAM slots with dummy transfers when they're not in use.

 - Change the channel and slot numbers to "unsigned" in some cases so
   we can avoid some tests for invalid parameters.

A key notion here is to *stop* fuzzing distinctions between DMA channels
and parameter RAM slots.  This makes it easier to match these calls to
hardware docs, and harder to get confused by differences; channels are
(potentially) active, while slots are always passive.

Transfer Completion Code (TCC) values are no longer supported except
through the calls which manipulate entire parameter RAM sets.  This
means that completion IRQ setup (for audio) is a bit different.
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
parent ff2f60e6
......@@ -234,7 +234,7 @@ static struct platform_device edma_dev = {
/*****************************************************************************/
static struct dma_interrupt_data {
void (*callback) (int lch, unsigned short ch_status, void *data);
void (*callback)(unsigned channel, unsigned short ch_status, void *data);
void *data;
} intr_data[DAVINCI_EDMA_NUM_DMACH];
......@@ -243,12 +243,18 @@ static struct dma_interrupt_data {
*/
static DECLARE_BITMAP(edma_inuse, DAVINCI_EDMA_NUM_PARAMENTRY);
/* The edma_noevent bit for each master channel is clear unless
/* The edma_noevent bit for each channel is clear unless
* it doesn't trigger DMA events on this platform. It uses a
* bit of SOC-specific initialization code.
*/
static DECLARE_BITMAP(edma_noevent, DAVINCI_EDMA_NUM_DMACH);
/* Dummy parameter RAM set used to (re)initialize parameter RAM slots
 * (and the slots paired with DMA channels) whenever they are not in use.
 * link_bcntrld = 0xffff means "no link" / NULL link target, so a channel
 * loaded with this set terminates instead of chaining to another slot.
 * ccnt = 1 presumably describes a minimal single-frame transfer count —
 * NOTE(review): confirm against the EDMA3 CC PaRAM field definitions.
 * All other fields are zero-initialized (null addresses, zero counts).
 */
static const struct edmacc_param dummy_paramset = {
.link_bcntrld = 0xffff,
.ccnt = 1,
};
static const s8 __initconst dma_chan_dm644x_no_event[] = {
0, 1, 12, 13, 14, 15, 25, 30, 31, 45, 46, 47, 55, 56, 57, 58, 59, 60,
61, 62, 63, -1
......@@ -298,7 +304,7 @@ static void __init assign_priority_to_queue(int queue_no, int priority)
static inline void
setup_dma_interrupt(unsigned lch,
void (*callback)(int lch, unsigned short ch_status, void *data),
void (*callback)(unsigned channel, u16 ch_status, void *data),
void *data)
{
if (!callback) {
......@@ -502,8 +508,9 @@ static int __init davinci_dma_init(void)
dev_dbg(&edma_dev.dev, "DMA REG BASE ADDR=%p\n", edmacc_regs_base);
for (i = 0; i < DAVINCI_EDMA_NUM_PARAMENTRY * PARM_SIZE; i += 4)
edma_write(EDMA_PARM + i, 0);
for (i = 0; i < DAVINCI_EDMA_NUM_PARAMENTRY; i++)
memcpy_toio(edmacc_regs_base + PARM_OFFSET(i),
&dummy_paramset, PARM_SIZE);
if (cpu_is_davinci_dm355()) {
/* NOTE conflicts with SPI1_INT{0,1} and SPI2_INT0 */
......@@ -516,7 +523,7 @@ static int __init davinci_dma_init(void)
} else if (cpu_is_davinci_dm644x()) {
noevent = dma_chan_dm644x_no_event;
} else {
/* request_dma(DAVINCI_DMA_CHANNEL_ANY) fails */
/* alloc_channel(EDMA_CHANNEL_ANY) fails */
noevent = NULL;
}
......@@ -585,167 +592,162 @@ static int __init davinci_dma_init(void)
}
arch_initcall(davinci_dma_init);
/*-----------------------------------------------------------------------*/
/* Resource alloc/free: dma channels, parameter RAM slots */
/**
* davinci_request_dma - allocate a DMA channel
* @dev_id: specific DMA channel; else DAVINCI_DMA_CHANNEL_ANY to
* allocate some master channel without a hardware event, or
* DAVINCI_EDMA_PARAM_ANY to allocate some slave channel.
* @name: name associated with @dev_id
* @callback: to be issued on DMA completion or errors (master only)
* @data: passed to callback (master only)
* @lch: used to return the number of the allocated event channel; pass
* this later to davinci_free_dma()
* @tcc: may be NULL; else an input for masters, an output for slaves.
* edma_alloc_channel - allocate DMA channel and paired parameter RAM
* @channel: specific channel to allocate; negative for "any unmapped channel"
* @callback: optional; to be issued on DMA completion or errors
* @data: passed to callback
* @eventq_no: an EVENTQ_* constant, used to choose which Transfer
* Controller (TC) executes requests on this channel (master only)
* Controller (TC) executes requests using this channel
*
* Returns zero on success, else negative errno.
* This allocates a DMA channel and its associated parameter RAM slot.
* The parameter RAM is initialized to hold a dummy transfer.
*
* The @tcc parameter may be null, indicating default behavior: no
* transfer completion callbacks are issued, but masters use @callback
* and @data (if provided) to report transfer errors. Else masters use
* it as an output, returning either what @lch returns (and enabling
* transfer completion interrupts), or TCC_ANY if there is no callback.
* Slaves use @tcc as an input: TCC_ANY gives the default behavior,
* else it specifies a transfer completion @callback to be used.
* Normal use is to pass a specific channel number as @channel, to make
* use of hardware events mapped to that channel. When the channel will
* be used only for software triggering or event chaining, channels not
* mapped to hardware events (or mapped to unused events) are preferable.
*
* These TCC settings are stored in PaRAM slots, so they may be updated
* later. In particular, reloading a master PaRAM entry from a slave
* (via linking) overwrites everything, including those TCC settings.
* DMA transfers start from a channel using davinci_start_dma(), or by
* chaining. When the transfer described in that channel's parameter RAM
* slot completes, that slot's data may be reloaded through a link.
*
* DMA transfers start from a master channel using davinci_start_dma()
* or by chaining. When the transfer described in that master's PaRAM
* slot completes, its PaRAM data may be reloaded from a linked slave.
* DMA errors are only reported to the @callback associated with the
* channel driving that transfer, but transfer completion callbacks can
* be sent to another channel under control of the TCC field in
* the option word of the transfer's parameter RAM set. Drivers must not
* use DMA transfer completion callbacks for channels they did not allocate.
* (The same applies to TCC codes used in transfer chaining.)
*
* DMA errors are only reported to the @callback associated with that
* master channel, but transfer completion callbacks can be sent to
* another master channel. Drivers must not use DMA transfer completion
* callbacks (@tcc) for master channels they did not allocate. (The
* same applies to transfer chaining, since the same @tcc codes are
* used both to trigger completion interrupts and to chain transfers.)
* Returns the number of the channel, else negative errno.
*/
int davinci_request_dma(int dev_id, const char *name,
void (*callback) (int lch, unsigned short ch_status,
void *data),
void *data, int *lch,
int *tcc, enum dma_event_q eventq_no)
int edma_alloc_channel(int channel,
void (*callback)(unsigned channel, u16 ch_status, void *data),
void *data,
enum dma_event_q eventq_no)
{
int tcc_val = tcc ? *tcc : TCC_ANY;
struct edmacc_param param = { .opt = 0, };
/* REVISIT: tcc would be better as a non-pointer parameter */
switch (tcc_val) {
case TCC_ANY:
case 0 ... DAVINCI_EDMA_NUM_DMACH - 1:
break;
default:
if (channel < 0) {
channel = 0;
for (;;) {
channel = find_next_zero_bit(edma_inuse,
DAVINCI_EDMA_NUM_DMACH, channel);
if (channel == DAVINCI_EDMA_NUM_DMACH)
return -ENOMEM;
if (!test_and_set_bit(channel, edma_inuse))
break;
}
} else if (channel >= DAVINCI_EDMA_NUM_DMACH) {
return -EINVAL;
} else if (test_and_set_bit(channel, edma_inuse)) {
return -EBUSY;
}
switch (dev_id) {
/* ensure access through shadow region 0 */
edma_or_array2(EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));
/* Allocate a specific master channel, e.g. for MMC1 RX or ASP0 TX */
case 0 ... DAVINCI_EDMA_NUM_DMACH - 1:
if (test_and_set_bit(dev_id, edma_inuse))
return -EBUSY;
/* ensure no events are pending */
davinci_stop_dma(channel);
memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel),
&dummy_paramset, PARM_SIZE);
alloc_master:
tcc_val = (tcc && callback) ? dev_id : TCC_ANY;
if (callback)
setup_dma_interrupt(channel, callback, data);
/* ensure access through shadow region 0 */
edma_or_array2(EDMA_DRAE, 0, dev_id >> 5,
1 << (dev_id & 0x1f));
map_dmach_queue(channel, eventq_no);
if (callback)
setup_dma_interrupt(dev_id, callback, data);
return channel;
}
EXPORT_SYMBOL(edma_alloc_channel);
map_dmach_queue(dev_id, eventq_no);
/* ensure no events are pending */
davinci_stop_dma(dev_id);
break;
/**
* edma_free_channel - deallocate DMA channel
* @channel: dma channel returned from edma_alloc_channel()
*
* This deallocates the DMA channel and associated parameter RAM slot
* allocated by edma_alloc_channel().
*
* Callers are responsible for ensuring the channel is inactive, and
* will not be reactivated by linking, chaining, or software calls to
* davinci_start_dma().
*/
void edma_free_channel(unsigned channel)
{
if (channel >= DAVINCI_EDMA_NUM_DMACH)
return;
/* Allocate a specific slave channel, mostly to reserve it
* as part of a set of resources allocated to a DSP.
*/
case DAVINCI_EDMA_NUM_DMACH ... DAVINCI_EDMA_NUM_PARAMENTRY - 1:
if (test_and_set_bit(dev_id, edma_inuse))
return -EBUSY;
break;
/* return some master channel with no event association */
case DAVINCI_DMA_CHANNEL_ANY:
dev_id = 0;
for (;;) {
dev_id = find_next_bit(edma_noevent,
DAVINCI_EDMA_NUM_DMACH, dev_id);
if (dev_id == DAVINCI_EDMA_NUM_DMACH)
return -ENOMEM;
if (!test_and_set_bit(dev_id, edma_inuse))
goto alloc_master;
}
break;
setup_dma_interrupt(channel, NULL, NULL);
/* REVISIT should probably take out of shadow region 0 */
memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel),
&dummy_paramset, PARM_SIZE);
clear_bit(channel, edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
/* return some slave channel */
case DAVINCI_EDMA_PARAM_ANY:
dev_id = DAVINCI_EDMA_NUM_DMACH;
/**
* edma_alloc_slot - allocate DMA parameter RAM
* @slot: specific slot to allocate; negative for "any unused slot"
*
* This allocates a parameter RAM slot, initializing it to hold a
* dummy transfer. Slots allocated using this routine have not been
* mapped to a hardware DMA channel, and will normally be used by
* linking to them from a slot associated with a DMA channel.
*
* Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
* slots may be allocated on behalf of DSP firmware.
*
* Returns the number of the slot, else negative errno.
*/
int edma_alloc_slot(int slot)
{
if (slot < 0) {
slot = DAVINCI_EDMA_NUM_DMACH;
for (;;) {
dev_id = find_next_zero_bit(edma_inuse,
DAVINCI_EDMA_NUM_PARAMENTRY, dev_id);
if (dev_id == DAVINCI_EDMA_NUM_PARAMENTRY)
slot = find_next_zero_bit(edma_inuse,
DAVINCI_EDMA_NUM_PARAMENTRY, slot);
if (slot == DAVINCI_EDMA_NUM_PARAMENTRY)
return -ENOMEM;
if (!test_and_set_bit(dev_id, edma_inuse))
if (!test_and_set_bit(slot, edma_inuse))
break;
}
break;
default:
} else if (slot < DAVINCI_EDMA_NUM_DMACH
|| slot >= DAVINCI_EDMA_NUM_PARAMENTRY) {
return -EINVAL;
} else if (test_and_set_bit(slot, edma_inuse)) {
return -EBUSY;
}
/* Optionally fire Transfer Complete interrupts */
if (tcc_val != TCC_ANY)
param.opt = ((0x3f & tcc_val) << 12) | TCINTEN;
/* init the link field to no link. i.e 0xffff */
param.link_bcntrld = 0xffff;
memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot),
&dummy_paramset, PARM_SIZE);
/* init channel with a dummy PaRAM set */
param.ccnt = 1;
memcpy_toio(edmacc_regs_base + PARM_OFFSET(dev_id), &param, PARM_SIZE);
/* non-status return values */
*lch = dev_id;
if (tcc)
*tcc = tcc_val;
dev_dbg(&edma_dev.dev, "alloc lch %d, tcc %d\n", dev_id, tcc_val);
return 0;
return slot;
}
EXPORT_SYMBOL(davinci_request_dma);
EXPORT_SYMBOL(edma_alloc_slot);
/**
* davinci_free_dma - deallocate a DMA channel
* @lch: dma channel returned from davinci_request_dma()
* edma_free_slot - deallocate DMA parameter RAM
* @slot: parameter RAM slot returned from edma_alloc_slot()
*
* This deallocates the resources allocated by davinci_request_dma().
* Callers are responsible for ensuring the channel is inactive, and
* will not be reactivated by linking, chaining, or software calls to
* davinci_start_dma().
* This deallocates the parameter RAM slot allocated by edma_alloc_slot().
* Callers are responsible for ensuring the slot is inactive, and will
* not be activated.
*/
void davinci_free_dma(int lch)
void edma_free_slot(unsigned slot)
{
if (lch < 0 || lch >= DAVINCI_EDMA_NUM_PARAMENTRY)
if (slot < DAVINCI_EDMA_NUM_DMACH
|| slot >= DAVINCI_EDMA_NUM_PARAMENTRY)
return;
if (lch < DAVINCI_EDMA_NUM_DMACH) {
setup_dma_interrupt(lch, NULL, NULL);
/* REVISIT should probably take out shadow region 0 */
}
clear_bit(lch, edma_inuse);
memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot),
&dummy_paramset, PARM_SIZE);
clear_bit(slot, edma_inuse);
}
EXPORT_SYMBOL(davinci_free_dma);
EXPORT_SYMBOL(edma_free_slot);
/*-----------------------------------------------------------------------*/
......@@ -1009,8 +1011,8 @@ void davinci_resume_dma(int lch)
EXPORT_SYMBOL(davinci_resume_dma);
/**
* davinci_start_dma - start dma on a master channel
* @lch: logical master channel being activated
* davinci_start_dma - start dma on a channel
* @lch: logical channel being activated
*
* Channels with event associations will be triggered by their hardware
* events, and channels without such associations will be triggered by
......@@ -1045,7 +1047,7 @@ int davinci_start_dma(int lch)
edma_shadow0_write_array(SH_EESR, j, mask);
dev_dbg(&edma_dev.dev, "EER%d %08x\n", j,
edma_shadow0_read_array(SH_EER, j));
} else { /* for slaveChannels */
} else {
ret_val = -EINVAL;
}
return ret_val;
......@@ -1056,7 +1058,7 @@ EXPORT_SYMBOL(davinci_start_dma);
* davinci_stop_dma - stops dma on the channel passed
* @lch: logical channel being deactivated
*
* When @lch is a master channel, any active transfer is paused and
* When @lch is a channel, any active transfer is paused and
* all pending hardware events are cleared. The current transfer
* may not be resumed, and the channel's Parameter RAM should be
* reinitialized before being reused.
......@@ -1086,7 +1088,6 @@ void davinci_stop_dma(int lch)
* edma_parm_or(PARM_LINK_BCNTRLD, lch, 0xffff);
*/
} else {
/* for slaveChannels */
edma_parm_or(PARM_LINK_BCNTRLD, lch, 0xffff);
}
}
......
......@@ -27,26 +27,29 @@
*/
/*
* The EDMA3 framework for DaVinci abstracts DMA Parameter RAM (PaRAM) slots
* as logical DMA channels. There are two types of logical channel:
* This EDMA3 programming framework exposes two basic kinds of resource:
*
* Master Triggers transfers, usually from a hardware event but
* Channel Triggers transfers, usually from a hardware event but
* also manually or by "chaining" from DMA completions.
* Not all PaRAM slots may be masters; and not all masters
* support hardware event triggering.
* Each channel is coupled to a Parameter RAM (PaRAM) slot.
*
* Slave A master may be linked to a "slave" PaRAM slot, used to
* reload master parameters when a transfer finishes. Any
* PaRAM slot may be such a link target.
* Slot Each PaRAM slot holds a DMA transfer descriptor (PaRAM
* "set"), source and destination addresses, a link to a
* next PaRAM slot (if any), options for the transfer, and
* instructions for updating those addresses. There are
* more than twice as many slots as event channels.
*
* Each PaRAM slot holds a DMA transfer descriptor with destination and
* source addresses, a link to the next PaRAM slot (if any), options for
* the transfer, and instructions for updating those addresses.
* Each PaRAM set describes a sequence of transfers, either for one large
* buffer or for several discontiguous smaller buffers. An EDMA transfer
* is driven only from a channel, which performs the transfers specified
* in its PaRAM slot until there are no more transfers. When that last
* transfer completes, the "link" field may be used to reload the channel's
* PaRAM slot with a new transfer descriptor.
*
* The EDMA Channel Controller (CC) maps requests from master channels
* into physical Transfer Controller (TC) requests when the master
* triggers. The two physical DMA channels provided by the TC are thus
* shared by many logical channels.
* The EDMA Channel Controller (CC) maps requests from channels into physical
* Transfer Controller (TC) requests when the channel triggers (by hardware
* or software events, or by chaining). The two physical DMA channels provided
* by the TCs are thus shared by many logical channels.
*
* DaVinci hardware also has a "QDMA" mechanism which is not currently
* supported through this interface. (DSP firmware uses it though.)
......@@ -79,7 +82,7 @@ struct edmacc_param {
#define STATIC BIT(3)
#define EDMA_FWID (0x07 << 8)
#define TCCMODE BIT(11)
#define TCC (0x3f << 12)
#define EDMA_TCC(t) ((t) << 12)
#define TCINTEN BIT(20)
#define ITCINTEN BIT(21)
#define TCCHEN BIT(22)
......@@ -97,13 +100,6 @@ struct edmacc_param {
#define DAVINCI_EDMA_NUM_REGIONS 4
#define DAVINCI_EDMA_MEMPROTECT 0
#define DAVINCI_NUM_UNUSEDCH 21
#define TCC_ANY -1
/* special values understood by davinci_request_dma() */
#define DAVINCI_EDMA_PARAM_ANY -2
#define DAVINCI_DMA_CHANNEL_ANY -1
/* Drivers should avoid using these symbolic names for dm644x
* channels, and use platform_device IORESOURCE_DMA resources
......@@ -185,10 +181,18 @@ enum sync_dimension {
ABSYNC = 1
};
int davinci_request_dma(int dev_id, const char *dev_name,
void (*callback)(int lch, unsigned short ch_status, void *data),
void *data, int *lch, int *tcc, enum dma_event_q);
void davinci_free_dma(int lch);
#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
/* alloc/free DMA channels and their dedicated parameter RAM slots */
int edma_alloc_channel(int channel,
void (*callback)(unsigned channel, u16 ch_status, void *data),
void *data, enum dma_event_q);
void edma_free_channel(unsigned channel);
/* alloc/free parameter RAM slots */
int edma_alloc_slot(int slot);
void edma_free_slot(unsigned slot);
/* calls that operate on part of a parameter RAM slot */
void davinci_set_dma_src_params(int lch, dma_addr_t src_port,
......
......@@ -421,7 +421,7 @@ static void davinci_abort_dma(struct mmc_davinci_host *host)
davinci_clean_channel(sync_dev);
}
static void mmc_davinci_dma_cb(int lch, u16 ch_status, void *data)
static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
{
if (DMA_COMPLETE != ch_status) {
struct mmc_davinci_host *host = data;
......@@ -499,7 +499,7 @@ static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
edma_read_slot(sync_dev, template);
/* don't bother with irqs or chaining */
template->opt &= ~(ITCCHEN | TCCHEN | ITCINTEN | TCINTEN);
template->opt |= sync_dev << 12;
}
static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
......@@ -572,34 +572,28 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
if (!host->use_dma)
return;
davinci_free_dma(host->txdma);
davinci_free_dma(host->rxdma);
edma_free_channel(host->txdma);
edma_free_channel(host->rxdma);
}
static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
const char *hostname = mmc_hostname(host->mmc);
int edma_chan_num, tcc = 0, r;
enum dma_event_q queue_no = EVENTQ_0;
int r;
/* Acquire master DMA write channel */
r = davinci_request_dma(host->txdma, hostname,
mmc_davinci_dma_cb, host, &edma_chan_num, &tcc, queue_no);
if (r != 0) {
dev_warn(mmc_dev(host->mmc),
"MMC: davinci_request_dma() failed with %d\n",
r);
r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, EVENTQ_0);
if (r < 0) {
dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
"tx", r);
return r;
}
mmc_davinci_dma_setup(host, true, &host->tx_template);
/* Acquire master DMA read channel */
r = davinci_request_dma(host->rxdma, hostname,
mmc_davinci_dma_cb, host, &edma_chan_num, &tcc, queue_no);
if (r != 0) {
dev_warn(mmc_dev(host->mmc),
"MMC: davinci_request_dma() failed with %d\n",
r);
r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, EVENTQ_0);
if (r < 0) {
dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
"rx", r);
goto free_master_write;
}
mmc_davinci_dma_setup(host, false, &host->rx_template);
......@@ -607,7 +601,7 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
return 0;
free_master_write:
davinci_free_dma(host->txdma);
edma_free_channel(host->txdma);
return r;
}
......
......@@ -108,12 +108,12 @@ static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream)
prtd->period = 0;
}
static void davinci_pcm_dma_irq(int lch, u16 ch_status, void *data)
static void davinci_pcm_dma_irq(unsigned channel, u16 ch_status, void *data)
{
struct snd_pcm_substream *substream = data;
struct davinci_runtime_data *prtd = substream->runtime->private_data;
DPRINTK("lch=%d, status=0x%x\n", lch, ch_status);
DPRINTK("channel=%d, status=0x%x\n", channel, ch_status);
if (unlikely(ch_status != DMA_COMPLETE))
return;
......@@ -132,7 +132,7 @@ static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
struct davinci_runtime_data *prtd = substream->runtime->private_data;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct davinci_pcm_dma_params *dma_data = rtd->dai->cpu_dai->dma_data;
int tcc = TCC_ANY;
struct edmacc_param p_ram;
int ret;
if (!dma_data)
......@@ -141,22 +141,34 @@ static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
prtd->params = dma_data;
/* Request master DMA channel */
ret = davinci_request_dma(prtd->params->channel, prtd->params->name,
ret = edma_alloc_channel(prtd->params->channel,
davinci_pcm_dma_irq, substream,
&prtd->master_lch, &tcc, EVENTQ_0);
if (ret)
EVENTQ_0);
if (ret < 0)
return ret;
prtd->master_lch = ret;
/* Request parameter RAM reload slot */
ret = davinci_request_dma(DAVINCI_EDMA_PARAM_ANY, "Link",
NULL, NULL, &prtd->slave_lch, &tcc, EVENTQ_0);
if (ret) {
davinci_free_dma(prtd->master_lch);
ret = edma_alloc_slot(EDMA_SLOT_ANY);
if (ret < 0) {
edma_free_channel(prtd->master_lch);
return ret;
}
/* Link parameter RAM to itself in loopback */
edma_link(prtd->slave_lch, prtd->slave_lch);
prtd->slave_lch = ret;
/* Issue transfer completion IRQ when the channel completes a
* transfer, then always reload from the same slot (by a kind
* of loopback link). The completion IRQ handler will update
* the reload slot with a new buffer.
*
* REVISIT save p_ram here after setting up everything except
* the buffer and its length (ccnt) ... use it as a template
* so davinci_pcm_enqueue_dma() takes less time in IRQ.
*/
edma_read_slot(prtd->slave_lch, &p_ram);
p_ram.opt |= TCINTEN | EDMA_TCC(prtd->master_lch);
p_ram.link_bcntrld = prtd->slave_lch << 5;
edma_write_slot(prtd->slave_lch, &p_ram);
return 0;
}
......@@ -262,8 +274,8 @@ static int davinci_pcm_close(struct snd_pcm_substream *substream)
edma_unlink(prtd->slave_lch);
davinci_free_dma(prtd->slave_lch);
davinci_free_dma(prtd->master_lch);
edma_free_slot(prtd->slave_lch);
edma_free_channel(prtd->master_lch);
kfree(prtd);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment