Commit 37093b1e authored by Hans Verkuil, committed by Mauro Carvalho Chehab

V4L/DVB (6047): ivtv: Fix scatter/gather DMA timeouts

It turns out that the cx23415/6 DMA engine cannot do scatter/gather DMA
reliably. Every so often, depending on the phase of the moon and your
hardware configuration, the cx2341x DMA engine simply chokes on it and
you have to reboot to get it working again.

This change replaces scatter/gather DMA with single transfers, one buffer
at a time; the driver is now responsible for DMA-ing each buffer.

UDMA is still done using scatter/gather DMA; that will be fixed soon.
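
Roughly, the driver now keeps the list of per-buffer transfers on the host
side and hands the firmware only one element at a time. A minimal sketch of
the idea, using the new field names introduced in the header changes below
(the helper name and the step that actually starts the transfer are
placeholders, not the real interrupt-handler code):

    /* Sketch only: program one buffer's worth of DMA through the single
     * mapped element, instead of handing the firmware a full
     * scatter/gather array. */
    static void ivtv_dma_one_buffer(struct ivtv_stream *s)
    {
            struct ivtv_sg_element *e = &s->sg_processing[s->sg_processed];

            s->sg_dma->src  = e->src;       /* copy this buffer's transfer */
            s->sg_dma->dst  = e->dst;       /* descriptor into the one     */
            s->sg_dma->size = e->size;      /* element the card can see    */

            ivtv_stream_sync_for_device(s); /* flush the element to the card */
            /* ...ask the firmware to run this single transfer; on the
             * DMA-done interrupt advance s->sg_processed and repeat until
             * it reaches s->sg_processing_size. */
    }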

Many thanks to Mark Bryars <mark.bryars@etvinteractive.com> for discovering
the link between scatter/gather and the DMA timeouts.
Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@infradead.org>
parent f4071b85
@@ -382,7 +382,6 @@ struct ivtv_mailbox_data {
#define IVTV_F_I_RADIO_USER 5 /* The radio tuner is selected */
#define IVTV_F_I_DIG_RST 6 /* Reset digitizer */
#define IVTV_F_I_DEC_YUV 7 /* YUV instead of MPG is being decoded */
#define IVTV_F_I_ENC_VBI 8 /* VBI DMA */
#define IVTV_F_I_UPDATE_CC 9 /* CC should be updated */
#define IVTV_F_I_UPDATE_WSS 10 /* WSS should be updated */
#define IVTV_F_I_UPDATE_VPS 11 /* VPS should be updated */
@@ -405,7 +404,7 @@ struct ivtv_mailbox_data {
#define IVTV_F_I_EV_VSYNC_ENABLED 31 /* VSYNC event enabled */
/* Scatter-Gather array element, used in DMA transfers */
struct ivtv_SG_element {
struct ivtv_sg_element {
u32 src;
u32 dst;
u32 size;
@@ -417,7 +416,7 @@ struct ivtv_user_dma {
struct page *map[IVTV_DMA_SG_OSD_ENT];
/* Base Dev SG Array for cx23415/6 */
struct ivtv_SG_element SGarray[IVTV_DMA_SG_OSD_ENT];
struct ivtv_sg_element SGarray[IVTV_DMA_SG_OSD_ENT];
dma_addr_t SG_handle;
int SG_length;
@@ -468,6 +467,10 @@ struct ivtv_stream {
int dma; /* can be PCI_DMA_TODEVICE,
PCI_DMA_FROMDEVICE or
PCI_DMA_NONE */
u32 pending_offset;
u32 pending_backup;
u64 pending_pts;
u32 dma_offset;
u32 dma_backup;
u64 dma_pts;
@@ -493,10 +496,13 @@ struct ivtv_stream {
u16 dma_xfer_cnt;
/* Base Dev SG Array for cx23415/6 */
struct ivtv_SG_element *SGarray;
struct ivtv_SG_element *PIOarray;
dma_addr_t SG_handle;
int SG_length;
struct ivtv_sg_element *sg_pending;
struct ivtv_sg_element *sg_processing;
struct ivtv_sg_element *sg_dma;
dma_addr_t sg_handle;
int sg_pending_size;
int sg_processing_size;
int sg_processed;
/* SG List of Buffers */
struct scatterlist *SGlist;
@@ -637,7 +643,6 @@ struct vbi_info {
u32 enc_start, enc_size;
int fpi;
u32 frame;
u32 dma_offset;
u8 cc_data_odd[256];
u8 cc_data_even[256];
int cc_pos;
@@ -724,6 +729,7 @@ struct ivtv {
int cur_pio_stream; /* index of stream doing PIO */
u32 dma_data_req_offset;
u32 dma_data_req_size;
int dma_retries;
int output_mode; /* NONE, MPG, YUV, UDMA YUV, passthrough */
spinlock_t lock; /* lock access to this struct */
int search_pack_header;
This diff is collapsed.
@@ -195,7 +195,7 @@ void ivtv_flush_queues(struct ivtv_stream *s)
int ivtv_stream_alloc(struct ivtv_stream *s)
{
struct ivtv *itv = s->itv;
int SGsize = sizeof(struct ivtv_SG_element) * s->buffers;
int SGsize = sizeof(struct ivtv_sg_element) * s->buffers;
int i;
if (s->buffers == 0)
@@ -205,27 +205,33 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
s->dma != PCI_DMA_NONE ? "DMA " : "",
s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
if (ivtv_might_use_pio(s)) {
s->PIOarray = (struct ivtv_SG_element *)kzalloc(SGsize, GFP_KERNEL);
if (s->PIOarray == NULL) {
IVTV_ERR("Could not allocate PIOarray for %s stream\n", s->name);
return -ENOMEM;
}
s->sg_pending = (struct ivtv_sg_element *)kzalloc(SGsize, GFP_KERNEL);
if (s->sg_pending == NULL) {
IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
return -ENOMEM;
}
s->sg_pending_size = 0;
/* Allocate DMA SG Arrays */
s->SGarray = (struct ivtv_SG_element *)kzalloc(SGsize, GFP_KERNEL);
if (s->SGarray == NULL) {
IVTV_ERR("Could not allocate SGarray for %s stream\n", s->name);
if (ivtv_might_use_pio(s)) {
kfree(s->PIOarray);
s->PIOarray = NULL;
}
s->sg_processing = (struct ivtv_sg_element *)kzalloc(SGsize, GFP_KERNEL);
if (s->sg_processing == NULL) {
IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
kfree(s->sg_pending);
s->sg_pending = NULL;
return -ENOMEM;
}
s->sg_processing_size = 0;
s->sg_dma = (struct ivtv_sg_element *)kzalloc(sizeof(struct ivtv_sg_element), GFP_KERNEL);
if (s->sg_dma == NULL) {
IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
kfree(s->sg_pending);
s->sg_pending = NULL;
kfree(s->sg_processing);
s->sg_processing = NULL;
return -ENOMEM;
}
s->SG_length = 0;
if (ivtv_might_use_dma(s)) {
s->SG_handle = pci_map_single(itv->dev, s->SGarray, SGsize, s->dma);
s->sg_handle = pci_map_single(itv->dev, s->sg_dma, sizeof(struct ivtv_sg_element), s->dma);
ivtv_stream_sync_for_cpu(s);
}
@@ -272,16 +278,19 @@ void ivtv_stream_free(struct ivtv_stream *s)
}
/* Free SG Array/Lists */
if (s->SGarray != NULL) {
if (s->SG_handle != IVTV_DMA_UNMAPPED) {
pci_unmap_single(s->itv->dev, s->SG_handle,
sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
s->SG_handle = IVTV_DMA_UNMAPPED;
if (s->sg_dma != NULL) {
if (s->sg_handle != IVTV_DMA_UNMAPPED) {
pci_unmap_single(s->itv->dev, s->sg_handle,
sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
s->sg_handle = IVTV_DMA_UNMAPPED;
}
kfree(s->SGarray);
kfree(s->PIOarray);
s->PIOarray = NULL;
s->SGarray = NULL;
s->SG_length = 0;
kfree(s->sg_pending);
kfree(s->sg_processing);
kfree(s->sg_dma);
s->sg_pending = NULL;
s->sg_processing = NULL;
s->sg_dma = NULL;
s->sg_pending_size = 0;
s->sg_processing_size = 0;
}
}
@@ -79,13 +79,13 @@ void ivtv_stream_free(struct ivtv_stream *s);
static inline void ivtv_stream_sync_for_cpu(struct ivtv_stream *s)
{
if (ivtv_use_dma(s))
pci_dma_sync_single_for_cpu(s->itv->dev, s->SG_handle,
sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
pci_dma_sync_single_for_cpu(s->itv->dev, s->sg_handle,
sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
}
static inline void ivtv_stream_sync_for_device(struct ivtv_stream *s)
{
if (ivtv_use_dma(s))
pci_dma_sync_single_for_device(s->itv->dev, s->SG_handle,
sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
pci_dma_sync_single_for_device(s->itv->dev, s->sg_handle,
sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
}
@@ -154,7 +154,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
spin_lock_init(&s->qlock);
init_waitqueue_head(&s->waitq);
s->id = -1;
s->SG_handle = IVTV_DMA_UNMAPPED;
s->sg_handle = IVTV_DMA_UNMAPPED;
ivtv_queue_init(&s->q_free);
ivtv_queue_init(&s->q_full);
ivtv_queue_init(&s->q_dma);
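
For completeness: the interrupt-handler changes that actually drive the new
sg_pending/sg_processing/sg_dma fields are in the collapsed part of this
diff. The hand-off they imply looks roughly like this (an assumed sketch
based only on the declarations shown above, with a hypothetical helper
name):

    /* Assumed hand-off when the DMA engine goes idle: move the queued
     * elements over to the processing array and work through them one at
     * a time. Not the collapsed interrupt-handler code itself. */
    static void ivtv_start_next_batch(struct ivtv_stream *s)
    {
            int i;

            for (i = 0; i < s->sg_pending_size; i++)
                    s->sg_processing[i] = s->sg_pending[i];
            s->sg_processing_size = s->sg_pending_size;
            s->sg_pending_size = 0;
            s->sg_processed = 0;
            /* now DMA sg_processing[0..sg_processing_size) one element at
             * a time, as sketched in the commit message above */
    }

The new itv->dma_retries counter fits the same picture: presumably it lets a
timed-out single transfer be retried a few times instead of wedging the
stream.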