Commit f4071b85 authored by Hans Verkuil, committed by Mauro Carvalho Chehab

V4L/DVB (6046): ivtv: always steal full frames if out of buffers.

When there are no more free buffers, buffers are stolen from the predma
queue. Buffers should be stolen from the tail of that queue (which is
where the most recently added buffers are), and all buffers belonging to
the same frame should be stolen together. Otherwise 'half-frames' would
remain in the queue, which leads to ugly playback and complete sync
failure for YUV buffers.
Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@infradead.org>
parent 3562c43b
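For illustration, here is a minimal userspace sketch of the rule this patch enforces (it is not the driver code; the `demo_buf` type and `steal_whole_frame()` helper are invented for this example): when buffers must be stolen, keep taking buffers from the tail of the queue until the `dma_xfer_cnt` value changes, so a frame that spans several buffers is never split. In the driver itself this is done with `list_move_tail()` on the predma list inside `ivtv_queue_move()`, as shown in the diff below.

```c
/* Standalone sketch, not ivtv code: steal whole frames from the tail of a queue. */
#include <stdio.h>

struct demo_buf {
	unsigned short dma_xfer_cnt;	/* buffers of one frame share this value */
	unsigned int bytesused;
};

/* Steal from the tail (highest index) of 'q' into 'out'; return how many were taken. */
static int steal_whole_frame(const struct demo_buf *q, int n, struct demo_buf *out)
{
	int stolen = 0;

	if (n == 0)
		return 0;

	/* the frame at the tail is identified by its dma_xfer_cnt */
	unsigned short cnt = q[n - 1].dma_xfer_cnt;

	/* take every trailing buffer with that same dma_xfer_cnt */
	while (n > 0 && q[n - 1].dma_xfer_cnt == cnt) {
		out[stolen++] = q[n - 1];
		n--;
	}
	return stolen;
}

int main(void)
{
	/* two frames: frame 7 in one buffer, frame 8 split over two buffers */
	struct demo_buf q[] = {
		{ .dma_xfer_cnt = 7, .bytesused = 65536 },
		{ .dma_xfer_cnt = 8, .bytesused = 65536 },
		{ .dma_xfer_cnt = 8, .bytesused = 12000 },
	};
	struct demo_buf out[3];
	int stolen = steal_whole_frame(q, 3, out);

	/* both buffers of frame 8 are taken together; frame 7 stays intact */
	printf("stole %d buffer(s) of frame %u\n", stolen, (unsigned)out[0].dma_xfer_cnt);
	return 0;
}
```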
@@ -356,7 +356,7 @@ struct ivtv_mailbox_data {
 };

 /* per-buffer bit flags */
-#define IVTV_F_B_NEED_BUF_SWAP 0 /* this buffer should be byte swapped */
+#define IVTV_F_B_NEED_BUF_SWAP (1 << 0) /* this buffer should be byte swapped */

 /* per-stream, s_flags */
 #define IVTV_F_S_DMA_PENDING 0 /* this stream has pending DMA */
@@ -437,7 +437,8 @@ struct ivtv_dma_page_info {
 struct ivtv_buffer {
         struct list_head list;
         dma_addr_t dma_handle;
-        unsigned long b_flags;
+        unsigned short b_flags;
+        unsigned short dma_xfer_cnt;
         char *buf;

         u32 bytesused;
@@ -487,6 +488,10 @@ struct ivtv_stream {
         struct ivtv_queue q_dma;        /* waiting for DMA */
         struct ivtv_queue q_predma;     /* waiting for DMA */

+        /* DMA xfer counter, buffers belonging to the same DMA
+           xfer will have the same dma_xfer_cnt. */
+        u16 dma_xfer_cnt;
+
         /* Base Dev SG Array for cx23415/6 */
         struct ivtv_SG_element *SGarray;
         struct ivtv_SG_element *PIOarray;
@@ -247,8 +247,9 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
                 /* do we have new data? */
                 buf = ivtv_dequeue(s, &s->q_full);
                 if (buf) {
-                        if (!test_and_clear_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags))
+                        if ((buf->b_flags & IVTV_F_B_NEED_BUF_SWAP) == 0)
                                 return buf;
+                        buf->b_flags &= ~IVTV_F_B_NEED_BUF_SWAP;
                         if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
                                 /* byteswap MPG data */
                                 ivtv_buf_swap(buf);
@@ -214,6 +214,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
                 s->SGarray[idx].src = cpu_to_le32(offset);
                 s->SGarray[idx].size = cpu_to_le32(s->buf_size);
                 buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
+                buf->dma_xfer_cnt = s->dma_xfer_cnt;

                 s->q_predma.bytesused += buf->bytesused;
                 size -= buf->bytesused;
@@ -286,7 +287,7 @@ static void dma_post(struct ivtv_stream *s)
                 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
                 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
                     s->type == IVTV_ENC_STREAM_TYPE_VBI)
-                        set_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags);
+                        buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
         }
         if (buf)
                 buf->bytesused += s->dma_last_offset;
@@ -396,12 +397,14 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
                 }
                 itv->vbi.dma_offset = s_vbi->dma_offset;
                 s_vbi->SG_length = 0;
+                s_vbi->dma_xfer_cnt++;
                 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
                 IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
         }

         /* Mark last buffer size for Interrupt flag */
         s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);

+        s->dma_xfer_cnt++;
         if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
                 set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
@@ -60,6 +60,7 @@ void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_qu
                 buf->bytesused = 0;
                 buf->readpos = 0;
                 buf->b_flags = 0;
+                buf->dma_xfer_cnt = 0;
         }
         spin_lock_irqsave(&s->qlock, flags);
         list_add_tail(&buf->list, &q->list);
@@ -87,7 +88,7 @@ struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
 }

 static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
-                struct ivtv_queue *to, int clear, int full)
+                struct ivtv_queue *to, int clear)
 {
         struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);
@@ -97,13 +98,7 @@ static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
         from->bytesused -= buf->bytesused - buf->readpos;
         /* special handling for q_free */
         if (clear)
-                buf->bytesused = buf->readpos = buf->b_flags = 0;
-        else if (full) {
-                /* special handling for stolen buffers, assume
-                   all bytes are used. */
-                buf->bytesused = s->buf_size;
-                buf->readpos = buf->b_flags = 0;
-        }
+                buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
         to->buffers++;
         to->length += s->buf_size;
         to->bytesused += buf->bytesused - buf->readpos;
@@ -112,7 +107,7 @@ static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
 /* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
    If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
    If 'steal' != NULL, then buffers may also taken from that queue if
-   needed.
+   needed, but only if 'from' is the free queue.

    The buffer is automatically cleared if it goes to the free queue. It is
    also cleared if buffers need to be taken from the 'steal' queue and
@@ -133,7 +128,7 @@ int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_
         int rc = 0;
         int from_free = from == &s->q_free;
         int to_free = to == &s->q_free;
-        int bytes_available;
+        int bytes_available, bytes_steal;

         spin_lock_irqsave(&s->qlock, flags);
         if (needed_bytes == 0) {
@@ -142,32 +137,47 @@ int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_
         }

         bytes_available = from_free ? from->length : from->bytesused;
-        bytes_available += steal ? steal->length : 0;
+        bytes_steal = (from_free && steal) ? steal->length : 0;

-        if (bytes_available < needed_bytes) {
+        if (bytes_available + bytes_steal < needed_bytes) {
                 spin_unlock_irqrestore(&s->qlock, flags);
                 return -ENOMEM;
         }
+        while (bytes_available < needed_bytes) {
+                struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
+                u16 dma_xfer_cnt = buf->dma_xfer_cnt;
+
+                /* move buffers from the tail of the 'steal' queue to the tail of the
+                   'from' queue. Always copy all the buffers with the same dma_xfer_cnt
+                   value, this ensures that you do not end up with partial frame data
+                   if one frame is stored in multiple buffers. */
+                while (dma_xfer_cnt == buf->dma_xfer_cnt) {
+                        list_move_tail(steal->list.prev, &from->list);
+                        rc++;
+                        steal->buffers--;
+                        steal->length -= s->buf_size;
+                        steal->bytesused -= buf->bytesused - buf->readpos;
+                        buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
+                        from->buffers++;
+                        from->length += s->buf_size;
+                        bytes_available += s->buf_size;
+                        if (list_empty(&steal->list))
+                                break;
+                        buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
+                }
+        }
         if (from_free) {
                 u32 old_length = to->length;

                 while (to->length - old_length < needed_bytes) {
-                        if (list_empty(&from->list))
-                                from = steal;
-                        if (from == steal)
-                                rc++; /* keep track of 'stolen' buffers */
-                        ivtv_queue_move_buf(s, from, to, 1, 0);
+                        ivtv_queue_move_buf(s, from, to, 1);
                 }
         }
         else {
                 u32 old_bytesused = to->bytesused;

                 while (to->bytesused - old_bytesused < needed_bytes) {
-                        if (list_empty(&from->list))
-                                from = steal;
-                        if (from == steal)
-                                rc++; /* keep track of 'stolen' buffers */
-                        ivtv_queue_move_buf(s, from, to, to_free, rc);
+                        ivtv_queue_move_buf(s, from, to, to_free);
                 }
         }
         spin_unlock_irqrestore(&s->qlock, flags);