2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "ivtv-driver.h"
22 #include "ivtv-queue.h"
23 #include "ivtv-udma.h"
25 #include "ivtv-mailbox.h"
28 #include <media/v4l2-event.h>
30 #define DMA_MAGIC_COOKIE 0x000001fe
32 static void ivtv_dma_dec_start(struct ivtv_stream *s);
/* Maps the DMA-request index reported by the encoder firmware (data[0] in
 * ivtv_irq_enc_start_cap) to the ivtv stream type that owns the data. */
34 static const int ivtv_stream_map[] = {
35 IVTV_ENC_STREAM_TYPE_MPG,
36 IVTV_ENC_STREAM_TYPE_YUV,
37 IVTV_ENC_STREAM_TYPE_PCM,
38 IVTV_ENC_STREAM_TYPE_VBI,
/* Perform the actual PIO copy for the stream selected in itv->cur_pio_stream:
 * data is copied from card memory (encoder or decoder space) into each buffer
 * queued on s->q_dma, then the PIO-complete interrupt is raised so that
 * completion handling can run.
 * NOTE(review): the loop index 'i' and several structural lines are not
 * visible in this extract; presumably 'i' advances once per buffer through
 * s->sg_processing[] -- confirm against the full source. */
42 static void ivtv_pio_work_handler(struct ivtv *itv)
44 struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
45 struct ivtv_buffer *buf;
48 IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
/* Stream gone, not started, or no longer in PIO mode: reset the current
 * PIO stream and still raise the completion interrupt to clean up state. */
49 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
50 s->vdev == NULL || !ivtv_use_pio(s)) {
51 itv->cur_pio_stream = -1;
52 /* trigger PIO complete user interrupt */
53 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
56 IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
57 list_for_each_entry(buf, &s->q_dma.list, list) {
/* Transfer sizes are limited to 18 bits by the hardware SG element format */
58 u32 size = s->sg_processing[i].size & 0x3ffff;
60 /* Copy the data from the card to the buffer */
61 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
/* Decoder VBI source addresses are in decoder memory space */
62 memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
65 memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
68 if (i == s->sg_processing_size)
71 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
74 void ivtv_irq_work_handler(struct work_struct *work)
76 struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
80 if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) {
81 struct sched_param param = { .sched_priority = 99 };
83 /* This thread must use the FIFO scheduler as it
84 is realtime sensitive. */
85 sched_setscheduler(current, SCHED_FIFO, ¶m);
87 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
88 ivtv_pio_work_handler(itv);
90 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
91 ivtv_vbi_work_handler(itv);
93 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
94 ivtv_yuv_work_handler(itv);
97 /* Determine the required DMA size, setup enough buffers in the predma queue and
98 actually copy the data from the card to the buffers in case a PIO transfer is
99 required for this stream.
NOTE(review): the closing comment delimiter was missing in this extract and
has been restored below; the return-value contract (several early returns) is
not fully visible here -- confirm against the full source. */
101 static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
103 struct ivtv *itv = s->itv;
104 struct ivtv_buffer *buf;
105 u32 bytes_needed = 0;
107 u32 UVoffset = 0, UVsize = 0;
108 int skip_bufs = s->q_predma.buffers;
109 int idx = s->sg_pending_size;
/* Guard: the stream must be started and claimed by an application */
113 if (s->vdev == NULL) {
114 IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
117 if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
118 IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
122 /* determine offset, size and PTS for the various streams */
124 case IVTV_ENC_STREAM_TYPE_MPG:
130 case IVTV_ENC_STREAM_TYPE_YUV:
135 s->pending_pts = ((u64) data[5] << 32) | data[6];
138 case IVTV_ENC_STREAM_TYPE_PCM:
139 offset = data[1] + 12;
/* PCM PTS is stored in the two 32-bit words just before the data */
141 s->pending_pts = read_dec(offset - 8) |
142 ((u64)(read_dec(offset - 12)) << 32);
143 if (itv->has_cx23415)
144 offset += IVTV_DECODER_OFFSET;
147 case IVTV_ENC_STREAM_TYPE_VBI:
148 size = itv->vbi.enc_size * itv->vbi.fpi;
149 offset = read_enc(itv->vbi.enc_start - 4) + 12;
151 IVTV_DEBUG_INFO("VBI offset == 0\n");
154 s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
157 case IVTV_DEC_STREAM_TYPE_VBI:
158 size = read_dec(itv->vbi.dec_start + 4) + 8;
159 offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
161 offset += IVTV_DECODER_OFFSET;
164 /* shouldn't happen */
168 /* if this is the start of the DMA then fill in the magic cookie */
169 if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
/* PCM and decoder VBI on the cx23415 live in decoder memory space; the
 * first word at the transfer source is backed up and replaced by
 * DMA_MAGIC_COOKIE so dma_post() can later locate the start of data. */
170 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
171 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
172 s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
173 write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
176 s->pending_backup = read_enc(offset);
177 write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
179 s->pending_offset = offset;
183 if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
184 /* The size for the Y samples needs to be rounded upwards to a
185 multiple of the buf_size. The UV samples then start in the
187 bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
188 bytes_needed += UVsize;
191 IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
192 ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
/* Reserve buffers for the transfer; rc > 0 means buffers were stolen
 * from q_full because the application is reading too slowly. */
194 rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
195 if (rc < 0) { /* Insufficient buffers */
196 IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
197 bytes_needed, s->name);
200 if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
201 IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
202 IVTV_WARN("Cause: the application is not reading fast enough.\n");
204 s->buffers_stolen = rc;
206 /* got the buffers, now fill in sg_pending */
207 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
208 memset(buf->buf, 0, 128);
209 list_for_each_entry(buf, &s->q_predma.list, list) {
212 s->sg_pending[idx].dst = buf->dma_handle;
213 s->sg_pending[idx].src = offset;
214 s->sg_pending[idx].size = s->buf_size;
215 buf->bytesused = min(size, s->buf_size);
216 buf->dma_xfer_cnt = s->dma_xfer_cnt;
218 s->q_predma.bytesused += buf->bytesused;
219 size -= buf->bytesused;
220 offset += s->buf_size;
222 /* Sync SG buffers */
223 ivtv_buf_sync_for_device(s, buf);
225 if (size == 0) { /* YUV */
226 /* process the UV section */
232 s->sg_pending_size = idx;
/* Post-process a completed PIO/DMA transfer on stream s: locate the
 * DMA_MAGIC_COOKIE written by stream_enc_dma_append() to find the real start
 * of the data, restore the backed-up first word, flag MPG/VBI buffers for a
 * later byteswap, groom decoder VBI data, and finally move the buffers to
 * q_free (decoder VBI) or q_full (everything else). */
236 static void dma_post(struct ivtv_stream *s)
238 struct ivtv *itv = s->itv;
239 struct ivtv_buffer *buf = NULL;
245 IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
246 s->name, s->dma_offset);
247 list_for_each(p, &s->q_dma.list) {
248 buf = list_entry(p, struct ivtv_buffer, list);
249 u32buf = (__le32 *)buf->buf;
252 ivtv_buf_sync_for_cpu(s, buf);
/* Only the first buffer of a DMA transfer carries the magic cookie */
254 if (x == 0 && ivtv_use_dma(s)) {
255 offset = s->dma_last_offset;
/* Try the last known offset first; otherwise scan the first 64
 * 32-bit words (256 bytes) for the cookie. */
256 if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
258 for (offset = 0; offset < 64; offset++) {
259 if (u32buf[offset] == DMA_MAGIC_COOKIE) {
265 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
266 offset = s->dma_last_offset;
268 if (s->dma_last_offset != offset)
269 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
270 s->dma_last_offset = offset;
/* Clear the cookie on the card side (decoder space for cx23415
 * PCM/decoder-VBI, encoder space otherwise). */
272 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
273 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
274 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
277 write_enc_sync(0, s->dma_offset);
/* Drop the bytes before the real start of data and restore the
 * word that the cookie overwrote. */
280 buf->bytesused -= offset;
281 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
283 *u32buf = cpu_to_le32(s->dma_backup);
286 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
287 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
288 s->type == IVTV_ENC_STREAM_TYPE_VBI)
289 buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
292 buf->bytesused += s->dma_last_offset;
293 if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
294 list_for_each_entry(buf, &s->q_dma.list, list) {
295 /* Parse and Groom VBI Data */
296 s->q_dma.bytesused -= buf->bytesused;
297 ivtv_process_vbi_data(itv, buf, 0, s->type);
298 s->q_dma.bytesused += buf->bytesused;
301 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
305 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
/* Build the scatter-gather pending list for a decoder (host -> card) DMA from
 * the buffers queued on s->q_predma, then start the DMA immediately if no
 * other DMA is in flight, or mark the stream DMA-pending otherwise.
 * 'offset' is the card destination address; 'lock' selects whether the
 * dma_reg_lock must be taken here (spinlock use below suggests the unlocked
 * path is only partially visible in this extract). */
310 void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
312 struct ivtv *itv = s->itv;
313 struct yuv_playback_info *yi = &itv->yuv_info;
314 u8 frame = yi->draw_frame;
315 struct yuv_frame_info *f = &yi->new_frame_info[frame];
316 struct ivtv_buffer *buf;
/* Luma plane size: 720 bytes/line, height rounded up to a multiple of 32 */
317 u32 y_size = 720 * ((f->src_h + 31) & ~31);
318 u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
320 int bytes_written = 0;
321 unsigned long flags = 0;
324 IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
326 /* Insert buffer block for YUV if needed */
327 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
328 if (yi->blanking_dmaptr) {
329 s->sg_pending[idx].src = yi->blanking_dmaptr;
330 s->sg_pending[idx].dst = offset;
331 s->sg_pending[idx].size = 720 * 16;
337 list_for_each_entry(buf, &s->q_predma.list, list) {
338 /* YUV UV Offset from Y Buffer */
/* A buffer that straddles the Y/UV boundary is split into two SG
 * elements: the tail of Y, then the head of UV at uv_offset. */
339 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
340 (bytes_written + buf->bytesused) >= y_size) {
341 s->sg_pending[idx].src = buf->dma_handle;
342 s->sg_pending[idx].dst = offset;
343 s->sg_pending[idx].size = y_size - bytes_written;
345 if (s->sg_pending[idx].size != buf->bytesused) {
347 s->sg_pending[idx].src =
348 buf->dma_handle + s->sg_pending[idx - 1].size;
349 s->sg_pending[idx].dst = offset;
350 s->sg_pending[idx].size =
351 buf->bytesused - s->sg_pending[idx - 1].size;
352 offset += s->sg_pending[idx].size;
356 s->sg_pending[idx].src = buf->dma_handle;
357 s->sg_pending[idx].dst = offset;
358 s->sg_pending[idx].size = buf->bytesused;
359 offset += buf->bytesused;
361 bytes_written += buf->bytesused;
363 /* Sync SG buffers */
364 ivtv_buf_sync_for_device(s, buf);
367 s->sg_pending_size = idx;
369 /* Sync Hardware SG List of buffers */
370 ivtv_stream_sync_for_device(s);
372 spin_lock_irqsave(&itv->dma_reg_lock, flags);
/* Start now if the DMA engine is idle, else queue for the IRQ handler */
373 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
374 ivtv_dma_dec_start(s);
377 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
380 spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
/* Program and kick a single encoder (card -> host) DMA segment: copy the
 * current sg_processing element into the hardware SG descriptor, sync it,
 * point the DMA engine at it, start the encoder transfer (bit 0x02), and arm
 * a 300 ms watchdog timer that fires if the transfer never completes. */
383 static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
385 struct ivtv *itv = s->itv;
/* Bit 31 of the size field marks this as the last/valid SG element */
387 s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
388 s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
389 s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
391 /* Sync Hardware SG List of buffers */
392 ivtv_stream_sync_for_device(s);
393 write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
394 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
395 itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
396 add_timer(&itv->dma_timer);
/* Program and kick a single decoder (host -> card) DMA segment; mirror image
 * of ivtv_dma_enc_start_xfer() but using the decoder DMA address register and
 * transfer-start bit 0x01, with the same 300 ms watchdog. */
399 static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
401 struct ivtv *itv = s->itv;
403 s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
404 s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
405 s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
407 /* Sync Hardware SG List of buffers */
408 ivtv_stream_sync_for_device(s);
409 write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
410 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
411 itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
412 add_timer(&itv->dma_timer);
415 /* start the encoder DMA */
/* Move the pending predma buffers to q_dma, optionally fold a pending VBI
 * transfer into an MPEG transfer (see comment below), snapshot sg_pending
 * into sg_processing, and start either a PIO work item or the first DMA
 * segment. Caller context: presumably holds dma_reg_lock -- confirm. */
416 static void ivtv_dma_enc_start(struct ivtv_stream *s)
418 struct ivtv *itv = s->itv;
419 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
422 IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
424 if (s->q_predma.bytesused)
425 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
/* Pad the final SG element; the extra 256 bytes cover the cookie search
 * window used by dma_post(). */
428 s->sg_pending[s->sg_pending_size - 1].size += 256;
430 /* If this is an MPEG stream, and VBI data is also pending, then append the
431 VBI DMA to the MPEG DMA and transfer both sets of data at once.
433 VBI DMA is a second class citizen compared to MPEG and mixing them together
434 will confuse the firmware (the end of a VBI DMA is seen as the end of a
435 MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
436 sure we only use the MPEG DMA to transfer the VBI DMA if both are in
437 use. This way no conflicts occur. */
438 clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
439 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
440 s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
441 ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
442 if (ivtv_use_dma(s_vbi))
443 s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
444 for (i = 0; i < s_vbi->sg_pending_size; i++) {
445 s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
447 s_vbi->dma_offset = s_vbi->pending_offset;
448 s_vbi->sg_pending_size = 0;
449 s_vbi->dma_xfer_cnt++;
450 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
451 IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
/* Snapshot the pending SG list for processing and latch offset/backup/PTS */
455 memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
456 s->sg_processing_size = s->sg_pending_size;
457 s->sg_pending_size = 0;
459 s->dma_offset = s->pending_offset;
460 s->dma_backup = s->pending_backup;
461 s->dma_pts = s->pending_pts;
/* PIO streams are handled by the work queue; DMA streams start here */
463 if (ivtv_use_pio(s)) {
464 set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
465 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
466 set_bit(IVTV_F_I_PIO, &itv->i_flags);
467 itv->cur_pio_stream = s->type;
470 itv->dma_retries = 0;
471 ivtv_dma_enc_start_xfer(s);
472 set_bit(IVTV_F_I_DMA, &itv->i_flags);
473 itv->cur_dma_stream = s->type;
/* Start a decoder (host -> card) DMA: move the prepared predma buffers to
 * q_dma, snapshot sg_pending into sg_processing, and kick the first segment.
 * Sets the global IVTV_F_I_DMA flag and records the owning stream. */
477 static void ivtv_dma_dec_start(struct ivtv_stream *s)
479 struct ivtv *itv = s->itv;
481 if (s->q_predma.bytesused)
482 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
484 memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
485 s->sg_processing_size = s->sg_pending_size;
486 s->sg_pending_size = 0;
489 IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
490 itv->dma_retries = 0;
491 ivtv_dma_dec_start_xfer(s);
492 set_bit(IVTV_F_I_DMA, &itv->i_flags);
493 itv->cur_dma_stream = s->type;
/* IRQ handler for decoder DMA-read completion. Cancels the watchdog timer,
 * checks DMA status for errors (retrying the whole transfer up to 3 times),
 * chains the next SG segment if more remain, and on full completion kicks
 * the firmware with the transferred size and recycles the buffers. */
496 static void ivtv_irq_dma_read(struct ivtv *itv)
498 struct ivtv_stream *s = NULL;
499 struct ivtv_buffer *buf;
500 int hw_stream_type = 0;
502 IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
504 del_timer(&itv->dma_timer);
/* Nothing to do if neither a user-DMA nor a stream DMA is active */
506 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
509 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
510 s = &itv->streams[itv->cur_dma_stream];
511 ivtv_stream_sync_for_cpu(s);
/* Status bits 0x14 indicate a DMA error; ack by clearing bits 0-1 */
513 if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
514 IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
515 read_reg(IVTV_REG_DMASTATUS),
516 s->sg_processed, s->sg_processing_size, itv->dma_retries);
517 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
518 if (itv->dma_retries == 3) {
519 /* Too many retries, give up on this frame */
520 itv->dma_retries = 0;
521 s->sg_processed = s->sg_processing_size;
524 /* Retry, starting with the first xfer segment.
525 Just retrying the current segment is not sufficient. */
530 if (s->sg_processed < s->sg_processing_size) {
531 /* DMA next buffer */
532 ivtv_dma_dec_start_xfer(s);
535 if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
537 IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
539 /* For some reason must kick the firmware, like PIO mode,
540 I think this tells the firmware we are done and the size
541 of the xfer so it can calculate what we need next.
542 I think we can do this part ourselves but would have to
543 fully calculate xfer info ourselves and not use interrupts
like this. */
545 ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
548 /* Free last DMA call */
549 while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
550 ivtv_buf_sync_for_cpu(s, buf);
551 ivtv_enqueue(s, buf, &s->q_free);
/* Clear DMA-in-flight state and wake anyone waiting on the DMA queue */
555 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
556 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
557 itv->cur_dma_stream = -1;
558 wake_up(&itv->dma_waitq);
/* IRQ handler for encoder DMA completion. Reads the DMA-end mailbox, checks
 * for errors (retrying the full transfer up to 3 times), chains the next SG
 * segment if more remain, and on completion clears the DMA state — also for
 * a VBI transfer that was piggy-backed on an MPEG DMA. */
561 static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
563 u32 data[CX2341X_MBOX_MAX_DATA];
564 struct ivtv_stream *s;
566 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
567 IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
569 del_timer(&itv->dma_timer);
571 if (itv->cur_dma_stream < 0)
574 s = &itv->streams[itv->cur_dma_stream];
575 ivtv_stream_sync_for_cpu(s);
/* Mailbox status bits 0x18 indicate a transfer error */
577 if (data[0] & 0x18) {
578 IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
579 s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
580 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
581 if (itv->dma_retries == 3) {
582 /* Too many retries, give up on this frame */
583 itv->dma_retries = 0;
584 s->sg_processed = s->sg_processing_size;
587 /* Retry, starting with the first xfer segment.
588 Just retrying the current segment is not sufficient. */
593 if (s->sg_processed < s->sg_processing_size) {
594 /* DMA next buffer */
595 ivtv_dma_enc_start_xfer(s);
598 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
599 itv->cur_dma_stream = -1;
/* If VBI data rode along with this MPEG DMA, finish the VBI stream too */
601 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
602 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
605 s->sg_processing_size = 0;
607 wake_up(&itv->dma_waitq);
/* IRQ handler for encoder PIO completion: resets the current PIO stream,
 * acknowledges the completed transfer to the firmware (per stream type) and
 * handles a VBI transfer that was folded into this one. */
610 static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
612 struct ivtv_stream *s;
614 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
615 itv->cur_pio_stream = -1;
618 s = &itv->streams[itv->cur_pio_stream];
619 IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
620 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
621 itv->cur_pio_stream = -1;
/* Tell the firmware which hardware stream finished (last arg: 0=MPG,
 * 1=YUV, 2=PCM) so it can schedule the next transfer. */
623 if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
624 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
625 else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
626 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
627 else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
628 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
629 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
630 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
631 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
634 wake_up(&itv->dma_waitq);
/* IRQ handler for a DMA error interrupt: log the status, ack the error,
 * restart the failed stream DMA (decoder or encoder side) or a pending user
 * DMA; otherwise drop all DMA state and wake waiters. */
637 static void ivtv_irq_dma_err(struct ivtv *itv)
639 u32 data[CX2341X_MBOX_MAX_DATA];
641 del_timer(&itv->dma_timer);
642 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
643 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
644 read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
645 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
646 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
647 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
648 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
/* Decoder streams restart on the decoder engine, others on the encoder */
651 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
652 ivtv_dma_dec_start(s);
654 ivtv_dma_enc_start(s);
657 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
658 ivtv_udma_start(itv);
661 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
662 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
663 itv->cur_dma_stream = -1;
664 wake_up(&itv->dma_waitq);
/* IRQ handler for an encoder capture-start request: fetch the DMA arguments
 * from the mailbox, map data[0] via ivtv_stream_map[] to the target stream,
 * and queue the transfer; if it cannot start now, mark it pending. */
667 static void ivtv_irq_enc_start_cap(struct ivtv *itv)
669 u32 data[CX2341X_MBOX_MAX_DATA];
670 struct ivtv_stream *s;
672 /* Get DMA destination and size arguments from card */
673 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
674 IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
/* Only stream indices 0-2 with non-zero offset/size are valid here */
676 if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
677 IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
678 data[0], data[1], data[2]);
681 s = &itv->streams[ivtv_stream_map[data[0]]];
682 if (!stream_enc_dma_append(s, data)) {
683 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
/* IRQ handler for an encoder VBI capture request: queue a transfer on the
 * encoder VBI stream and mark it PIO- or DMA-pending as appropriate. */
687 static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
689 u32 data[CX2341X_MBOX_MAX_DATA];
690 struct ivtv_stream *s;
692 IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
693 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
695 if (!stream_enc_dma_append(s, data))
696 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
/* IRQ handler for decoder VBI reinsertion: if the decoder VBI stream is
 * claimed, queue a transfer and mark it PIO-pending (this path always uses
 * PIO, never DMA). */
699 static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
701 u32 data[CX2341X_MBOX_MAX_DATA];
702 struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
704 IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
705 if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
706 !stream_enc_dma_append(s, data)) {
707 set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
/* IRQ handler for a decoder data request: the firmware asks the host for
 * more YUV or MPEG data. Record the requested offset/size; if enough data is
 * buffered, move it to q_predma and prepare the host->card DMA, otherwise
 * flag the stream as needing data. */
711 static void ivtv_irq_dec_data_req(struct ivtv *itv)
713 u32 data[CX2341X_MBOX_MAX_DATA];
714 struct ivtv_stream *s;
718 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
719 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
/* YUV frame size: 1080 bytes/line x height rounded up to 32 */
720 itv->dma_data_req_size =
721 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
722 itv->dma_data_req_offset = data[1];
723 if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
724 ivtv_yuv_frame_complete(itv);
725 s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
/* MPEG request: size comes from the mailbox, capped at 64 KiB */
728 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
729 itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
730 itv->dma_data_req_offset = data[1];
731 s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
733 IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
734 itv->dma_data_req_offset, itv->dma_data_req_size);
735 if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
736 set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
739 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
740 ivtv_yuv_setup_stream_frame(itv);
741 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
742 ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
743 ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
/* Decoder vertical-sync handling: advance the YUV display frame pointers,
 * emit V4L2 VSYNC events, schedule VBI output work and YUV register updates.
 * See the comment below for why this interrupt needs special care. */
747 static void ivtv_irq_vsync(struct ivtv *itv)
749 /* The vsync interrupt is unusual in that it won't clear until
750 * the end of the first line for the current field, at which
751 * point it clears itself. This can result in repeated vsync
752 * interrupts, or a missed vsync. Read some of the registers
753 * to determine the line being displayed and ensure we handle
754 * one vsync per frame.
*/
756 unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
757 struct yuv_playback_info *yi = &itv->yuv_info;
758 int last_dma_frame = atomic_read(&yi->next_dma_frame);
759 struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];
761 if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
/* Handle the vsync only once per frame: either the field matches the
 * expected sync field, or a progressive frame changed fields. */
763 if (((frame ^ f->sync_field) == 0 &&
764 ((itv->last_vsync_field & 1) ^ f->sync_field)) ||
765 (frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
766 int next_dma_frame = last_dma_frame;
768 if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
769 if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
/* Program the display base addresses (Y and UV, both fields)
 * for the next YUV frame, then advance the ring index. */
770 write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
771 write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
772 write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
773 write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
774 next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
775 atomic_set(&yi->next_dma_frame, next_dma_frame);
776 yi->fields_lapsed = -1;
/* Field changed: emit a V4L2 VSYNC event and wake vsync waiters */
781 if (frame != (itv->last_vsync_field & 1)) {
782 static const struct v4l2_event evtop = {
783 .type = V4L2_EVENT_VSYNC,
784 .u.vsync.field = V4L2_FIELD_TOP,
786 static const struct v4l2_event evbottom = {
787 .type = V4L2_EVENT_VSYNC,
788 .u.vsync.field = V4L2_FIELD_BOTTOM,
790 struct ivtv_stream *s = ivtv_get_output_stream(itv);
792 itv->last_vsync_field += 1;
794 clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
795 clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
798 set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
800 if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
801 set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
802 wake_up(&itv->event_waitq);
807 v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
808 wake_up(&itv->vsync_waitq);
810 /* Send VBI to saa7127 */
811 if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
812 test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
813 test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
814 test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
815 set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
816 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
819 /* Check if we need to update the yuv registers */
820 if (yi->running && (yi->yuv_forced_update || f->update)) {
823 (u8)(atomic_read(&yi->next_dma_frame) -
824 1) % IVTV_YUV_BUFFERS;
825 f = &yi->new_frame_info[last_dma_frame];
829 yi->update_frame = last_dma_frame;
831 yi->yuv_forced_update = 0;
832 set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
833 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
841 #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
/* Top-level interrupt handler: reads and acks the IRQ status register,
 * dispatches each asserted interrupt to its handler, then round-robins any
 * pending per-stream DMA/PIO work and finally schedules the deferred work
 * queue. Runs under dma_reg_lock. */
843 irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
845 struct ivtv *itv = (struct ivtv *)dev_id;
851 spin_lock(&itv->dma_reg_lock);
852 /* get contents of irq status register */
853 stat = read_reg(IVTV_REG_IRQSTATUS);
/* Only the interrupts we have unmasked are ours to handle */
855 combo = ~itv->irqmask & stat;
/* Acknowledge what we are about to handle */
858 if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);
861 /* The vsync interrupt is unusual and clears itself. If we
862 * took too long, we may have missed it. Do some checks
*/
864 if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
865 /* vsync is enabled, see if we're in a new field */
866 if ((itv->last_vsync_field & 1) !=
867 (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
868 /* New field, looks like we missed it */
869 IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
870 read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
876 /* No Vsync expected, wasn't for us */
877 spin_unlock(&itv->dma_reg_lock);
882 /* Exclude interrupts noted below from the output, otherwise the log is flooded with
these messages */
884 if (combo & ~0xff6d0400)
885 IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
887 if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
888 IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
891 if (combo & IVTV_IRQ_DMA_READ) {
892 ivtv_irq_dma_read(itv);
895 if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
896 ivtv_irq_enc_dma_complete(itv);
899 if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
900 ivtv_irq_enc_pio_complete(itv);
903 if (combo & IVTV_IRQ_DMA_ERR) {
904 ivtv_irq_dma_err(itv);
907 if (combo & IVTV_IRQ_ENC_START_CAP) {
908 ivtv_irq_enc_start_cap(itv);
911 if (combo & IVTV_IRQ_ENC_VBI_CAP) {
912 ivtv_irq_enc_vbi_cap(itv);
915 if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
916 ivtv_irq_dec_vbi_reinsert(itv);
919 if (combo & IVTV_IRQ_ENC_EOS) {
920 IVTV_DEBUG_IRQ("ENC EOS\n");
921 set_bit(IVTV_F_I_EOS, &itv->i_flags);
922 wake_up(&itv->eos_waitq);
925 if (combo & IVTV_IRQ_DEC_DATA_REQ) {
926 ivtv_irq_dec_data_req(itv);
929 /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
930 if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
934 if (combo & IVTV_IRQ_ENC_VIM_RST) {
935 IVTV_DEBUG_IRQ("VIM RST\n");
936 /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
939 if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
940 IVTV_DEBUG_INFO("Stereo mode changed\n");
/* If no DMA is in flight, start the next pending stream DMA, visiting
 * streams round-robin (irq_rr_idx) so no stream is starved. */
943 if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
945 for (i = 0; i < IVTV_MAX_STREAMS; i++) {
946 int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
947 struct ivtv_stream *s = &itv->streams[idx];
949 if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
951 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
952 ivtv_dma_dec_start(s);
954 ivtv_dma_enc_start(s);
/* No stream DMA started: a queued user-DMA may run instead */
958 if (i == IVTV_MAX_STREAMS &&
959 test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
960 ivtv_udma_start(itv);
/* Same round-robin scheme for pending PIO transfers */
963 if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
965 for (i = 0; i < IVTV_MAX_STREAMS; i++) {
966 int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
967 struct ivtv_stream *s = &itv->streams[idx];
969 if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
971 if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
972 ivtv_dma_enc_start(s);
977 if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
978 queue_work(itv->irq_work_queues, &itv->irq_work_queue);
981 spin_unlock(&itv->dma_reg_lock);
983 /* If we've just handled a 'forced' vsync, it's safest to say it
984 * wasn't ours. Another device may have triggered it at just
987 return vsync_force ? IRQ_NONE : IRQ_HANDLED;
990 void ivtv_unfinished_dma(unsigned long arg)
992 struct ivtv *itv = (struct ivtv *)arg;
994 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
996 IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
998 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
999 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
1000 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
1001 itv->cur_dma_stream = -1;
1002 wake_up(&itv->dma_waitq);