Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platf...
[sfrench/cifs-2.6.git] / drivers / media / video / ivtv / ivtv-irq.c
1 /* interrupt handling
2     Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
3     Copyright (C) 2004  Chris Kennedy <c@groovy.org>
4     Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>
5
6     This program is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     This program is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with this program; if not, write to the Free Software
18     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  */
20
21 #include "ivtv-driver.h"
22 #include "ivtv-queue.h"
23 #include "ivtv-udma.h"
24 #include "ivtv-irq.h"
25 #include "ivtv-mailbox.h"
26 #include "ivtv-vbi.h"
27 #include "ivtv-yuv.h"
28 #include <media/v4l2-event.h>
29
/* Marker written into card memory at the start of a DMA transfer; on
   completion we scan for it to detect how far the transfer was shifted
   (see dma_post()). */
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

/* Maps the stream index reported by the firmware in the ENC START CAP
   mailbox (data[0], range 0-3 — see ivtv_irq_enc_start_cap()) to the
   driver's encoder stream type. */
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
40
41
42 static void ivtv_pio_work_handler(struct ivtv *itv)
43 {
44         struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
45         struct ivtv_buffer *buf;
46         int i = 0;
47
48         IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
49         if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
50                         s->vdev == NULL || !ivtv_use_pio(s)) {
51                 itv->cur_pio_stream = -1;
52                 /* trigger PIO complete user interrupt */
53                 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
54                 return;
55         }
56         IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
57         list_for_each_entry(buf, &s->q_dma.list, list) {
58                 u32 size = s->sg_processing[i].size & 0x3ffff;
59
60                 /* Copy the data from the card to the buffer */
61                 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
62                         memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
63                 }
64                 else {
65                         memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
66                 }
67                 i++;
68                 if (i == s->sg_processing_size)
69                         break;
70         }
71         write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
72 }
73
74 void ivtv_irq_work_handler(struct work_struct *work)
75 {
76         struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
77
78         DEFINE_WAIT(wait);
79
80         if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) {
81                 struct sched_param param = { .sched_priority = 99 };
82
83                 /* This thread must use the FIFO scheduler as it
84                    is realtime sensitive. */
85                 sched_setscheduler(current, SCHED_FIFO, &param);
86         }
87         if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
88                 ivtv_pio_work_handler(itv);
89
90         if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
91                 ivtv_vbi_work_handler(itv);
92
93         if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
94                 ivtv_yuv_work_handler(itv);
95 }
96
97 /* Determine the required DMA size, setup enough buffers in the predma queue and
98    actually copy the data from the card to the buffers in case a PIO transfer is
99    required for this stream.
100  */
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.

   'data' holds the mailbox arguments from the firmware (offset/size/PTS);
   note the VBI stream types compute offset/size from registers instead and
   do not read 'data'. Returns 0 on success, -1 on any failure (stream not
   started/claimed, bad offset, or insufficient buffers).
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;	/* buffers already queued: don't re-fill them */
	int idx = s->sg_pending_size;		/* append to any pending SG entries */
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
		case IVTV_ENC_STREAM_TYPE_MPG:
			offset = data[1];
			size = data[2];
			s->pending_pts = 0;
			break;

		case IVTV_ENC_STREAM_TYPE_YUV:
			/* YUV has separate Y and UV planes in card memory */
			offset = data[1];
			size = data[2];
			UVoffset = data[3];
			UVsize = data[4];
			s->pending_pts = ((u64) data[5] << 32) | data[6];
			break;

		case IVTV_ENC_STREAM_TYPE_PCM:
			/* skip the 12-byte firmware header; PTS is stored in it */
			offset = data[1] + 12;
			size = data[2] - 12;
			s->pending_pts = read_dec(offset - 8) |
				((u64)(read_dec(offset - 12)) << 32);
			if (itv->has_cx23415)
				offset += IVTV_DECODER_OFFSET;
			break;

		case IVTV_ENC_STREAM_TYPE_VBI:
			size = itv->vbi.enc_size * itv->vbi.fpi;
			offset = read_enc(itv->vbi.enc_start - 4) + 12;
			/* offset == 12 means the card reported offset 0: no data */
			if (offset == 12) {
				IVTV_DEBUG_INFO("VBI offset == 0\n");
				return -1;
			}
			s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
			break;

		case IVTV_DEC_STREAM_TYPE_VBI:
			size = read_dec(itv->vbi.dec_start + 4) + 8;
			offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
			s->pending_pts = 0;
			offset += IVTV_DECODER_OFFSET;
			break;
		default:
			/* shouldn't happen */
			return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		/* Save the word we overwrite so dma_post() can restore it
		   after locating the cookie in the completed transfer. */
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	/* rc > 0 means buffers were stolen from q_full: data was dropped */
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}
235
236 static void dma_post(struct ivtv_stream *s)
237 {
238         struct ivtv *itv = s->itv;
239         struct ivtv_buffer *buf = NULL;
240         struct list_head *p;
241         u32 offset;
242         __le32 *u32buf;
243         int x = 0;
244
245         IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
246                         s->name, s->dma_offset);
247         list_for_each(p, &s->q_dma.list) {
248                 buf = list_entry(p, struct ivtv_buffer, list);
249                 u32buf = (__le32 *)buf->buf;
250
251                 /* Sync Buffer */
252                 ivtv_buf_sync_for_cpu(s, buf);
253
254                 if (x == 0 && ivtv_use_dma(s)) {
255                         offset = s->dma_last_offset;
256                         if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
257                         {
258                                 for (offset = 0; offset < 64; offset++) {
259                                         if (u32buf[offset] == DMA_MAGIC_COOKIE) {
260                                                 break;
261                                         }
262                                 }
263                                 offset *= 4;
264                                 if (offset == 256) {
265                                         IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
266                                         offset = s->dma_last_offset;
267                                 }
268                                 if (s->dma_last_offset != offset)
269                                         IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
270                                 s->dma_last_offset = offset;
271                         }
272                         if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
273                                                 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
274                                 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
275                         }
276                         else {
277                                 write_enc_sync(0, s->dma_offset);
278                         }
279                         if (offset) {
280                                 buf->bytesused -= offset;
281                                 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
282                         }
283                         *u32buf = cpu_to_le32(s->dma_backup);
284                 }
285                 x++;
286                 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
287                 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
288                     s->type == IVTV_ENC_STREAM_TYPE_VBI)
289                         buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
290         }
291         if (buf)
292                 buf->bytesused += s->dma_last_offset;
293         if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
294                 list_for_each_entry(buf, &s->q_dma.list, list) {
295                         /* Parse and Groom VBI Data */
296                         s->q_dma.bytesused -= buf->bytesused;
297                         ivtv_process_vbi_data(itv, buf, 0, s->type);
298                         s->q_dma.bytesused += buf->bytesused;
299                 }
300                 if (s->id == -1) {
301                         ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
302                         return;
303                 }
304         }
305         ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
306         if (s->id != -1)
307                 wake_up(&s->waitq);
308 }
309
/* Build the scatter-gather list for a host-to-decoder transfer starting at
   card address 'offset' and kick off (or queue) the decoder DMA. For YUV
   streams the Y and UV planes are split at y_size and an optional blanking
   block is prepended. 'lock' selects whether dma_reg_lock is taken here
   (0 when the caller already holds it). */
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	/* Y plane size: 720 pixels wide, height rounded up to a multiple of 32 */
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		/* NOTE(review): offset/idx advance even when blanking_dmaptr is 0,
		   leaving sg_pending[0] unset in that case — presumably the
		   firmware skips it; confirm before changing. */
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			/* This buffer straddles the Y/UV boundary: first part
			   finishes the Y plane... */
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			/* ...and the remainder (if any) starts the UV plane. */
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
				  buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
				   buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	/* Start now if the DMA engine is idle, otherwise leave the transfer
	   pending; it will be picked up when the current DMA completes. */
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
382
/* Program and start the next encoder DMA segment (sg_processing[sg_processed])
   and arm the 300 ms watchdog timer that catches a stalled transfer. */
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	/* bit 31 marks the last/only descriptor for the hardware */
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	/* bit 1 in DMAXFER kicks off the encoder transfer */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}
398
/* Program and start the next decoder DMA segment; mirror image of
   ivtv_dma_enc_start_xfer() but using the decoder address register and
   DMAXFER bit 0. Also arms the 300 ms stall watchdog. */
static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	/* bit 31 marks the last/only descriptor for the hardware */
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	/* bit 0 in DMAXFER kicks off the decoder transfer */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}
414
/* start the encoder DMA (or PIO) for stream 's': move pending buffers to
   q_dma, optionally fold a pending VBI transfer into an MPEG transfer,
   snapshot sg_pending into sg_processing, and kick the first segment. */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	/* pad the last segment so the magic cookie area is transferred too */
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	/* commit the pending transfer: pending_* becomes the active dma_* state */
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		/* PIO: defer the copy to the work handler thread */
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}
476
/* Start the decoder DMA for stream 's': move pending buffers to q_dma,
   snapshot the pending SG list into sg_processing, and kick the first
   transfer segment. Caller must hold dma_reg_lock (see
   ivtv_dma_stream_dec_prepare()). */
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}
495
/* Handle the "decoder DMA read complete" interrupt: check for errors and
   retry up to 3 times, continue with the next SG segment if any remain,
   tell the firmware how much was transferred, and recycle the buffers. */
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	/* transfer finished (or errored): stop the stall watchdog */
	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		/* bits 2 and 4 of DMASTATUS flag a transfer error */
		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
560
/* Handle the "encoder DMA complete" interrupt: check the firmware status,
   retry up to 3 times on error, continue with the next SG segment if any
   remain, then post-process the finished transfer (including a piggybacked
   VBI transfer, if flagged) via dma_post(). */
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	/* transfer finished (or errored): stop the stall watchdog */
	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	/* bits 3 and 4 of the status word flag a transfer error */
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	/* a VBI transfer may have been appended to this MPEG transfer
	   (see ivtv_dma_enc_start()); post-process it as well */
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}
609
610 static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
611 {
612         struct ivtv_stream *s;
613
614         if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
615                 itv->cur_pio_stream = -1;
616                 return;
617         }
618         s = &itv->streams[itv->cur_pio_stream];
619         IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
620         clear_bit(IVTV_F_I_PIO, &itv->i_flags);
621         itv->cur_pio_stream = -1;
622         dma_post(s);
623         if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
624                 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
625         else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
626                 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
627         else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
628                 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
629         clear_bit(IVTV_F_I_PIO, &itv->i_flags);
630         if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
631                 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
632                 dma_post(s);
633         }
634         wake_up(&itv->dma_waitq);
635 }
636
/* Handle a DMA error interrupt (including the stall watchdog firing):
   clear the error status and restart the interrupted transfer — the
   current firmware stream DMA, a user-space DMA, or give up and wake
   any waiters. */
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	/* acknowledge/clear the error bits, keeping only bits 0-1 */
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		/* restart the interrupted user-space DMA */
		ivtv_udma_start(itv);
		return;
	}
	/* nothing to retry: reset the DMA state and wake waiters */
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
666
/* Handle the "encoder data ready" interrupt: read the transfer parameters
   from the mailbox, map the reported stream index to a driver stream, queue
   the transfer, and flag it as PIO- or DMA-pending. */
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	/* data[0] indexes ivtv_stream_map; > 2 means VBI (handled elsewhere)
	   or garbage, and a zero offset/size is invalid */
	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
686
/* Handle the "encoder VBI data ready" interrupt: queue a transfer for the
   encoder VBI stream and flag it as PIO- or DMA-pending.
   Note: 'data' is passed uninitialized — safe only because the
   IVTV_ENC_STREAM_TYPE_VBI case in stream_enc_dma_append() computes
   offset/size from registers and never reads 'data'. */
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}
698
/* Handle the "decoder VBI reinsertion" interrupt: if the decoder VBI
   stream is claimed, queue a transfer for it (always PIO).
   Note: as in ivtv_irq_enc_vbi_cap(), 'data' is passed uninitialized but
   the IVTV_DEC_STREAM_TYPE_VBI case in stream_enc_dma_append() never
   reads it. */
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
710
711 static void ivtv_irq_dec_data_req(struct ivtv *itv)
712 {
713         u32 data[CX2341X_MBOX_MAX_DATA];
714         struct ivtv_stream *s;
715
716         /* YUV or MPG */
717
718         if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
719                 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
720                 itv->dma_data_req_size =
721                                  1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
722                 itv->dma_data_req_offset = data[1];
723                 if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
724                         ivtv_yuv_frame_complete(itv);
725                 s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
726         }
727         else {
728                 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
729                 itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
730                 itv->dma_data_req_offset = data[1];
731                 s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
732         }
733         IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
734                        itv->dma_data_req_offset, itv->dma_data_req_size);
735         if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
736                 set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
737         }
738         else {
739                 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
740                         ivtv_yuv_setup_stream_frame(itv);
741                 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
742                 ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
743                 ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
744         }
745 }
746
/* Handle the decoder vertical sync interrupt: flip to the next YUV
 * output frame when appropriate, deliver V4L2 VSYNC events, and
 * schedule deferred VBI/YUV register-update work. */
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	/* NOTE(review): next_dma_frame can apparently be negative (see the
	 * ">= 0" check below), in which case this indexes new_frame_info[]
	 * out of bounds — confirm whether that can happen on this path. */
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	/* Advance the display to the next YUV frame when this vsync starts
	 * a new frame: either the field matches the frame's sync field, or
	 * any field change for non-interlaced material. */
	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		/* For interlaced frames with a delay, wait one extra field
		   before flipping. */
		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				/* Point the display hardware at the luma and
				   chroma planes of the next frame. */
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	/* A field change means a genuine new vsync: update state, wake
	 * waiters, deliver events and schedule once-per-field work. */
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		/* Queue a TOP/BOTTOM field V4L2 event on the output stream. */
		if (s && s->vdev)
			v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				/* No update flagged on the current frame; fall
				   back to the most recently DMA'd frame. */
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}
840
841 #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
842
/* Main interrupt service routine.
 *
 * Reads and acknowledges the IRQ status register, dispatches each
 * enabled status bit to its handler, then round-robins any pending
 * stream DMA/PIO transfers and schedules deferred work. Returns
 * IRQ_NONE when the interrupt was not (or not certainly) ours.
 */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	/* Only consider interrupts that are not masked out. */
	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	/* Dispatch each status bit to its specific handler. */
	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	/* If any stream has a DMA transfer pending and no transfer is in
	 * progress, start one. The round-robin index gives each stream a
	 * fair chance to go first. */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		/* No stream DMA started; let any pending user DMA go. */
		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	/* Same round-robin scheme for streams that transfer by PIO. */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	/* Hand any work flagged by the handlers above to the work queue;
	 * it runs outside interrupt context. */
	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
989
/* Recover from a DMA transfer that never completed.
 *
 * Called with the ivtv device cast to an unsigned long (timer-callback
 * convention) when a DMA completion failed to arrive in time: logs the
 * DMA status, writes back the low status bits (presumably acknowledging
 * the stalled read/write engines — confirm against the CX2341x register
 * docs), clears the in-progress flags and wakes anyone waiting on the
 * DMA, so the driver does not hang forever.
 */
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	/* Nothing to recover if no DMA transfer is actually in flight. */
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}