V4L/DVB (6046): ivtv: always steal full frames if out of buffers.
[deliverable/linux.git] / drivers / media / video / ivtv / ivtv-irq.c
CommitLineData
1a0adaf3
HV
1/* interrupt handling
2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
22#include "ivtv-firmware.h"
23#include "ivtv-fileops.h"
24#include "ivtv-queue.h"
25#include "ivtv-udma.h"
26#include "ivtv-irq.h"
27#include "ivtv-ioctl.h"
28#include "ivtv-mailbox.h"
29#include "ivtv-vbi.h"
1e13f9e3 30#include "ivtv-yuv.h"
1a0adaf3
HV
31
32#define DMA_MAGIC_COOKIE 0x000001fe
33
1a0adaf3
HV
34static void ivtv_dma_dec_start(struct ivtv_stream *s);
35
/* Maps the stream index that the firmware reports in its DMA/PIO
   mailbox data to the corresponding encoder stream type.  Indexed by
   data[0] in ivtv_irq_enc_start_cap() and data[1] in
   ivtv_irq_enc_dma_complete(). */
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
42
dc02d50a
HV
43
44static void ivtv_pio_work_handler(struct ivtv *itv)
1a0adaf3 45{
dc02d50a
HV
46 struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
47 struct ivtv_buffer *buf;
48 struct list_head *p;
49 int i = 0;
50
bd58df6d 51 IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
dc02d50a
HV
52 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
53 s->v4l2dev == NULL || !ivtv_use_pio(s)) {
54 itv->cur_pio_stream = -1;
55 /* trigger PIO complete user interrupt */
56 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
57 return;
58 }
bd58df6d 59 IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
dc02d50a
HV
60 buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
61 list_for_each(p, &s->q_dma.list) {
62 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
63 u32 size = s->PIOarray[i].size & 0x3ffff;
1a0adaf3 64
dc02d50a
HV
65 /* Copy the data from the card to the buffer */
66 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
67 memcpy_fromio(buf->buf, itv->dec_mem + s->PIOarray[i].src - IVTV_DECODER_OFFSET, size);
68 }
69 else {
70 memcpy_fromio(buf->buf, itv->enc_mem + s->PIOarray[i].src, size);
71 }
72 if (s->PIOarray[i].size & 0x80000000)
73 break;
74 i++;
75 }
76 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
1a0adaf3
HV
77}
78
1e13f9e3
HV
79void ivtv_irq_work_handler(struct work_struct *work)
80{
81 struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
82
83 DEFINE_WAIT(wait);
84
dc02d50a
HV
85 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
86 ivtv_pio_work_handler(itv);
87
1e13f9e3 88 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
dc02d50a 89 ivtv_vbi_work_handler(itv);
1e13f9e3
HV
90
91 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
92 ivtv_yuv_work_handler(itv);
93}
94
1a0adaf3
HV
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.

   Returns 0 on success, -1 if the stream is not usable or no buffers
   could be obtained.  On success s->SG_length, s->SGarray[] and
   s->q_predma describe the pending transfer.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->SG_length;
	int rc;

	/* sanity checks */
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->dma_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->dma_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		/* skip the 12-byte header; the PTS sits just before the data */
		offset = data[1] + 12;
		size = data[2] - 12;
		s->dma_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			/* the card reported a zero offset: nothing to fetch */
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->dma_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie:
	   the first word at the source offset is saved in dma_backup and
	   overwritten with DMA_MAGIC_COOKIE so dma_post() can later locate
	   the true start of the transferred data */
	if (s->SG_length == 0) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->dma_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->dma_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	/* a non-zero rc means buffers were stolen from the full queue
	   (frames dropped because the application reads too slowly) */
	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in SGarray (DMA) */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		/* skip buffers that were already queued before this call */
		if (skip_bufs-- > 0)
			continue;
		s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
		s->SGarray[idx].src = cpu_to_le32(offset);
		s->SGarray[idx].size = cpu_to_le32(s->buf_size);
		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->SG_length = idx;
	return 0;
}
236
237static void dma_post(struct ivtv_stream *s)
238{
239 struct ivtv *itv = s->itv;
240 struct ivtv_buffer *buf = NULL;
241 struct list_head *p;
242 u32 offset;
243 u32 *u32buf;
244 int x = 0;
245
bd58df6d 246 IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
1a0adaf3
HV
247 s->name, s->dma_offset);
248 list_for_each(p, &s->q_dma.list) {
249 buf = list_entry(p, struct ivtv_buffer, list);
250 u32buf = (u32 *)buf->buf;
251
252 /* Sync Buffer */
253 ivtv_buf_sync_for_cpu(s, buf);
254
255 if (x == 0) {
256 offset = s->dma_last_offset;
257 if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
258 {
259 for (offset = 0; offset < 64; offset++) {
260 if (u32buf[offset] == DMA_MAGIC_COOKIE) {
261 break;
262 }
263 }
264 offset *= 4;
265 if (offset == 256) {
266 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
267 offset = s->dma_last_offset;
268 }
269 if (s->dma_last_offset != offset)
270 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
271 s->dma_last_offset = offset;
272 }
273 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
274 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
275 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
276 }
277 else {
278 write_enc_sync(0, s->dma_offset);
279 }
280 if (offset) {
281 buf->bytesused -= offset;
282 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
283 }
284 *u32buf = cpu_to_le32(s->dma_backup);
285 }
286 x++;
287 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
288 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
289 s->type == IVTV_ENC_STREAM_TYPE_VBI)
f4071b85 290 buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
1a0adaf3
HV
291 }
292 if (buf)
293 buf->bytesused += s->dma_last_offset;
294 if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
dc02d50a
HV
295 list_for_each(p, &s->q_dma.list) {
296 buf = list_entry(p, struct ivtv_buffer, list);
297
298 /* Parse and Groom VBI Data */
299 s->q_dma.bytesused -= buf->bytesused;
300 ivtv_process_vbi_data(itv, buf, 0, s->type);
301 s->q_dma.bytesused += buf->bytesused;
302 }
1a0adaf3
HV
303 if (s->id == -1) {
304 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
305 return;
306 }
307 }
308 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
309 if (s->id != -1)
310 wake_up(&s->waitq);
311}
312
313void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
314{
315 struct ivtv *itv = s->itv;
316 struct ivtv_buffer *buf;
317 struct list_head *p;
318 u32 y_size = itv->params.height * itv->params.width;
319 u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
320 int y_done = 0;
321 int bytes_written = 0;
322 unsigned long flags = 0;
323 int idx = 0;
324
bd58df6d 325 IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
1a0adaf3
HV
326 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
327 list_for_each(p, &s->q_predma.list) {
328 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
329
330 /* YUV UV Offset from Y Buffer */
331 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
332 offset = uv_offset;
333 y_done = 1;
334 }
335 s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
336 s->SGarray[idx].dst = cpu_to_le32(offset);
337 s->SGarray[idx].size = cpu_to_le32(buf->bytesused);
338
339 offset += buf->bytesused;
340 bytes_written += buf->bytesused;
341
342 /* Sync SG buffers */
343 ivtv_buf_sync_for_device(s, buf);
344 idx++;
345 }
346 s->SG_length = idx;
347
348 /* Mark last buffer size for Interrupt flag */
349 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
350
351 /* Sync Hardware SG List of buffers */
352 ivtv_stream_sync_for_device(s);
353 if (lock)
354 spin_lock_irqsave(&itv->dma_reg_lock, flags);
355 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
356 ivtv_dma_dec_start(s);
357 }
358 else {
359 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
360 }
361 if (lock)
362 spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
363}
364
365/* start the encoder DMA */
366static void ivtv_dma_enc_start(struct ivtv_stream *s)
367{
368 struct ivtv *itv = s->itv;
369 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
370 int i;
371
bd58df6d 372 IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
dc02d50a 373
1a0adaf3
HV
374 if (s->q_predma.bytesused)
375 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
dc02d50a
HV
376
377 if (ivtv_use_dma(s))
378 s->SGarray[s->SG_length - 1].size =
379 cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
1a0adaf3
HV
380
381 /* If this is an MPEG stream, and VBI data is also pending, then append the
382 VBI DMA to the MPEG DMA and transfer both sets of data at once.
383
384 VBI DMA is a second class citizen compared to MPEG and mixing them together
385 will confuse the firmware (the end of a VBI DMA is seen as the end of a
386 MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
387 sure we only use the MPEG DMA to transfer the VBI DMA if both are in
388 use. This way no conflicts occur. */
389 clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
390 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
391 s->SG_length + s_vbi->SG_length <= s->buffers) {
392 ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
dc02d50a
HV
393 if (ivtv_use_dma(s_vbi))
394 s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256);
1a0adaf3
HV
395 for (i = 0; i < s_vbi->SG_length; i++) {
396 s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
397 }
398 itv->vbi.dma_offset = s_vbi->dma_offset;
399 s_vbi->SG_length = 0;
f4071b85 400 s_vbi->dma_xfer_cnt++;
1a0adaf3 401 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
bd58df6d 402 IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
1a0adaf3
HV
403 }
404
405 /* Mark last buffer size for Interrupt flag */
406 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
f4071b85 407 s->dma_xfer_cnt++;
1a0adaf3 408
dd1e729d
HV
409 if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
410 set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
411 else
412 clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
413
dc02d50a
HV
414 if (ivtv_use_pio(s)) {
415 for (i = 0; i < s->SG_length; i++) {
416 s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
417 s->PIOarray[i].size = le32_to_cpu(s->SGarray[i].size);
418 }
419 set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
420 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
421 set_bit(IVTV_F_I_PIO, &itv->i_flags);
422 itv->cur_pio_stream = s->type;
423 }
424 else {
425 /* Sync Hardware SG List of buffers */
426 ivtv_stream_sync_for_device(s);
427 write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
428 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
429 set_bit(IVTV_F_I_DMA, &itv->i_flags);
430 itv->cur_dma_stream = s->type;
201700d3 431 itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
dc02d50a
HV
432 add_timer(&itv->dma_timer);
433 }
1a0adaf3
HV
434}
435
/* Start a host-to-decoder DMA for stream s: flush predma buffers to
   q_dma, program the SG handle and kick the transfer, then arm the
   100 ms watchdog timer.  Caller must hold dma_reg_lock (see
   ivtv_dma_stream_dec_prepare). */
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	/* put SG Handle into register 0x0c */
	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
	/* bit 0 of DMAXFER triggers the decoder DMA */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	/* watchdog: ivtv_unfinished_dma() fires if no completion IRQ */
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
	add_timer(&itv->dma_timer);
}
451
/* IRQ: a host-to-decoder DMA finished.  Cancel the watchdog, report any
   DMA error bits, tell the firmware how much was transferred, recycle
   the buffers and clear the in-flight DMA state. */
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
	del_timer(&itv->dma_timer);
	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	}
	/* user DMA (UDMA) completions need none of the stream handling below */
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
			hw_stream_type = 2;
		}
		else {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
			hw_stream_type = 0;
		}
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		ivtv_stream_sync_for_cpu(s);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
498
/* IRQ: an encoder-to-host DMA finished.  Cancel the watchdog, map the
   mailbox stream index to a driver stream, handle errors (with a
   reschedule request to the firmware), then run dma_post() — twice if a
   VBI transfer was piggybacked on the MPEG DMA. */
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
	/* a VBI-only DMA completion is reported under index 3 */
	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
		data[1] = 3;
	else if (data[1] > 2)
		return;
	s = &itv->streams[ivtv_stream_map[data[1]]];
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
	}
	s->SG_length = 0;
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	ivtv_stream_sync_for_cpu(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		u32 tmp;

		/* the VBI data was appended to this DMA: post-process the VBI
		   stream with its own dma_offset temporarily swapped in */
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		tmp = s->dma_offset;
		s->dma_offset = itv->vbi.dma_offset;
		dma_post(s);
		s->dma_offset = tmp;
	}
	wake_up(&itv->dma_waitq);
}
533
dc02d50a
HV
534static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
535{
536 struct ivtv_stream *s;
537
538 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
539 itv->cur_pio_stream = -1;
540 return;
541 }
542 s = &itv->streams[itv->cur_pio_stream];
bd58df6d 543 IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
dc02d50a
HV
544 s->SG_length = 0;
545 clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
546 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
547 itv->cur_pio_stream = -1;
548 dma_post(s);
549 if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
550 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
551 else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
552 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
553 else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
554 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
555 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
556 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
557 u32 tmp;
558
559 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
560 tmp = s->dma_offset;
561 s->dma_offset = itv->vbi.dma_offset;
562 dma_post(s);
563 s->dma_offset = tmp;
564 }
565 wake_up(&itv->dma_waitq);
566}
567
1a0adaf3
HV
/* IRQ: the card reported a DMA error.  Log diagnostics and, if a stream
   DMA (not user DMA) was in flight, clear the status bits and retry the
   same transfer.  Otherwise give up and release the DMA state. */
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
593
/* IRQ: the encoder has captured data and requests a transfer.  Read the
   source offset/size from the mailbox, queue buffers via
   stream_enc_dma_append(), and mark the stream DMA- or PIO-pending so
   the main handler can start the transfer. */
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
			data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
613
/* IRQ: the encoder has VBI data ready.  Note: 'data' is passed to
   stream_enc_dma_append() uninitialized; this is harmless because the
   IVTV_ENC_STREAM_TYPE_VBI case there never reads it (it fetches offset
   and size from the card directly). */
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->SG_length > 2) {
		struct list_head *p;
		list_for_each(p, &s->q_predma.list) {
			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->SG_length = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
			!test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
647
/* IRQ: the decoder has sliced VBI data available for re-insertion.
   Queue a PIO transfer for the decoder VBI stream if it is claimed.
   Note: 'data' is passed to stream_enc_dma_append() uninitialized; the
   IVTV_DEC_STREAM_TYPE_VBI case there never reads it. */
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
659
/* IRQ: the decoder requests more data (YUV or MPG).  Compute the size
   and card offset of the request; if enough data is buffered, move it to
   the predma queue and prepare the decoder DMA, otherwise flag the
   stream as needing data so the write path can satisfy it later. */
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		/* one full 4:2:0 frame: width * height * 3/2 bytes */
		itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
		itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		/* cap MPEG requests at 64 KB */
		itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
			itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}
689
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;	/* current field (0/1) */
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	/* Advance the YUV output frame when the field sequencing says a new
	   frame boundary has been reached (interlaced), or on any field
	   change for progressive output. */
	if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
		((itv->lastVsyncFrame & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
			(frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
				/* program the display base addresses for the
				   Y and UV planes of the next frame */
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) & 0x3;
				atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
				itv->yuv_info.fields_lapsed = -1;
			}
		}
	}
	/* field changed: this is the one-per-field bookkeeping path */
	if (frame != (itv->lastVsyncFrame & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->lastVsyncFrame += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
			if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
				last_dma_frame = (last_dma_frame - 1) & 3;

			if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
				itv->yuv_info.update_frame = last_dma_frame;
				itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
				itv->yuv_info.yuv_forced_update = 0;
				/* defer the register update to the workqueue */
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		itv->yuv_info.fields_lapsed ++;
	}
}
763
764#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)
765
/* Top-level interrupt handler.  Reads and acknowledges the IRQ status
   register, dispatches each pending condition to its handler, then
   round-robins over streams to start any pending DMA or PIO transfers,
   and finally queues deferred work.  Runs under dma_reg_lock. */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;		/* enabled IRQ bits that are asserted */
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->cap_w);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	/* no DMA in flight: round-robin over streams and start the first
	   one with a pending DMA request; fall back to user DMA */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	/* no PIO in flight: same round-robin for PIO-pending streams */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
908
909void ivtv_unfinished_dma(unsigned long arg)
910{
911 struct ivtv *itv = (struct ivtv *)arg;
912
913 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
914 return;
915 IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
916
917 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
918 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
919 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
920 itv->cur_dma_stream = -1;
921 wake_up(&itv->dma_waitq);
922}
This page took 0.162366 seconds and 5 git commands to generate.