/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-firmware.h"
#include "ivtv-fileops.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-ioctl.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
        IVTV_ENC_STREAM_TYPE_MPG,
        IVTV_ENC_STREAM_TYPE_YUV,
        IVTV_ENC_STREAM_TYPE_PCM,
        IVTV_ENC_STREAM_TYPE_VBI,
};

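/* Deferred work handler for PIO transfers: runs from the IRQ work queue in
   process context. It walks the buffers queued on q_dma for the current PIO
   stream, copies each scatter-gather segment out of encoder (or decoder, for
   decoder VBI) memory with memcpy_fromio(), and finally writes
   IVTV_IRQ_ENC_PIO_COMPLETE to register 0x44 to raise the PIO-complete
   interrupt. */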
static void ivtv_pio_work_handler(struct ivtv *itv)
{
        struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
        struct ivtv_buffer *buf;
        struct list_head *p;
        int i = 0;

        IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
            s->v4l2dev == NULL || !ivtv_use_pio(s)) {
                itv->cur_pio_stream = -1;
                /* trigger PIO complete user interrupt */
                write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
                return;
        }
        IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
        buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
        list_for_each(p, &s->q_dma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
                u32 size = s->sg_processing[i].size & 0x3ffff;

                /* Copy the data from the card to the buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                        memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
                }
                else {
                        memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
                }
                i++;
                if (i == s->sg_processing_size)
                        break;
        }
        write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

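/* Work queue entry point: dispatch whatever deferred work the interrupt
   handler flagged (PIO copying, VBI parsing, YUV register updates) outside
   of interrupt context. */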
void ivtv_irq_work_handler(struct work_struct *work)
{
        struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

        DEFINE_WAIT(wait);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
                ivtv_pio_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
                ivtv_vbi_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
                ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue,
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        struct list_head *p;
        u32 bytes_needed = 0;
        u32 offset, size;
        u32 UVoffset = 0, UVsize = 0;
        int skip_bufs = s->q_predma.buffers;
        int idx = s->sg_pending_size;
        int rc;

        /* sanity checks */
        if (s->v4l2dev == NULL) {
                IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
                return -1;
        }
        if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
                IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
                return -1;
        }

        /* determine offset, size and PTS for the various streams */
        switch (s->type) {
        case IVTV_ENC_STREAM_TYPE_MPG:
                offset = data[1];
                size = data[2];
                s->pending_pts = 0;
                break;

        case IVTV_ENC_STREAM_TYPE_YUV:
                offset = data[1];
                size = data[2];
                UVoffset = data[3];
                UVsize = data[4];
                s->pending_pts = ((u64) data[5] << 32) | data[6];
                break;

        case IVTV_ENC_STREAM_TYPE_PCM:
                offset = data[1] + 12;
                size = data[2] - 12;
                s->pending_pts = read_dec(offset - 8) |
                        ((u64)(read_dec(offset - 12)) << 32);
                if (itv->has_cx23415)
                        offset += IVTV_DECODER_OFFSET;
                break;

        case IVTV_ENC_STREAM_TYPE_VBI:
                size = itv->vbi.enc_size * itv->vbi.fpi;
                offset = read_enc(itv->vbi.enc_start - 4) + 12;
                if (offset == 12) {
                        IVTV_DEBUG_INFO("VBI offset == 0\n");
                        return -1;
                }
                s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
                break;

        case IVTV_DEC_STREAM_TYPE_VBI:
                size = read_dec(itv->vbi.dec_start + 4) + 8;
                offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
                s->pending_pts = 0;
                offset += IVTV_DECODER_OFFSET;
                break;
        default:
                /* shouldn't happen */
                return -1;
        }

        /* if this is the start of the DMA then fill in the magic cookie */
        if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
                if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                        s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
                        write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
                }
                else {
                        s->pending_backup = read_enc(offset);
                        write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
                }
                s->pending_offset = offset;
        }

        bytes_needed = size;
        if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
                /* The size for the Y samples needs to be rounded upwards to a
                   multiple of the buf_size. The UV samples then start in the
                   next buffer. */
                bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
                bytes_needed += UVsize;
        }

        IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
                ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

        rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
        if (rc < 0) { /* Insufficient buffers */
                IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
                                bytes_needed, s->name);
                return -1;
        }
        if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
                IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
                IVTV_WARN("Cause: the application is not reading fast enough.\n");
        }
        s->buffers_stolen = rc;

        /* got the buffers, now fill in sg_pending */
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        memset(buf->buf, 0, 128);
        list_for_each(p, &s->q_predma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

                if (skip_bufs-- > 0)
                        continue;
                s->sg_pending[idx].dst = buf->dma_handle;
                s->sg_pending[idx].src = offset;
                s->sg_pending[idx].size = s->buf_size;
                buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
                buf->dma_xfer_cnt = s->dma_xfer_cnt;

                s->q_predma.bytesused += buf->bytesused;
                size -= buf->bytesused;
                offset += s->buf_size;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);

                if (size == 0) {        /* YUV */
                        /* process the UV section */
                        offset = UVoffset;
                        size = UVsize;
                }
                idx++;
        }
        s->sg_pending_size = idx;
        return 0;
}

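/* Post-process a completed transfer: sync the buffers for the CPU, check
   that the DMA magic cookie arrived at the expected offset in the first
   buffer (realigning the data if it moved), restore the word the cookie
   overwrote, flag MPG/VBI buffers for byte swapping, and hand the buffers
   on to q_full (decoder VBI data is parsed in place instead). */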
static void dma_post(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf = NULL;
        struct list_head *p;
        u32 offset;
        u32 *u32buf;
        int x = 0;

        IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
                        s->name, s->dma_offset);
        list_for_each(p, &s->q_dma.list) {
                buf = list_entry(p, struct ivtv_buffer, list);
                u32buf = (u32 *)buf->buf;

                /* Sync Buffer */
                ivtv_buf_sync_for_cpu(s, buf);

                if (x == 0 && ivtv_use_dma(s)) {
                        offset = s->dma_last_offset;
                        if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
                        {
                                for (offset = 0; offset < 64; offset++) {
                                        if (u32buf[offset] == DMA_MAGIC_COOKIE) {
                                                break;
                                        }
                                }
                                offset *= 4;
                                if (offset == 256) {
                                        IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
                                        offset = s->dma_last_offset;
                                }
                                if (s->dma_last_offset != offset)
                                        IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
                                s->dma_last_offset = offset;
                        }
                        if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                            s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                                write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
                        }
                        else {
                                write_enc_sync(0, s->dma_offset);
                        }
                        if (offset) {
                                buf->bytesused -= offset;
                                memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
                        }
                        *u32buf = cpu_to_le32(s->dma_backup);
                }
                x++;
                /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
                if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
                    s->type == IVTV_ENC_STREAM_TYPE_VBI)
                        buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
        }
        if (buf)
                buf->bytesused += s->dma_last_offset;
        if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                list_for_each(p, &s->q_dma.list) {
                        buf = list_entry(p, struct ivtv_buffer, list);

                        /* Parse and Groom VBI Data */
                        s->q_dma.bytesused -= buf->bytesused;
                        ivtv_process_vbi_data(itv, buf, 0, s->type);
                        s->q_dma.bytesused += buf->bytesused;
                }
                if (s->id == -1) {
                        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
                        return;
                }
        }
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
        if (s->id != -1)
                wake_up(&s->waitq);
}

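/* Build the scatter-gather list for a host-to-card (decoder) transfer from
   the buffers queued on q_predma, switching to the UV offset once the Y
   plane of a YUV frame has been written, then start the decoder DMA, or
   mark it pending if another DMA is already in flight. */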
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        struct list_head *p;
        u32 y_size = itv->params.height * itv->params.width;
        u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
        int y_done = 0;
        int bytes_written = 0;
        unsigned long flags = 0;
        int idx = 0;

        IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        list_for_each(p, &s->q_predma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

                /* YUV UV Offset from Y Buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
                        offset = uv_offset;
                        y_done = 1;
                }
                s->sg_pending[idx].src = buf->dma_handle;
                s->sg_pending[idx].dst = offset;
                s->sg_pending[idx].size = buf->bytesused;

                offset += buf->bytesused;
                bytes_written += buf->bytesused;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);
                idx++;
        }
        s->sg_pending_size = idx;

        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        if (lock)
                spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                ivtv_dma_dec_start(s);
        }
        else {
                set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
        if (lock)
                spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

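/* Program the next scatter-gather element into the encoder DMA engine and
   kick the transfer by setting bit 1 of IVTV_REG_DMAXFER; the top bit OR'd
   into the size field appears to mark the element for the hardware (its
   exact meaning is undocumented here). */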
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}

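/* Decoder counterpart of ivtv_dma_enc_start_xfer(): identical sequence,
   except the SG list address goes to IVTV_REG_DECDMAADDR and bit 0 of
   IVTV_REG_DMAXFER starts the transfer. */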
static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
        int i;

        IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

        if (ivtv_use_dma(s))
                s->sg_pending[s->sg_pending_size - 1].size += 256;

        /* If this is an MPEG stream, and VBI data is also pending, then append the
           VBI DMA to the MPEG DMA and transfer both sets of data at once.

           VBI DMA is a second class citizen compared to MPEG and mixing them together
           will confuse the firmware (the end of a VBI DMA is seen as the end of a
           MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
           sure we only use the MPEG DMA to transfer the VBI DMA if both are in
           use. This way no conflicts occur. */
        clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
            s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
                ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
                if (ivtv_use_dma(s_vbi))
                        s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
                for (i = 0; i < s_vbi->sg_pending_size; i++) {
                        s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
                }
                s_vbi->dma_offset = s_vbi->pending_offset;
                s_vbi->sg_pending_size = 0;
                s_vbi->dma_xfer_cnt++;
                set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
                IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
        }

        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;
        s->dma_offset = s->pending_offset;
        s->dma_backup = s->pending_backup;
        s->dma_pts = s->pending_pts;

        if (ivtv_use_pio(s)) {
                set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                set_bit(IVTV_F_I_PIO, &itv->i_flags);
                itv->cur_pio_stream = s->type;
        }
        else {
                itv->dma_retries = 0;
                ivtv_dma_enc_start_xfer(s);
                set_bit(IVTV_F_I_DMA, &itv->i_flags);
                itv->cur_dma_stream = s->type;
                itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
                add_timer(&itv->dma_timer);
        }
}

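/* start the decoder DMA */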
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;

        IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
        itv->dma_retries = 0;
        ivtv_dma_dec_start_xfer(s);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = s->type;
        itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
        add_timer(&itv->dma_timer);
}

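/* Handle the DEC DMA READ interrupt: check the DMA status register, retry
   the whole SG list on error (up to three times), start the next segment if
   any remain, and otherwise tell the firmware the transfer has finished and
   recycle the buffers back to q_free. */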
static void ivtv_irq_dma_read(struct ivtv *itv)
{
        struct ivtv_stream *s = NULL;
        struct ivtv_buffer *buf;
        int hw_stream_type = 0;

        IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }

        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                s = &itv->streams[itv->cur_dma_stream];
                ivtv_stream_sync_for_cpu(s);

                if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
                        IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
                                        read_reg(IVTV_REG_DMASTATUS),
                                        s->sg_processed, s->sg_processing_size, itv->dma_retries);
                        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                        if (itv->dma_retries == 3) {
                                itv->dma_retries = 0;
                        }
                        else {
                                /* Retry, starting with the first xfer segment.
                                   Just retrying the current segment is not sufficient. */
                                s->sg_processed = 0;
                                itv->dma_retries++;
                        }
                }
                if (s->sg_processed < s->sg_processing_size) {
                        /* DMA next buffer */
                        ivtv_dma_dec_start_xfer(s);
                        return;
                }
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
                        hw_stream_type = 2;
                IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

                /* For some reason we must kick the firmware, as in PIO mode.
                   I think this tells the firmware we are done and gives it
                   the size of the xfer so it can calculate what we need
                   next. I think we could do this part ourselves, but we
                   would have to fully calculate the xfer info ourselves and
                   not use interrupts. */
                ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
                                hw_stream_type);

                /* Free last DMA call */
                while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
                        ivtv_buf_sync_for_cpu(s, buf);
                        ivtv_enqueue(s, buf, &s->q_free);
                }
                wake_up(&s->waitq);
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

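/* Handle the encoder DMA-complete interrupt: read the result from the
   mailbox, retry the whole SG list on error, start the next segment if one
   remains, and otherwise post-process the completed stream (and any VBI
   data that rode along with it) via dma_post(). */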
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
        if (itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }
        s = &itv->streams[itv->cur_dma_stream];
        ivtv_stream_sync_for_cpu(s);

        if (data[0] & 0x18) {
                IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
                        s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
                write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                if (itv->dma_retries == 3) {
                        itv->dma_retries = 0;
                }
                else {
                        /* Retry, starting with the first xfer segment.
                           Just retrying the current segment is not sufficient. */
                        s->sg_processed = 0;
                        itv->dma_retries++;
                }
        }
        if (s->sg_processed < s->sg_processing_size) {
                /* DMA next buffer */
                ivtv_dma_enc_start_xfer(s);
                return;
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        dma_post(s);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        s->sg_processing_size = 0;
        s->sg_processed = 0;
        wake_up(&itv->dma_waitq);
}

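/* Handle the encoder PIO-complete interrupt: post-process the buffers that
   the PIO work handler just filled and ask the firmware to schedule the
   next transfer for this stream type. */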
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
        struct ivtv_stream *s;

        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
                itv->cur_pio_stream = -1;
                return;
        }
        s = &itv->streams[itv->cur_pio_stream];
        IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        itv->cur_pio_stream = -1;
        dma_post(s);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
        else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
        else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        wake_up(&itv->dma_waitq);
}

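/* Handle the DMA error interrupt: log and acknowledge the error, then
   restart the interrupted stream DMA or user DMA from scratch; if nothing
   can be restarted, clear the DMA state and wake up any waiters. */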
static void ivtv_irq_dma_err(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];

        del_timer(&itv->dma_timer);
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
                                read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
            itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
                struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

                /* retry */
                if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                        ivtv_dma_dec_start(s);
                else
                        ivtv_dma_enc_start(s);
                return;
        }
        if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                ivtv_udma_start(itv);
                return;
        }
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

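/* Handle the encoder start-capture interrupt: read the stream index and the
   offset/size arguments from the mailbox, then queue a DMA (or PIO) request
   for the matching encoder stream. */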
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* Get DMA destination and size arguments from card */
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
        IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

        if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
                IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
                        data[0], data[1], data[2]);
                return;
        }
        s = &itv->streams[ivtv_stream_map[data[0]]];
        if (!stream_enc_dma_append(s, data)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

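/* Handle the encoder VBI capture interrupt: drop stale VBI requests if too
   many have accumulated, then queue a transfer for the new VBI data. The
   transfer is only started on its own when no MPEG capture is in progress;
   otherwise it is appended to the MPEG DMA (see ivtv_dma_enc_start()). */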
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
        struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
        s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

        /* If more than two VBI buffers are pending, then
           clear the old ones and start with this new one.
           This can happen during transition stages when MPEG capturing is
           started, but the first interrupts haven't arrived yet. During
           that period VBI requests can accumulate without being able to
           DMA the data. Since at most four VBI DMA buffers are available,
           we just drop the old requests when there are already three
           requests queued. */
        if (s->sg_pending_size > 2) {
                struct list_head *p;
                list_for_each(p, &s->q_predma.list) {
                        struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
                        ivtv_buf_sync_for_cpu(s, buf);
                }
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
                s->sg_pending_size = 0;
        }
        /* if we can append the data, and the MPEG stream isn't capturing,
           then start a DMA request for just the VBI data. */
        if (!stream_enc_dma_append(s, data) &&
                        !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

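/* Handle the decoder VBI reinsertion interrupt: if the decoder VBI stream
   is claimed by an application, queue a PIO transfer for the reinserted
   VBI data. */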
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

        IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
        if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
                        !stream_enc_dma_append(s, data)) {
                set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
        }
}

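/* Handle the decoder data request interrupt: work out how much MPEG or YUV
   data the firmware wants and where it goes; if enough data is queued on
   q_full, move it to q_predma and prepare the host-to-card DMA, otherwise
   flag the stream as needing data. */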
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* YUV or MPG */
        ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

        if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
                itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
                itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
        }
        else {
                itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
                itv->dma_data_req_offset = data[1];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
        }
        IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
                        itv->dma_data_req_offset, itv->dma_data_req_size);
        if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
                set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
        }
        else {
                clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
                ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
                ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
        }
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
        /* The vsync interrupt is unusual in that it won't clear until
         * the end of the first line for the current field, at which
         * point it clears itself. This can result in repeated vsync
         * interrupts, or a missed vsync. Read some of the registers
         * to determine the line being displayed and ensure we handle
         * one vsync per frame.
         */
        unsigned int frame = read_reg(0x28c0) & 1;
        int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

        if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

        if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
                ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
                        (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
                int next_dma_frame = last_dma_frame;

                if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
                        if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
                                next_dma_frame = (next_dma_frame + 1) & 0x3;
                                atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
                                itv->yuv_info.fields_lapsed = -1;
                        }
                }
        }
        if (frame != (itv->lastVsyncFrame & 1)) {
                struct ivtv_stream *s = ivtv_get_output_stream(itv);

                itv->lastVsyncFrame += 1;
                if (frame == 0) {
                        clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
                        clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                else {
                        set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
                        set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
                        wake_up(&itv->event_waitq);
                }
                wake_up(&itv->vsync_waitq);
                if (s)
                        wake_up(&s->waitq);

                /* Send VBI to saa7127 */
                if (frame) {
                        set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
                        set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                }

                /* Check if we need to update the yuv registers */
                if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
                        if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
                                last_dma_frame = (last_dma_frame - 1) & 3;

                        if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
                                itv->yuv_info.update_frame = last_dma_frame;
                                itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
                                itv->yuv_info.yuv_forced_update = 0;
                                set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
                                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                        }
                }

                itv->yuv_info.fields_lapsed++;
        }
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)

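/* Main interrupt handler: acknowledge the pending interrupt bits, dispatch
   the individual DMA/PIO/vsync handlers above, round-robin any pending
   stream DMA or PIO requests (so no single stream can starve the others),
   and kick the work queue for anything that must run in process context. */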
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
        struct ivtv *itv = (struct ivtv *)dev_id;
        u32 combo;
        u32 stat;
        int i;
        u8 vsync_force = 0;

        spin_lock(&itv->dma_reg_lock);
        /* get contents of irq status register */
        stat = read_reg(IVTV_REG_IRQSTATUS);

        combo = ~itv->irqmask & stat;

        /* Clear out IRQ */
        if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

        if (0 == combo) {
                /* The vsync interrupt is unusual and clears itself. If we
                 * took too long, we may have missed it. Do some checks
                 */
                if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                        /* vsync is enabled, see if we're in a new field */
                        if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
                                /* New field, looks like we missed it */
                                IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
                                vsync_force = 1;
                        }
                }

                if (!vsync_force) {
                        /* No Vsync expected, wasn't for us */
                        spin_unlock(&itv->dma_reg_lock);
                        return IRQ_NONE;
                }
        }

        /* Exclude interrupts noted below from the output, otherwise the log is flooded with
           these messages */
        if (combo & ~0xff6d0400)
                IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

        if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
                IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
        }

        if (combo & IVTV_IRQ_DMA_READ) {
                ivtv_irq_dma_read(itv);
        }

        if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
                ivtv_irq_enc_dma_complete(itv);
        }

        if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
                ivtv_irq_enc_pio_complete(itv);
        }

        if (combo & IVTV_IRQ_DMA_ERR) {
                ivtv_irq_dma_err(itv);
        }

        if (combo & IVTV_IRQ_ENC_START_CAP) {
                ivtv_irq_enc_start_cap(itv);
        }

        if (combo & IVTV_IRQ_ENC_VBI_CAP) {
                ivtv_irq_enc_vbi_cap(itv);
        }

        if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
                ivtv_irq_dec_vbi_reinsert(itv);
        }

        if (combo & IVTV_IRQ_ENC_EOS) {
                IVTV_DEBUG_IRQ("ENC EOS\n");
                set_bit(IVTV_F_I_EOS, &itv->i_flags);
                wake_up(&itv->cap_w);
        }

        if (combo & IVTV_IRQ_DEC_DATA_REQ) {
                ivtv_irq_dec_data_req(itv);
        }

        /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
        if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                ivtv_irq_vsync(itv);
        }

        if (combo & IVTV_IRQ_ENC_VIM_RST) {
                IVTV_DEBUG_IRQ("VIM RST\n");
                /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
        }

        if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
                IVTV_DEBUG_INFO("Stereo mode changed\n");
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
                                continue;
                        if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_dec_start(s);
                        else
                                ivtv_dma_enc_start(s);
                        break;
                }
                if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
                        ivtv_udma_start(itv);
                }
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
                                continue;
                        if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_enc_start(s);
                        break;
                }
        }

        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
                queue_work(itv->irq_work_queues, &itv->irq_work_queue);

        spin_unlock(&itv->dma_reg_lock);

        /* If we've just handled a 'forced' vsync, it's safest to say it
         * wasn't ours. Another device may have triggered it at just
         * the right time.
         */
        return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

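/* Callback for the 100 ms DMA watchdog timer (itv->dma_timer): if a DMA is
   still marked as in progress when the timer fires, report the timeout,
   clear the DMA state and wake up any waiters. */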
void ivtv_unfinished_dma(unsigned long arg)
{
        struct ivtv *itv = (struct ivtv *)arg;

        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                return;
        IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}