/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

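/* The magic cookie is written into card memory at the start of a transfer's
 * data region; after completion, dma_post() looks for it in the received
 * buffer to determine the actual start offset of the data, then restores the
 * word it overwrote from a backup. (Descriptive comment added; behaviour
 * inferred from stream_enc_dma_append() and dma_post() below.)
 */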
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};


static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

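/* Deferred work: PIO copies and VBI/YUV post-processing are too heavy for
 * interrupt context, so ivtv_irq_handler() sets the IVTV_F_I_WORK_HANDLER_*
 * flags and queues this handler on a workqueue. (Descriptive comment added.)
 */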
void ivtv_irq_work_handler(struct work_struct *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

	DEFINE_WAIT(wait);

	if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) {
		struct sched_param param = { .sched_priority = 99 };

		/* This thread must use the FIFO scheduler as it
		   is realtime sensitive. */
		sched_setscheduler(current, SCHED_FIFO, &param);
	}
	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue,
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

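/* After a completed transfer, locate DMA_MAGIC_COOKIE in the first buffer to
 * find the real start of the data, restore the backed-up word, strip the
 * leading offset, and hand the buffers over to the full queue (or back to the
 * free queue for internally handled decoder VBI). (Descriptive comment
 * added.)
 */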
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->id == -1) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->id != -1)
		wake_up(&s->waitq);
}

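/* Build the scatter-gather list for a decoder (host-to-card) transfer. For
 * YUV this inserts a blanking-data block when the frame is vertically offset
 * and splits the buffer that straddles the Y/UV boundary so the UV data lands
 * at its own card offset. (Descriptive comment added.)
 */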
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
					buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
					buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

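/* Program one scatter-gather element into the shared descriptor and kick the
 * DMA engine. Bit 31 of the size appears to mark the element as the last one
 * in the hardware list (assumption; the bit is undocumented here). The 300 ms
 * dma_timer serves as a watchdog, presumably registered to fire
 * ivtv_unfinished_dma() below if no completion interrupt arrives.
 * (Descriptive comment added.)
 */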
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

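	/* The extra 256 bytes on the final element appear to ensure that the
	 * region dma_post() scans for DMA_MAGIC_COOKIE is transferred as well
	 * (assumption; the padding is otherwise undocumented). */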
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second-class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware, as in PIO mode.
		   I think this tells the firmware we are done and the size of
		   the xfer so it can calculate what we need next. We could
		   probably do this part ourselves, but we would have to fully
		   calculate the xfer info ourselves and not use interrupts.
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

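/* Encoder DMA completion: check the mailbox status word for errors (0x18
 * appears to cover the DMA error bits; assumption), retry the whole transfer
 * from the first segment up to three times, otherwise advance to the next SG
 * element or finish the stream with dma_post(). (Descriptive comment added.)
 */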
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}

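/* The decoder asks for more data: compute the size of the request (a YUV
 * frame with its height rounded up to a multiple of 32 lines, or at most
 * 64 KB of MPEG data), then either flag the stream as starved or queue the
 * transfer to the card. (Descriptive comment added.)
 */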
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		itv->dma_data_req_size =
			1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
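				/* These (undocumented) registers appear to
				 * latch the Y and UV base addresses of the
				 * next frame for both fields (assumption). */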
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

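/* All interrupts that can make a DMA or PIO transfer pending; if any of them
 * fires while no transfer is in flight, the handler below scans the streams
 * for pending work in round-robin order. (Descriptive comment added.)
 */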
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

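/* Main interrupt handler: read and acknowledge the IRQ status, dispatch to
 * the per-interrupt helpers above, then kick off any pending DMA/PIO
 * transfers and deferred work. (Descriptive comment added.)
 */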
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

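	/* A DMA-related interrupt fired and no transfer is in flight: pick
	 * the next stream with a pending transfer, with irq_rr_idx rotating
	 * the starting point so no stream can starve the others, and fall
	 * back to a pending user-space DMA. (Descriptive comment added.) */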
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

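/* DMA watchdog: presumably registered as the handler for itv->dma_timer
 * (armed for 300 ms in the start_xfer helpers above). If a transfer never
 * completes, log it, acknowledge the status bits, and reset the DMA state.
 * (Descriptive comment added.)
 */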
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}