[media] Initial commit to support NetUP Dual DVB-T/C CI RF card
[deliverable/linux.git] / drivers / media / video / cx23885 / cx23885-core.c
1 /*
2 * Driver for the Conexant CX23885 PCIe bridge
3 *
4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
32 #include <linux/firmware.h>
33
34 #include "cx23885.h"
35 #include "cimax2.h"
36 #include "altera-ci.h"
37 #include "cx23888-ir.h"
38 #include "cx23885-ir.h"
39 #include "cx23885-av.h"
40 #include "cx23885-input.h"
41
42 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
43 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
44 MODULE_LICENSE("GPL");
45
46 static unsigned int debug;
47 module_param(debug, int, 0644);
48 MODULE_PARM_DESC(debug, "enable debug messages");
49
50 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
51 module_param_array(card, int, NULL, 0444);
52 MODULE_PARM_DESC(card, "card type");
53
54 #define dprintk(level, fmt, arg...)\
55 do { if (debug >= level)\
56 printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
57 } while (0)
58
59 static unsigned int cx23885_devcount;
60
61 #define NO_SYNC_LINE (-1U)
62
63 /* FIXME, these allocations will change when
64 * analog arrives. To be reviewed.
65 * CX23887 Assumptions
66 * 1 line = 16 bytes of CDT
67 * cmds size = 80
68 * cdt size = 16 * linesize
69 * iqsize = 64
70 * maxlines = 6
71 *
72 * Address Space:
73 * 0x00000000 0x00008fff FIFO clusters
74 * 0x00010000 0x000104af Channel Management Data Structures
75 * 0x000104b0 0x000104ff Free
76 * 0x00010500 0x000108bf 15 channels * iqsize
77 * 0x000108c0 0x000108ff Free
78 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
79 * 15 channels * (iqsize + (maxlines * linesize))
80 * 0x00010ea0 0x00010xxx Free
81 */
82
83 static struct sram_channel cx23885_sram_channels[] = {
84 [SRAM_CH01] = {
85 .name = "VID A",
86 .cmds_start = 0x10000,
87 .ctrl_start = 0x10380,
88 .cdt = 0x104c0,
89 .fifo_start = 0x40,
90 .fifo_size = 0x2800,
91 .ptr1_reg = DMA1_PTR1,
92 .ptr2_reg = DMA1_PTR2,
93 .cnt1_reg = DMA1_CNT1,
94 .cnt2_reg = DMA1_CNT2,
95 },
96 [SRAM_CH02] = {
97 .name = "ch2",
98 .cmds_start = 0x0,
99 .ctrl_start = 0x0,
100 .cdt = 0x0,
101 .fifo_start = 0x0,
102 .fifo_size = 0x0,
103 .ptr1_reg = DMA2_PTR1,
104 .ptr2_reg = DMA2_PTR2,
105 .cnt1_reg = DMA2_CNT1,
106 .cnt2_reg = DMA2_CNT2,
107 },
108 [SRAM_CH03] = {
109 .name = "TS1 B",
110 .cmds_start = 0x100A0,
111 .ctrl_start = 0x10400,
112 .cdt = 0x10580,
113 .fifo_start = 0x5000,
114 .fifo_size = 0x1000,
115 .ptr1_reg = DMA3_PTR1,
116 .ptr2_reg = DMA3_PTR2,
117 .cnt1_reg = DMA3_CNT1,
118 .cnt2_reg = DMA3_CNT2,
119 },
120 [SRAM_CH04] = {
121 .name = "ch4",
122 .cmds_start = 0x0,
123 .ctrl_start = 0x0,
124 .cdt = 0x0,
125 .fifo_start = 0x0,
126 .fifo_size = 0x0,
127 .ptr1_reg = DMA4_PTR1,
128 .ptr2_reg = DMA4_PTR2,
129 .cnt1_reg = DMA4_CNT1,
130 .cnt2_reg = DMA4_CNT2,
131 },
132 [SRAM_CH05] = {
133 .name = "ch5",
134 .cmds_start = 0x0,
135 .ctrl_start = 0x0,
136 .cdt = 0x0,
137 .fifo_start = 0x0,
138 .fifo_size = 0x0,
139 .ptr1_reg = DMA5_PTR1,
140 .ptr2_reg = DMA5_PTR2,
141 .cnt1_reg = DMA5_CNT1,
142 .cnt2_reg = DMA5_CNT2,
143 },
144 [SRAM_CH06] = {
145 .name = "TS2 C",
146 .cmds_start = 0x10140,
147 .ctrl_start = 0x10440,
148 .cdt = 0x105e0,
149 .fifo_start = 0x6000,
150 .fifo_size = 0x1000,
151 .ptr1_reg = DMA5_PTR1,
152 .ptr2_reg = DMA5_PTR2,
153 .cnt1_reg = DMA5_CNT1,
154 .cnt2_reg = DMA5_CNT2,
155 },
156 [SRAM_CH07] = {
157 .name = "ch7",
158 .cmds_start = 0x0,
159 .ctrl_start = 0x0,
160 .cdt = 0x0,
161 .fifo_start = 0x0,
162 .fifo_size = 0x0,
163 .ptr1_reg = DMA6_PTR1,
164 .ptr2_reg = DMA6_PTR2,
165 .cnt1_reg = DMA6_CNT1,
166 .cnt2_reg = DMA6_CNT2,
167 },
168 [SRAM_CH08] = {
169 .name = "ch8",
170 .cmds_start = 0x0,
171 .ctrl_start = 0x0,
172 .cdt = 0x0,
173 .fifo_start = 0x0,
174 .fifo_size = 0x0,
175 .ptr1_reg = DMA7_PTR1,
176 .ptr2_reg = DMA7_PTR2,
177 .cnt1_reg = DMA7_CNT1,
178 .cnt2_reg = DMA7_CNT2,
179 },
180 [SRAM_CH09] = {
181 .name = "ch9",
182 .cmds_start = 0x0,
183 .ctrl_start = 0x0,
184 .cdt = 0x0,
185 .fifo_start = 0x0,
186 .fifo_size = 0x0,
187 .ptr1_reg = DMA8_PTR1,
188 .ptr2_reg = DMA8_PTR2,
189 .cnt1_reg = DMA8_CNT1,
190 .cnt2_reg = DMA8_CNT2,
191 },
192 };
193
194 static struct sram_channel cx23887_sram_channels[] = {
195 [SRAM_CH01] = {
196 .name = "VID A",
197 .cmds_start = 0x10000,
198 .ctrl_start = 0x105b0,
199 .cdt = 0x107b0,
200 .fifo_start = 0x40,
201 .fifo_size = 0x2800,
202 .ptr1_reg = DMA1_PTR1,
203 .ptr2_reg = DMA1_PTR2,
204 .cnt1_reg = DMA1_CNT1,
205 .cnt2_reg = DMA1_CNT2,
206 },
207 [SRAM_CH02] = {
208 .name = "ch2",
209 .cmds_start = 0x0,
210 .ctrl_start = 0x0,
211 .cdt = 0x0,
212 .fifo_start = 0x0,
213 .fifo_size = 0x0,
214 .ptr1_reg = DMA2_PTR1,
215 .ptr2_reg = DMA2_PTR2,
216 .cnt1_reg = DMA2_CNT1,
217 .cnt2_reg = DMA2_CNT2,
218 },
219 [SRAM_CH03] = {
220 .name = "TS1 B",
221 .cmds_start = 0x100A0,
222 .ctrl_start = 0x10630,
223 .cdt = 0x10870,
224 .fifo_start = 0x5000,
225 .fifo_size = 0x1000,
226 .ptr1_reg = DMA3_PTR1,
227 .ptr2_reg = DMA3_PTR2,
228 .cnt1_reg = DMA3_CNT1,
229 .cnt2_reg = DMA3_CNT2,
230 },
231 [SRAM_CH04] = {
232 .name = "ch4",
233 .cmds_start = 0x0,
234 .ctrl_start = 0x0,
235 .cdt = 0x0,
236 .fifo_start = 0x0,
237 .fifo_size = 0x0,
238 .ptr1_reg = DMA4_PTR1,
239 .ptr2_reg = DMA4_PTR2,
240 .cnt1_reg = DMA4_CNT1,
241 .cnt2_reg = DMA4_CNT2,
242 },
243 [SRAM_CH05] = {
244 .name = "ch5",
245 .cmds_start = 0x0,
246 .ctrl_start = 0x0,
247 .cdt = 0x0,
248 .fifo_start = 0x0,
249 .fifo_size = 0x0,
250 .ptr1_reg = DMA5_PTR1,
251 .ptr2_reg = DMA5_PTR2,
252 .cnt1_reg = DMA5_CNT1,
253 .cnt2_reg = DMA5_CNT2,
254 },
255 [SRAM_CH06] = {
256 .name = "TS2 C",
257 .cmds_start = 0x10140,
258 .ctrl_start = 0x10670,
259 .cdt = 0x108d0,
260 .fifo_start = 0x6000,
261 .fifo_size = 0x1000,
262 .ptr1_reg = DMA5_PTR1,
263 .ptr2_reg = DMA5_PTR2,
264 .cnt1_reg = DMA5_CNT1,
265 .cnt2_reg = DMA5_CNT2,
266 },
267 [SRAM_CH07] = {
268 .name = "ch7",
269 .cmds_start = 0x0,
270 .ctrl_start = 0x0,
271 .cdt = 0x0,
272 .fifo_start = 0x0,
273 .fifo_size = 0x0,
274 .ptr1_reg = DMA6_PTR1,
275 .ptr2_reg = DMA6_PTR2,
276 .cnt1_reg = DMA6_CNT1,
277 .cnt2_reg = DMA6_CNT2,
278 },
279 [SRAM_CH08] = {
280 .name = "ch8",
281 .cmds_start = 0x0,
282 .ctrl_start = 0x0,
283 .cdt = 0x0,
284 .fifo_start = 0x0,
285 .fifo_size = 0x0,
286 .ptr1_reg = DMA7_PTR1,
287 .ptr2_reg = DMA7_PTR2,
288 .cnt1_reg = DMA7_CNT1,
289 .cnt2_reg = DMA7_CNT2,
290 },
291 [SRAM_CH09] = {
292 .name = "ch9",
293 .cmds_start = 0x0,
294 .ctrl_start = 0x0,
295 .cdt = 0x0,
296 .fifo_start = 0x0,
297 .fifo_size = 0x0,
298 .ptr1_reg = DMA8_PTR1,
299 .ptr2_reg = DMA8_PTR2,
300 .cnt1_reg = DMA8_CNT1,
301 .cnt2_reg = DMA8_CNT2,
302 },
303 };
304
305 void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
306 {
307 unsigned long flags;
308 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
309
310 dev->pci_irqmask |= mask;
311
312 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
313 }
314
315 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
316 {
317 unsigned long flags;
318 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
319
320 dev->pci_irqmask |= mask;
321 cx_set(PCI_INT_MSK, mask);
322
323 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
324 }
325
326 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
327 {
328 u32 v;
329 unsigned long flags;
330 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
331
332 v = mask & dev->pci_irqmask;
333 if (v)
334 cx_set(PCI_INT_MSK, v);
335
336 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
337 }
338
339 static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
340 {
341 cx23885_irq_enable(dev, 0xffffffff);
342 }
343
344 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
345 {
346 unsigned long flags;
347 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
348
349 cx_clear(PCI_INT_MSK, mask);
350
351 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
352 }
353
354 static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
355 {
356 cx23885_irq_disable(dev, 0xffffffff);
357 }
358
359 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
360 {
361 unsigned long flags;
362 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
363
364 dev->pci_irqmask &= ~mask;
365 cx_clear(PCI_INT_MSK, mask);
366
367 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
368 }
369
370 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
371 {
372 u32 v;
373 unsigned long flags;
374 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
375
376 v = cx_read(PCI_INT_MSK);
377
378 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
379 return v;
380 }
381
382 static int cx23885_risc_decode(u32 risc)
383 {
384 static char *instr[16] = {
385 [RISC_SYNC >> 28] = "sync",
386 [RISC_WRITE >> 28] = "write",
387 [RISC_WRITEC >> 28] = "writec",
388 [RISC_READ >> 28] = "read",
389 [RISC_READC >> 28] = "readc",
390 [RISC_JUMP >> 28] = "jump",
391 [RISC_SKIP >> 28] = "skip",
392 [RISC_WRITERM >> 28] = "writerm",
393 [RISC_WRITECM >> 28] = "writecm",
394 [RISC_WRITECR >> 28] = "writecr",
395 };
396 static int incr[16] = {
397 [RISC_WRITE >> 28] = 3,
398 [RISC_JUMP >> 28] = 3,
399 [RISC_SKIP >> 28] = 1,
400 [RISC_SYNC >> 28] = 1,
401 [RISC_WRITERM >> 28] = 3,
402 [RISC_WRITECM >> 28] = 3,
403 [RISC_WRITECR >> 28] = 4,
404 };
405 static char *bits[] = {
406 "12", "13", "14", "resync",
407 "cnt0", "cnt1", "18", "19",
408 "20", "21", "22", "23",
409 "irq1", "irq2", "eol", "sol",
410 };
411 int i;
412
413 printk("0x%08x [ %s", risc,
414 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
415 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
416 if (risc & (1 << (i + 12)))
417 printk(" %s", bits[i]);
418 printk(" count=%d ]\n", risc & 0xfff);
419 return incr[risc >> 28] ? incr[risc >> 28] : 1;
420 }
421
422 void cx23885_wakeup(struct cx23885_tsport *port,
423 struct cx23885_dmaqueue *q, u32 count)
424 {
425 struct cx23885_dev *dev = port->dev;
426 struct cx23885_buffer *buf;
427 int bc;
428
429 for (bc = 0;; bc++) {
430 if (list_empty(&q->active))
431 break;
432 buf = list_entry(q->active.next,
433 struct cx23885_buffer, vb.queue);
434
435 /* count comes from the hw and is is 16bit wide --
436 * this trick handles wrap-arounds correctly for
437 * up to 32767 buffers in flight... */
438 if ((s16) (count - buf->count) < 0)
439 break;
440
441 do_gettimeofday(&buf->vb.ts);
442 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
443 count, buf->count);
444 buf->vb.state = VIDEOBUF_DONE;
445 list_del(&buf->vb.queue);
446 wake_up(&buf->vb.done);
447 }
448 if (list_empty(&q->active))
449 del_timer(&q->timeout);
450 else
451 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
452 if (bc != 1)
453 printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
454 __func__, bc);
455 }
456
457 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
458 struct sram_channel *ch,
459 unsigned int bpl, u32 risc)
460 {
461 unsigned int i, lines;
462 u32 cdt;
463
464 if (ch->cmds_start == 0) {
465 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
466 ch->name);
467 cx_write(ch->ptr1_reg, 0);
468 cx_write(ch->ptr2_reg, 0);
469 cx_write(ch->cnt2_reg, 0);
470 cx_write(ch->cnt1_reg, 0);
471 return 0;
472 } else {
473 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
474 ch->name);
475 }
476
477 bpl = (bpl + 7) & ~7; /* alignment */
478 cdt = ch->cdt;
479 lines = ch->fifo_size / bpl;
480 if (lines > 6)
481 lines = 6;
482 BUG_ON(lines < 2);
483
484 cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
485 cx_write(8 + 4, 8);
486 cx_write(8 + 8, 0);
487
488 /* write CDT */
489 for (i = 0; i < lines; i++) {
490 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
491 ch->fifo_start + bpl*i);
492 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
493 cx_write(cdt + 16*i + 4, 0);
494 cx_write(cdt + 16*i + 8, 0);
495 cx_write(cdt + 16*i + 12, 0);
496 }
497
498 /* write CMDS */
499 if (ch->jumponly)
500 cx_write(ch->cmds_start + 0, 8);
501 else
502 cx_write(ch->cmds_start + 0, risc);
503 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
504 cx_write(ch->cmds_start + 8, cdt);
505 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
506 cx_write(ch->cmds_start + 16, ch->ctrl_start);
507 if (ch->jumponly)
508 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
509 else
510 cx_write(ch->cmds_start + 20, 64 >> 2);
511 for (i = 24; i < 80; i += 4)
512 cx_write(ch->cmds_start + i, 0);
513
514 /* fill registers */
515 cx_write(ch->ptr1_reg, ch->fifo_start);
516 cx_write(ch->ptr2_reg, cdt);
517 cx_write(ch->cnt2_reg, (lines*16) >> 3);
518 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
519
520 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
521 dev->bridge,
522 ch->name,
523 bpl,
524 lines);
525
526 return 0;
527 }
528
529 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
530 struct sram_channel *ch)
531 {
532 static char *name[] = {
533 "init risc lo",
534 "init risc hi",
535 "cdt base",
536 "cdt size",
537 "iq base",
538 "iq size",
539 "risc pc lo",
540 "risc pc hi",
541 "iq wr ptr",
542 "iq rd ptr",
543 "cdt current",
544 "pci target lo",
545 "pci target hi",
546 "line / byte",
547 };
548 u32 risc;
549 unsigned int i, j, n;
550
551 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
552 dev->name, ch->name);
553 for (i = 0; i < ARRAY_SIZE(name); i++)
554 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
555 dev->name, name[i],
556 cx_read(ch->cmds_start + 4*i));
557
558 for (i = 0; i < 4; i++) {
559 risc = cx_read(ch->cmds_start + 4 * (i + 14));
560 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
561 cx23885_risc_decode(risc);
562 }
563 for (i = 0; i < (64 >> 2); i += n) {
564 risc = cx_read(ch->ctrl_start + 4 * i);
565 /* No consideration for bits 63-32 */
566
567 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
568 ch->ctrl_start + 4 * i, i);
569 n = cx23885_risc_decode(risc);
570 for (j = 1; j < n; j++) {
571 risc = cx_read(ch->ctrl_start + 4 * (i + j));
572 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
573 dev->name, i+j, risc, j);
574 }
575 }
576
577 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
578 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
579 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
580 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
581 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
582 dev->name, cx_read(ch->ptr1_reg));
583 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
584 dev->name, cx_read(ch->ptr2_reg));
585 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
586 dev->name, cx_read(ch->cnt1_reg));
587 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
588 dev->name, cx_read(ch->cnt2_reg));
589 }
590
591 static void cx23885_risc_disasm(struct cx23885_tsport *port,
592 struct btcx_riscmem *risc)
593 {
594 struct cx23885_dev *dev = port->dev;
595 unsigned int i, j, n;
596
597 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
598 dev->name, risc->cpu, (unsigned long)risc->dma);
599 for (i = 0; i < (risc->size >> 2); i += n) {
600 printk(KERN_INFO "%s: %04d: ", dev->name, i);
601 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
602 for (j = 1; j < n; j++)
603 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
604 dev->name, i + j, risc->cpu[i + j], j);
605 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
606 break;
607 }
608 }
609
610 static void cx23885_shutdown(struct cx23885_dev *dev)
611 {
612 /* disable RISC controller */
613 cx_write(DEV_CNTRL2, 0);
614
615 /* Disable all IR activity */
616 cx_write(IR_CNTRL_REG, 0);
617
618 /* Disable Video A/B activity */
619 cx_write(VID_A_DMA_CTL, 0);
620 cx_write(VID_B_DMA_CTL, 0);
621 cx_write(VID_C_DMA_CTL, 0);
622
623 /* Disable Audio activity */
624 cx_write(AUD_INT_DMA_CTL, 0);
625 cx_write(AUD_EXT_DMA_CTL, 0);
626
627 /* Disable Serial port */
628 cx_write(UART_CTL, 0);
629
630 /* Disable Interrupts */
631 cx23885_irq_disable_all(dev);
632 cx_write(VID_A_INT_MSK, 0);
633 cx_write(VID_B_INT_MSK, 0);
634 cx_write(VID_C_INT_MSK, 0);
635 cx_write(AUDIO_INT_INT_MSK, 0);
636 cx_write(AUDIO_EXT_INT_MSK, 0);
637
638 }
639
640 static void cx23885_reset(struct cx23885_dev *dev)
641 {
642 dprintk(1, "%s()\n", __func__);
643
644 cx23885_shutdown(dev);
645
646 cx_write(PCI_INT_STAT, 0xffffffff);
647 cx_write(VID_A_INT_STAT, 0xffffffff);
648 cx_write(VID_B_INT_STAT, 0xffffffff);
649 cx_write(VID_C_INT_STAT, 0xffffffff);
650 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
651 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
652 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
653 cx_write(PAD_CTRL, 0x00500300);
654
655 mdelay(100);
656
657 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
658 720*4, 0);
659 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
660 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
661 188*4, 0);
662 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
663 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
664 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
665 188*4, 0);
666 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
667 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
668 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
669
670 cx23885_gpio_setup(dev);
671 }
672
673
674 static int cx23885_pci_quirks(struct cx23885_dev *dev)
675 {
676 dprintk(1, "%s()\n", __func__);
677
678 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
679 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
680 * occur on the cx23887 bridge.
681 */
682 if (dev->bridge == CX23885_BRIDGE_885)
683 cx_clear(RDR_TLCTL0, 1 << 4);
684
685 return 0;
686 }
687
688 static int get_resources(struct cx23885_dev *dev)
689 {
690 if (request_mem_region(pci_resource_start(dev->pci, 0),
691 pci_resource_len(dev->pci, 0),
692 dev->name))
693 return 0;
694
695 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
696 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
697
698 return -EBUSY;
699 }
700
701 static void cx23885_timeout(unsigned long data);
702 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
703 u32 reg, u32 mask, u32 value);
704
705 static int cx23885_init_tsport(struct cx23885_dev *dev,
706 struct cx23885_tsport *port, int portno)
707 {
708 dprintk(1, "%s(portno=%d)\n", __func__, portno);
709
710 /* Transport bus init dma queue - Common settings */
711 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
712 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
713 port->vld_misc_val = 0x0;
714 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
715
716 spin_lock_init(&port->slock);
717 port->dev = dev;
718 port->nr = portno;
719
720 INIT_LIST_HEAD(&port->mpegq.active);
721 INIT_LIST_HEAD(&port->mpegq.queued);
722 port->mpegq.timeout.function = cx23885_timeout;
723 port->mpegq.timeout.data = (unsigned long)port;
724 init_timer(&port->mpegq.timeout);
725
726 mutex_init(&port->frontends.lock);
727 INIT_LIST_HEAD(&port->frontends.felist);
728 port->frontends.active_fe_id = 0;
729
730 /* This should be hardcoded allow a single frontend
731 * attachment to this tsport, keeping the -dvb.c
732 * code clean and safe.
733 */
734 if (!port->num_frontends)
735 port->num_frontends = 1;
736
737 switch (portno) {
738 case 1:
739 port->reg_gpcnt = VID_B_GPCNT;
740 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
741 port->reg_dma_ctl = VID_B_DMA_CTL;
742 port->reg_lngth = VID_B_LNGTH;
743 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
744 port->reg_gen_ctrl = VID_B_GEN_CTL;
745 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
746 port->reg_sop_status = VID_B_SOP_STATUS;
747 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
748 port->reg_vld_misc = VID_B_VLD_MISC;
749 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
750 port->reg_src_sel = VID_B_SRC_SEL;
751 port->reg_ts_int_msk = VID_B_INT_MSK;
752 port->reg_ts_int_stat = VID_B_INT_STAT;
753 port->sram_chno = SRAM_CH03; /* VID_B */
754 port->pci_irqmask = 0x02; /* VID_B bit1 */
755 break;
756 case 2:
757 port->reg_gpcnt = VID_C_GPCNT;
758 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
759 port->reg_dma_ctl = VID_C_DMA_CTL;
760 port->reg_lngth = VID_C_LNGTH;
761 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
762 port->reg_gen_ctrl = VID_C_GEN_CTL;
763 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
764 port->reg_sop_status = VID_C_SOP_STATUS;
765 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
766 port->reg_vld_misc = VID_C_VLD_MISC;
767 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
768 port->reg_src_sel = 0;
769 port->reg_ts_int_msk = VID_C_INT_MSK;
770 port->reg_ts_int_stat = VID_C_INT_STAT;
771 port->sram_chno = SRAM_CH06; /* VID_C */
772 port->pci_irqmask = 0x04; /* VID_C bit2 */
773 break;
774 default:
775 BUG();
776 }
777
778 cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
779 port->reg_dma_ctl, port->dma_ctl_val, 0x00);
780
781 return 0;
782 }
783
784 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
785 {
786 switch (cx_read(RDR_CFG2) & 0xff) {
787 case 0x00:
788 /* cx23885 */
789 dev->hwrevision = 0xa0;
790 break;
791 case 0x01:
792 /* CX23885-12Z */
793 dev->hwrevision = 0xa1;
794 break;
795 case 0x02:
796 /* CX23885-13Z/14Z */
797 dev->hwrevision = 0xb0;
798 break;
799 case 0x03:
800 if (dev->pci->device == 0x8880) {
801 /* CX23888-21Z/22Z */
802 dev->hwrevision = 0xc0;
803 } else {
804 /* CX23885-14Z */
805 dev->hwrevision = 0xa4;
806 }
807 break;
808 case 0x04:
809 if (dev->pci->device == 0x8880) {
810 /* CX23888-31Z */
811 dev->hwrevision = 0xd0;
812 } else {
813 /* CX23885-15Z, CX23888-31Z */
814 dev->hwrevision = 0xa5;
815 }
816 break;
817 case 0x0e:
818 /* CX23887-15Z */
819 dev->hwrevision = 0xc0;
820 break;
821 case 0x0f:
822 /* CX23887-14Z */
823 dev->hwrevision = 0xb1;
824 break;
825 default:
826 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
827 __func__, dev->hwrevision);
828 }
829 if (dev->hwrevision)
830 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
831 __func__, dev->hwrevision);
832 else
833 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
834 __func__, dev->hwrevision);
835 }
836
837 /* Find the first v4l2_subdev member of the group id in hw */
838 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
839 {
840 struct v4l2_subdev *result = NULL;
841 struct v4l2_subdev *sd;
842
843 spin_lock(&dev->v4l2_dev.lock);
844 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
845 if (sd->grp_id == hw) {
846 result = sd;
847 break;
848 }
849 }
850 spin_unlock(&dev->v4l2_dev.lock);
851 return result;
852 }
853
854 static int cx23885_dev_setup(struct cx23885_dev *dev)
855 {
856 int i;
857
858 spin_lock_init(&dev->pci_irqmask_lock);
859
860 mutex_init(&dev->lock);
861 mutex_init(&dev->gpio_lock);
862
863 atomic_inc(&dev->refcount);
864
865 dev->nr = cx23885_devcount++;
866 sprintf(dev->name, "cx23885[%d]", dev->nr);
867
868 /* Configure the internal memory */
869 if (dev->pci->device == 0x8880) {
870 /* Could be 887 or 888, assume a default */
871 dev->bridge = CX23885_BRIDGE_887;
872 /* Apply a sensible clock frequency for the PCIe bridge */
873 dev->clk_freq = 25000000;
874 dev->sram_channels = cx23887_sram_channels;
875 } else
876 if (dev->pci->device == 0x8852) {
877 dev->bridge = CX23885_BRIDGE_885;
878 /* Apply a sensible clock frequency for the PCIe bridge */
879 dev->clk_freq = 28000000;
880 dev->sram_channels = cx23885_sram_channels;
881 } else
882 BUG();
883
884 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
885 __func__, dev->bridge);
886
887 /* board config */
888 dev->board = UNSET;
889 if (card[dev->nr] < cx23885_bcount)
890 dev->board = card[dev->nr];
891 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
892 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
893 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
894 dev->board = cx23885_subids[i].card;
895 if (UNSET == dev->board) {
896 dev->board = CX23885_BOARD_UNKNOWN;
897 cx23885_card_list(dev);
898 }
899
900 /* If the user specific a clk freq override, apply it */
901 if (cx23885_boards[dev->board].clk_freq > 0)
902 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
903
904 dev->pci_bus = dev->pci->bus->number;
905 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
906 cx23885_irq_add(dev, 0x001f00);
907
908 /* External Master 1 Bus */
909 dev->i2c_bus[0].nr = 0;
910 dev->i2c_bus[0].dev = dev;
911 dev->i2c_bus[0].reg_stat = I2C1_STAT;
912 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
913 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
914 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
915 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
916 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
917
918 /* External Master 2 Bus */
919 dev->i2c_bus[1].nr = 1;
920 dev->i2c_bus[1].dev = dev;
921 dev->i2c_bus[1].reg_stat = I2C2_STAT;
922 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
923 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
924 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
925 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
926 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
927
928 /* Internal Master 3 Bus */
929 dev->i2c_bus[2].nr = 2;
930 dev->i2c_bus[2].dev = dev;
931 dev->i2c_bus[2].reg_stat = I2C3_STAT;
932 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
933 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
934 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
935 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
936 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
937
938 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
939 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
940 cx23885_init_tsport(dev, &dev->ts1, 1);
941
942 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
943 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
944 cx23885_init_tsport(dev, &dev->ts2, 2);
945
946 if (get_resources(dev) < 0) {
947 printk(KERN_ERR "CORE %s No more PCIe resources for "
948 "subsystem: %04x:%04x\n",
949 dev->name, dev->pci->subsystem_vendor,
950 dev->pci->subsystem_device);
951
952 cx23885_devcount--;
953 return -ENODEV;
954 }
955
956 /* PCIe stuff */
957 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
958 pci_resource_len(dev->pci, 0));
959
960 dev->bmmio = (u8 __iomem *)dev->lmmio;
961
962 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
963 dev->name, dev->pci->subsystem_vendor,
964 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
965 dev->board, card[dev->nr] == dev->board ?
966 "insmod option" : "autodetected");
967
968 cx23885_pci_quirks(dev);
969
970 /* Assume some sensible defaults */
971 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
972 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
973 dev->radio_type = cx23885_boards[dev->board].radio_type;
974 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
975
976 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x\n",
977 __func__, dev->tuner_type, dev->tuner_addr);
978 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
979 __func__, dev->radio_type, dev->radio_addr);
980
981 /* The cx23417 encoder has GPIO's that need to be initialised
982 * before DVB, so that demodulators and tuners are out of
983 * reset before DVB uses them.
984 */
985 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
986 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
987 cx23885_mc417_init(dev);
988
989 /* init hardware */
990 cx23885_reset(dev);
991
992 cx23885_i2c_register(&dev->i2c_bus[0]);
993 cx23885_i2c_register(&dev->i2c_bus[1]);
994 cx23885_i2c_register(&dev->i2c_bus[2]);
995 cx23885_card_setup(dev);
996 call_all(dev, core, s_power, 0);
997 cx23885_ir_init(dev);
998
999 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1000 if (cx23885_video_register(dev) < 0) {
1001 printk(KERN_ERR "%s() Failed to register analog "
1002 "video adapters on VID_A\n", __func__);
1003 }
1004 }
1005
1006 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1007 if (cx23885_dvb_register(&dev->ts1) < 0) {
1008 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
1009 __func__);
1010 }
1011 } else
1012 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1013 if (cx23885_417_register(dev) < 0) {
1014 printk(KERN_ERR
1015 "%s() Failed to register 417 on VID_B\n",
1016 __func__);
1017 }
1018 }
1019
1020 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1021 if (cx23885_dvb_register(&dev->ts2) < 0) {
1022 printk(KERN_ERR
1023 "%s() Failed to register dvb on VID_C\n",
1024 __func__);
1025 }
1026 } else
1027 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1028 if (cx23885_417_register(dev) < 0) {
1029 printk(KERN_ERR
1030 "%s() Failed to register 417 on VID_C\n",
1031 __func__);
1032 }
1033 }
1034
1035 cx23885_dev_checkrevision(dev);
1036
1037 return 0;
1038 }
1039
1040 static void cx23885_dev_unregister(struct cx23885_dev *dev)
1041 {
1042 release_mem_region(pci_resource_start(dev->pci, 0),
1043 pci_resource_len(dev->pci, 0));
1044
1045 if (!atomic_dec_and_test(&dev->refcount))
1046 return;
1047
1048 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1049 cx23885_video_unregister(dev);
1050
1051 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1052 cx23885_dvb_unregister(&dev->ts1);
1053
1054 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1055 cx23885_417_unregister(dev);
1056
1057 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1058 cx23885_dvb_unregister(&dev->ts2);
1059
1060 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1061 cx23885_417_unregister(dev);
1062
1063 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1064 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1065 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1066
1067 iounmap(dev->lmmio);
1068 }
1069
/*
 * cx23885_risc_field() - emit RISC write instructions for one video field.
 *
 * Appends at @rp one RISC_RESYNC instruction (unless @sync_line is
 * NO_SYNC_LINE) followed by RISC_WRITE instructions transferring @lines
 * scan lines of @bpl bytes each into the scatter-gather list @sglist,
 * starting @offset bytes into it and skipping @padding bytes after each
 * line.  Every write instruction is 3 dwords: opcode/byte-count, low
 * 32 bits of the DMA address, and a zero high-address dword (bits 63-32).
 *
 * Returns the instruction pointer just past the emitted program so the
 * caller can append more instructions (e.g. the terminating jump).
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines)
{
	struct scatterlist *sg;
	unsigned int line, todo;

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* skip SG chunks that lie wholly before the current offset */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg++;
		}
		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split across SG chunks */
			todo = bpl;
			/* head: start-of-line, up to the end of this chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
					    (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg++;
			/* middle: whole chunks, neither SOL nor EOL */
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
					    sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg++;
			}
			/* tail: remainder of the line, end-of-line flag */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1123
/*
 * cx23885_risc_buffer() - build a RISC program for an interlaced frame.
 *
 * Allocates RISC memory in @risc sized from a worst-case instruction
 * estimate, then emits one cx23885_risc_field() program per requested
 * field.  @top_offset / @bottom_offset select which fields to transfer
 * (UNSET skips that field); the bottom field resyncs on line 0x200.
 * The caller-visible risc->jmp is left pointing just past the emitted
 * program so the queueing code can patch in a jump instruction later.
 *
 * Returns 0 on success or the negative error from btcx_riscmem_alloc().
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;
	int rc;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 2;
	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	/* the 2-dword jump appended later must still fit in the buffer */
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1165
/*
 * cx23885_risc_databuffer() - build a RISC program for a raw TS buffer.
 *
 * Like cx23885_risc_buffer() but for transport-stream data: a single
 * "field" with no sync instruction and no inter-line padding.  Leaves
 * risc->jmp pointing past the program for the queueing code to patch.
 *
 * Returns 0 on success or the negative error from btcx_riscmem_alloc().
 */
static int cx23885_risc_databuffer(struct pci_dev *pci,
				   struct btcx_riscmem *risc,
				   struct scatterlist *sglist,
				   unsigned int bpl,
				   unsigned int lines)
{
	u32 instructions;
	__le32 *rp;
	int rc;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Here
	   there is no padding and no sync. First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 1;

	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	/* the 2-dword jump appended later must still fit in the buffer */
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1197
/*
 * cx23885_risc_stopper() - build the "stopper" RISC program.
 *
 * Emits a WRITECR instruction (with IRQ2 raised) that writes @value
 * under @mask into hardware register @reg, followed by a jump whose
 * target is risc->dma — i.e. the start of this same program — so the
 * RISC engine idles here until a buffer's jump is patched to point at
 * a real transfer program.  The 4*16-byte allocation comfortably holds
 * the 7 dwords emitted.
 *
 * Returns 0 on success or the negative error from btcx_riscmem_alloc().
 */
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
				u32 reg, u32 mask, u32 value)
{
	__le32 *rp;
	int rc;

	rc = btcx_riscmem_alloc(pci, risc, 4*16);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	*(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
	*(rp++) = cpu_to_le32(reg);
	*(rp++) = cpu_to_le32(value);
	*(rp++) = cpu_to_le32(mask);
	*(rp++) = cpu_to_le32(RISC_JUMP);
	*(rp++) = cpu_to_le32(risc->dma);	/* loop back to program start */
	*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	return 0;
}
1219
/*
 * cx23885_free_buffer() - release one videobuf buffer and its RISC program.
 *
 * Waits (non-interruptibly) until the hardware has finished with the
 * buffer, unmaps and frees its DMA scatter list, releases the RISC
 * program memory, and marks the buffer so it is fully re-initialised
 * before its next use.  Sleeps, hence the in_interrupt() assertion.
 */
void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);

	BUG_ON(in_interrupt());
	videobuf_waiton(q, &buf->vb, 0, 0);
	videobuf_dma_unmap(q->dev, dma);
	videobuf_dma_free(dma);
	btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
1231
/*
 * cx23885_tsport_reg_dump() - dump TS-port related registers for debugging.
 *
 * Prints (at debug level 1) the chip-wide interrupt/DMA/pad registers
 * followed by the per-port register set addressed through the port's
 * reg_* fields.  src_sel is optional hardware and only dumped when the
 * port declares a register for it.  Pure read-side diagnostics; no
 * registers are written.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1283
1284 static int cx23885_start_dma(struct cx23885_tsport *port,
1285 struct cx23885_dmaqueue *q,
1286 struct cx23885_buffer *buf)
1287 {
1288 struct cx23885_dev *dev = port->dev;
1289 u32 reg;
1290
1291 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1292 buf->vb.width, buf->vb.height, buf->vb.field);
1293
1294 /* Stop the fifo and risc engine for this port */
1295 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1296
1297 /* setup fifo + format */
1298 cx23885_sram_channel_setup(dev,
1299 &dev->sram_channels[port->sram_chno],
1300 port->ts_packet_size, buf->risc.dma);
1301 if (debug > 5) {
1302 cx23885_sram_channel_dump(dev,
1303 &dev->sram_channels[port->sram_chno]);
1304 cx23885_risc_disasm(port, &buf->risc);
1305 }
1306
1307 /* write TS length to chip */
1308 cx_write(port->reg_lngth, buf->vb.width);
1309
1310 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1311 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1312 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1313 __func__,
1314 cx23885_boards[dev->board].portb,
1315 cx23885_boards[dev->board].portc);
1316 return -EINVAL;
1317 }
1318
1319 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1320 cx23885_av_clk(dev, 0);
1321
1322 udelay(100);
1323
1324 /* If the port supports SRC SELECT, configure it */
1325 if (port->reg_src_sel)
1326 cx_write(port->reg_src_sel, port->src_sel_val);
1327
1328 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1329 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1330 cx_write(port->reg_vld_misc, port->vld_misc_val);
1331 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1332 udelay(100);
1333
1334 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1335 /* reset counter to zero */
1336 cx_write(port->reg_gpcnt_ctl, 3);
1337 q->count = 1;
1338
1339 /* Set VIDB pins to input */
1340 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1341 reg = cx_read(PAD_CTRL);
1342 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1343 cx_write(PAD_CTRL, reg);
1344 }
1345
1346 /* Set VIDC pins to input */
1347 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1348 reg = cx_read(PAD_CTRL);
1349 reg &= ~0x4; /* Clear TS2_SOP_OE */
1350 cx_write(PAD_CTRL, reg);
1351 }
1352
1353 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1354
1355 reg = cx_read(PAD_CTRL);
1356 reg = reg & ~0x1; /* Clear TS1_OE */
1357
1358 /* FIXME, bit 2 writing here is questionable */
1359 /* set TS1_SOP_OE and TS1_OE_HI */
1360 reg = reg | 0xa;
1361 cx_write(PAD_CTRL, reg);
1362
1363 /* FIXME and these two registers should be documented. */
1364 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1365 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1366 }
1367
1368 switch (dev->bridge) {
1369 case CX23885_BRIDGE_885:
1370 case CX23885_BRIDGE_887:
1371 case CX23885_BRIDGE_888:
1372 /* enable irqs */
1373 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1374 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1375 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1376 cx23885_irq_add(dev, port->pci_irqmask);
1377 cx23885_irq_enable_all(dev);
1378 break;
1379 default:
1380 BUG();
1381 }
1382
1383 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1384
1385 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1386 cx23885_av_clk(dev, 1);
1387
1388 if (debug > 4)
1389 cx23885_tsport_reg_dump(port);
1390
1391 return 0;
1392 }
1393
1394 static int cx23885_stop_dma(struct cx23885_tsport *port)
1395 {
1396 struct cx23885_dev *dev = port->dev;
1397 u32 reg;
1398
1399 dprintk(1, "%s()\n", __func__);
1400
1401 /* Stop interrupts and DMA */
1402 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1403 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1404
1405 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1406
1407 reg = cx_read(PAD_CTRL);
1408
1409 /* Set TS1_OE */
1410 reg = reg | 0x1;
1411
1412 /* clear TS1_SOP_OE and TS1_OE_HI */
1413 reg = reg & ~0xa;
1414 cx_write(PAD_CTRL, reg);
1415 cx_write(port->reg_src_sel, 0);
1416 cx_write(port->reg_gen_ctrl, 8);
1417
1418 }
1419
1420 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1421 cx23885_av_clk(dev, 0);
1422
1423 return 0;
1424 }
1425
1426 int cx23885_restart_queue(struct cx23885_tsport *port,
1427 struct cx23885_dmaqueue *q)
1428 {
1429 struct cx23885_dev *dev = port->dev;
1430 struct cx23885_buffer *buf;
1431
1432 dprintk(5, "%s()\n", __func__);
1433 if (list_empty(&q->active)) {
1434 struct cx23885_buffer *prev;
1435 prev = NULL;
1436
1437 dprintk(5, "%s() queue is empty\n", __func__);
1438
1439 for (;;) {
1440 if (list_empty(&q->queued))
1441 return 0;
1442 buf = list_entry(q->queued.next, struct cx23885_buffer,
1443 vb.queue);
1444 if (NULL == prev) {
1445 list_del(&buf->vb.queue);
1446 list_add_tail(&buf->vb.queue, &q->active);
1447 cx23885_start_dma(port, q, buf);
1448 buf->vb.state = VIDEOBUF_ACTIVE;
1449 buf->count = q->count++;
1450 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1451 dprintk(5, "[%p/%d] restart_queue - f/active\n",
1452 buf, buf->vb.i);
1453
1454 } else if (prev->vb.width == buf->vb.width &&
1455 prev->vb.height == buf->vb.height &&
1456 prev->fmt == buf->fmt) {
1457 list_del(&buf->vb.queue);
1458 list_add_tail(&buf->vb.queue, &q->active);
1459 buf->vb.state = VIDEOBUF_ACTIVE;
1460 buf->count = q->count++;
1461 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1462 /* 64 bit bits 63-32 */
1463 prev->risc.jmp[2] = cpu_to_le32(0);
1464 dprintk(5, "[%p/%d] restart_queue - m/active\n",
1465 buf, buf->vb.i);
1466 } else {
1467 return 0;
1468 }
1469 prev = buf;
1470 }
1471 return 0;
1472 }
1473
1474 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1475 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1476 buf, buf->vb.i);
1477 cx23885_start_dma(port, q, buf);
1478 list_for_each_entry(buf, &q->active, vb.queue)
1479 buf->count = q->count++;
1480 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
1481 return 0;
1482 }
1483
1484 /* ------------------------------------------------------------------ */
1485
1486 int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1487 struct cx23885_buffer *buf, enum v4l2_field field)
1488 {
1489 struct cx23885_dev *dev = port->dev;
1490 int size = port->ts_packet_size * port->ts_packet_count;
1491 int rc;
1492
1493 dprintk(1, "%s: %p\n", __func__, buf);
1494 if (0 != buf->vb.baddr && buf->vb.bsize < size)
1495 return -EINVAL;
1496
1497 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
1498 buf->vb.width = port->ts_packet_size;
1499 buf->vb.height = port->ts_packet_count;
1500 buf->vb.size = size;
1501 buf->vb.field = field /*V4L2_FIELD_TOP*/;
1502
1503 rc = videobuf_iolock(q, &buf->vb, NULL);
1504 if (0 != rc)
1505 goto fail;
1506 cx23885_risc_databuffer(dev->pci, &buf->risc,
1507 videobuf_to_dma(&buf->vb)->sglist,
1508 buf->vb.width, buf->vb.height);
1509 }
1510 buf->vb.state = VIDEOBUF_PREPARED;
1511 return 0;
1512
1513 fail:
1514 cx23885_free_buffer(q, buf);
1515 return rc;
1516 }
1517
/*
 * cx23885_buf_queue() - videobuf buf_queue hook for a TS port.
 *
 * Terminates the buffer's RISC program with an IRQ-raising jump to the
 * queue's stopper program, then either starts DMA on it (empty queue)
 * or chains it behind the current tail by patching that buffer's jump
 * target to this buffer's RISC program.
 *
 * Called by videobuf with the queue lock held (presumably — TODO
 * confirm against the videobuf core's locking contract).
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;

	/* add jump to stopper */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&cx88q->active)) {
		dprintk(1, "queue is empty - first active\n");
		list_add_tail(&buf->vb.queue, &cx88q->active);
		cx23885_start_dma(port, cx88q, buf);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count    = cx88q->count++;
		mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.i, __func__);
	} else {
		dprintk(1, "queue is not empty - append to active\n");
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  vb.queue);
		list_add_tail(&buf->vb.queue, &cx88q->active);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count    = cx88q->count++;
		/* redirect the tail's jump from the stopper to this buffer */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.i, __func__);
	}
}
1551
1552 /* ----------------------------------------------------------- */
1553
1554 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
1555 int restart)
1556 {
1557 struct cx23885_dev *dev = port->dev;
1558 struct cx23885_dmaqueue *q = &port->mpegq;
1559 struct cx23885_buffer *buf;
1560 unsigned long flags;
1561
1562 spin_lock_irqsave(&port->slock, flags);
1563 while (!list_empty(&q->active)) {
1564 buf = list_entry(q->active.next, struct cx23885_buffer,
1565 vb.queue);
1566 list_del(&buf->vb.queue);
1567 buf->vb.state = VIDEOBUF_ERROR;
1568 wake_up(&buf->vb.done);
1569 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1570 buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
1571 }
1572 if (restart) {
1573 dprintk(1, "restarting queue\n");
1574 cx23885_restart_queue(port, q);
1575 }
1576 spin_unlock_irqrestore(&port->slock, flags);
1577 }
1578
/*
 * cx23885_cancel_buffers() - stop a port and error out all its buffers.
 *
 * Kills the queue timeout timer, stops interrupts/DMA on the port and
 * fails every active buffer without attempting a restart.  Sleeps in
 * del_timer_sync(), so must not be called from atomic context.
 */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;

	dprintk(1, "%s()\n", __func__);
	del_timer_sync(&q->timeout);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel", 0);
}
1589
/*
 * cx23885_timeout() - queue watchdog timer callback.
 *
 * Fires when a buffer has not completed within BUFFER_TIMEOUT.  Stops
 * the port's DMA, errors out the stalled buffers and asks for a queue
 * restart.  @data is the cx23885_tsport pointer packed by the timer
 * setup code.
 */
static void cx23885_timeout(unsigned long data)
{
	struct cx23885_tsport *port = (struct cx23885_tsport *)data;
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);

	if (debug > 5)
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	cx23885_stop_dma(port);
	do_cancel_buffers(port, "timeout", 1);
}
1604
/*
 * cx23885_irq_417() - service 417 MPEG encoder interrupts.
 *
 * Handles the VID_B status bits when port 1 runs the MPEG encoder:
 * error conditions stop the DMA, dump the SRAM channel and poke the
 * encoder; RISCI1 completes buffers; RISCI2 restarts the queue.  Any
 * non-zero status is acknowledged by writing it back to the interrupt
 * status register.
 *
 * Returns non-zero when the interrupt was handled (status != 0).
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)     ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		/* stop DMA on error, then check the encoder state */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		/* buffer completed: wake up consumers */
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	} else if (status & VID_B_MSK_RISCI2) {
		/* RISC engine hit the stopper: refill the queue */
		dprintk(7, "        VID_B_MSK_RISCI2\n");
		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack by writing the status bits back */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1665
/*
 * cx23885_irq_ts() - service DVB transport-stream interrupts for a port.
 *
 * Error bits stop the port's DMA and dump the SRAM channel; RISCI1
 * completes buffers up to the current general-purpose counter; RISCI2
 * (stopper reached) restarts the queue.  Any non-zero status is
 * acknowledged by writing it back to the status register.
 *
 * Returns non-zero when the interrupt was handled (status != 0).
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		/* stop DMA on error */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		/* buffer completed: wake up consumers */
		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	} else if (status & VID_BC_MSK_RISCI2) {

		/* RISC engine hit the stopper: refill the queue */
		dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);

		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* ack by writing the status bits back */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1724
/*
 * cx23885_irq() - top-level PCI interrupt handler.
 *
 * Reads the bridge-wide PCI status plus the per-unit (video A, TS B,
 * TS C) status/mask registers, bails out early when nothing is pending
 * (analog video A interrupts are presumably reported through
 * PCI_MSK_VID_A in pci_status — TODO confirm), then dispatches to the
 * CI slot handlers, the TS/encoder handlers, the analog video handler,
 * the IR subdevice, and finally defers AV-core work to a workqueue
 * (masking PCI_MSK_AV_CORE until the deferred handler re-enables it).
 * Any handled work acknowledges pci_status at the end.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* nothing pending anywhere we service: likely a shared IRQ */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* verbose decode of every pci status bit we know about */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CAM/CI slot interrupts, per-board CI controller type */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		/* mask the source until the deferred worker handles it */
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		if (!schedule_work(&dev->cx25840_work))
			printk(KERN_ERR "%s: failed to set up deferred work for"
			       " AV Core/IR interrupt. Interrupt is disabled"
			       " and won't be re-enabled\n", dev->name);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1874
1875 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1876 unsigned int notification, void *arg)
1877 {
1878 struct cx23885_dev *dev;
1879
1880 if (sd == NULL)
1881 return;
1882
1883 dev = to_cx23885(sd->v4l2_dev);
1884
1885 switch (notification) {
1886 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1887 if (sd == dev->sd_ir)
1888 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1889 break;
1890 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1891 if (sd == dev->sd_ir)
1892 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1893 break;
1894 }
1895 }
1896
/*
 * cx23885_v4l2_dev_notify_init() - wire up deferred work and the subdev
 * notification callback before any subdevice can start firing events.
 */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1904
/* Non-zero when the board routes the 417 MPEG encoder through port B. */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1909
/* Non-zero when the board routes the 417 MPEG encoder through port C. */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1914
1915 /* Mask represents 32 different GPIOs, GPIO's are split into multiple
1916 * registers depending on the board configuration (and whether the
1917 * 417 encoder (wi it's own GPIO's) are present. Each GPIO bit will
1918 * be pushed into the correct hardware register, regardless of the
1919 * physical location. Certain registers are shared so we sanity check
1920 * and report errors if we think we're tampering with a GPIo that might
1921 * be assigned to the encoder (and used for the host bus).
1922 *
1923 * GPIO 2 thru 0 - On the cx23885 bridge
1924 * GPIO 18 thru 3 - On the cx23417 host bus interface
1925 * GPIO 23 thru 19 - On the cx25840 a/v core
1926 */
1927 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1928 {
1929 if (mask & 0x7)
1930 cx_set(GP0_IO, mask & 0x7);
1931
1932 if (mask & 0x0007fff8) {
1933 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1934 printk(KERN_ERR
1935 "%s: Setting GPIO on encoder ports\n",
1936 dev->name);
1937 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1938 }
1939
1940 /* TODO: 23-19 */
1941 if (mask & 0x00f80000)
1942 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1943 }
1944
1945 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1946 {
1947 if (mask & 0x00000007)
1948 cx_clear(GP0_IO, mask & 0x7);
1949
1950 if (mask & 0x0007fff8) {
1951 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1952 printk(KERN_ERR
1953 "%s: Clearing GPIO moving on encoder ports\n",
1954 dev->name);
1955 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1956 }
1957
1958 /* TODO: 23-19 */
1959 if (mask & 0x00f80000)
1960 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1961 }
1962
/*
 * cx23885_gpio_get() - read the selected GPIO lines.
 *
 * Same bit layout as cx23885_gpio_set().  NOTE(review): the ranges are
 * handled by early returns, so a @mask spanning both the bridge bits
 * (2..0) and the host-bus bits (18..3) only reads the bridge range —
 * callers are presumably expected to query one range at a time.
 * Bits 23..19 (cx25840 a/v core) are not implemented and read as 0.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
1982
1983 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1984 {
1985 if ((mask & 0x00000007) && asoutput)
1986 cx_set(GP0_IO, (mask & 0x7) << 16);
1987 else if ((mask & 0x00000007) && !asoutput)
1988 cx_clear(GP0_IO, (mask & 0x7) << 16);
1989
1990 if (mask & 0x0007fff8) {
1991 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1992 printk(KERN_ERR
1993 "%s: Enabling GPIO on encoder ports\n",
1994 dev->name);
1995 }
1996
1997 /* MC417_OEN is active low for output, write 1 for an input */
1998 if ((mask & 0x0007fff8) && asoutput)
1999 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2000
2001 else if ((mask & 0x0007fff8) && !asoutput)
2002 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2003
2004 /* TODO: 23-19 */
2005 }
2006
2007 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
2008 const struct pci_device_id *pci_id)
2009 {
2010 struct cx23885_dev *dev;
2011 int err;
2012
2013 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2014 if (NULL == dev)
2015 return -ENOMEM;
2016
2017 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2018 if (err < 0)
2019 goto fail_free;
2020
2021 /* Prepare to handle notifications from subdevices */
2022 cx23885_v4l2_dev_notify_init(dev);
2023
2024 /* pci init */
2025 dev->pci = pci_dev;
2026 if (pci_enable_device(pci_dev)) {
2027 err = -EIO;
2028 goto fail_unreg;
2029 }
2030
2031 if (cx23885_dev_setup(dev) < 0) {
2032 err = -EINVAL;
2033 goto fail_unreg;
2034 }
2035
2036 /* print pci info */
2037 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
2038 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2039 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2040 "latency: %d, mmio: 0x%llx\n", dev->name,
2041 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2042 dev->pci_lat,
2043 (unsigned long long)pci_resource_start(pci_dev, 0));
2044
2045 pci_set_master(pci_dev);
2046 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
2047 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2048 err = -EIO;
2049 goto fail_irq;
2050 }
2051
2052 if (!pci_enable_msi(pci_dev))
2053 err = request_irq(pci_dev->irq, cx23885_irq,
2054 IRQF_DISABLED, dev->name, dev);
2055 else
2056 err = request_irq(pci_dev->irq, cx23885_irq,
2057 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
2058 if (err < 0) {
2059 printk(KERN_ERR "%s: can't get IRQ %d\n",
2060 dev->name, pci_dev->irq);
2061 goto fail_irq;
2062 }
2063
2064 switch (dev->board) {
2065 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2066 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2067 break;
2068 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2069 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2070 break;
2071 }
2072
2073 /*
2074 * The CX2388[58] IR controller can start firing interrupts when
2075 * enabled, so these have to take place after the cx23885_irq() handler
2076 * is hooked up by the call to request_irq() above.
2077 */
2078 cx23885_ir_pci_int_enable(dev);
2079 cx23885_input_init(dev);
2080
2081 return 0;
2082
2083 fail_irq:
2084 cx23885_dev_unregister(dev);
2085 fail_unreg:
2086 v4l2_device_unregister(&dev->v4l2_dev);
2087 fail_free:
2088 kfree(dev);
2089 return err;
2090 }
2091
/*
 * cx23885_finidev() - PCI remove entry point.
 *
 * Tears down in reverse probe order: input/IR first (they can raise
 * interrupts), then quiesce the hardware, release the IRQ/MSI, and
 * finally unregister and free the device state.
 */
static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	pci_disable_device(pci_dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);
	pci_disable_msi(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2112
/* PCI device IDs this driver binds to; board variants are resolved
 * later from the subsystem IDs by the card-detection code. */
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2131
/* PCI driver glue; power management is not implemented yet. */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = __devexit_p(cx23885_finidev),
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
2141
/*
 * cx23885_init() - module entry point: announce the driver version
 * (decoded from the packed CX23885_VERSION_CODE) and register with
 * the PCI core.
 */
static int __init cx23885_init(void)
{
	printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
	       (CX23885_VERSION_CODE >> 16) & 0xff,
	       (CX23885_VERSION_CODE >>  8) & 0xff,
	       CX23885_VERSION_CODE & 0xff);
#ifdef SNAPSHOT
	printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
	       SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
#endif
	return pci_register_driver(&cx23885_pci_driver);
}
2154
/* Module exit point: unregister from the PCI core. */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2159
2160 module_init(cx23885_init);
2161 module_exit(cx23885_fini);
2162
2163 /* ----------------------------------------------------------- */
This page took 0.137972 seconds and 5 git commands to generate.