8d77a564977772e1a24a87089a6cc348dc9ac802
[deliverable/linux.git] / drivers / media / pci / cx23885 / cx23885-core.c
1 /*
2 * Driver for the Conexant CX23885 PCIe bridge
3 *
4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
16 */
17
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kmod.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/div64.h>
28 #include <linux/firmware.h>
29
30 #include "cx23885.h"
31 #include "cimax2.h"
32 #include "altera-ci.h"
33 #include "cx23888-ir.h"
34 #include "cx23885-ir.h"
35 #include "cx23885-av.h"
36 #include "cx23885-input.h"
37
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

/* Debug verbosity; higher values enable more messages (writable at runtime). */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Per-device card-type override indexed by probe order; UNSET = autodetect. */
static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Emit fmt at KERN_DEBUG when debug >= level; expects a `dev` in scope. */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)

/* Count of cx23885 devices probed so far; used to name each instance. */
static unsigned int cx23885_devcount;

/* Sentinel: do not emit a RISC RESYNC instruction for this field. */
#define NO_SYNC_LINE (-1U)
59
60 /* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
62 * CX23887 Assumptions
63 * 1 line = 16 bytes of CDT
64 * cmds size = 80
65 * cdt size = 16 * linesize
66 * iqsize = 64
67 * maxlines = 6
68 *
69 * Address Space:
70 * 0x00000000 0x00008fff FIFO clusters
71 * 0x00010000 0x000104af Channel Management Data Structures
72 * 0x000104b0 0x000104ff Free
73 * 0x00010500 0x000108bf 15 channels * iqsize
74 * 0x000108c0 0x000108ff Free
75 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
76 * 15 channels * (iqsize + (maxlines * linesize))
77 * 0x00010ea0 0x00010xxx Free
78 */
79
/* SRAM layout for the CX23885 bridge. Entries with all-zero addresses are
 * channels this driver does not use on the 885; cx23885_sram_channel_setup()
 * treats cmds_start == 0 as "erase/disable this channel". */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		/* Analog video capture path */
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		/* Unused on 885 (zeroed addresses) */
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		/* Transport stream port B */
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		/* Transport stream port C.
		 * NOTE(review): this entry reuses the DMA5_* registers (same as
		 * CH05, which is unused here) rather than DMA6_* — matches the
		 * table as shipped; verify against hardware docs if changing. */
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
190
/* SRAM layout for the CX23887/8 bridge. Unlike the 885 table, CH02 (VBI) is
 * populated here; zeroed entries are still disabled via channel_setup(). */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		/* Analog video capture path */
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		/* VBI capture for the analog path */
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		/* Transport stream port B */
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		/* Transport stream port C.
		 * NOTE(review): reuses the DMA5_* registers (same as unused
		 * CH05), mirroring the 885 table — verify intentional. */
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
301
302 static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
303 {
304 unsigned long flags;
305 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
306
307 dev->pci_irqmask |= mask;
308
309 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
310 }
311
312 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
313 {
314 unsigned long flags;
315 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
316
317 dev->pci_irqmask |= mask;
318 cx_set(PCI_INT_MSK, mask);
319
320 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
321 }
322
323 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
324 {
325 u32 v;
326 unsigned long flags;
327 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328
329 v = mask & dev->pci_irqmask;
330 if (v)
331 cx_set(PCI_INT_MSK, v);
332
333 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
334 }
335
/* Enable every interrupt previously registered in dev->pci_irqmask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
340
341 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
342 {
343 unsigned long flags;
344 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
345
346 cx_clear(PCI_INT_MSK, mask);
347
348 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349 }
350
/* Disable all hardware interrupts without touching the software irqmask. */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
355
356 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
357 {
358 unsigned long flags;
359 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
360
361 dev->pci_irqmask &= ~mask;
362 cx_clear(PCI_INT_MSK, mask);
363
364 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
365 }
366
367 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368 {
369 u32 v;
370 unsigned long flags;
371 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372
373 v = cx_read(PCI_INT_MSK);
374
375 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376 return v;
377 }
378
379 static int cx23885_risc_decode(u32 risc)
380 {
381 static char *instr[16] = {
382 [RISC_SYNC >> 28] = "sync",
383 [RISC_WRITE >> 28] = "write",
384 [RISC_WRITEC >> 28] = "writec",
385 [RISC_READ >> 28] = "read",
386 [RISC_READC >> 28] = "readc",
387 [RISC_JUMP >> 28] = "jump",
388 [RISC_SKIP >> 28] = "skip",
389 [RISC_WRITERM >> 28] = "writerm",
390 [RISC_WRITECM >> 28] = "writecm",
391 [RISC_WRITECR >> 28] = "writecr",
392 };
393 static int incr[16] = {
394 [RISC_WRITE >> 28] = 3,
395 [RISC_JUMP >> 28] = 3,
396 [RISC_SKIP >> 28] = 1,
397 [RISC_SYNC >> 28] = 1,
398 [RISC_WRITERM >> 28] = 3,
399 [RISC_WRITECM >> 28] = 3,
400 [RISC_WRITECR >> 28] = 4,
401 };
402 static char *bits[] = {
403 "12", "13", "14", "resync",
404 "cnt0", "cnt1", "18", "19",
405 "20", "21", "22", "23",
406 "irq1", "irq2", "eol", "sol",
407 };
408 int i;
409
410 printk("0x%08x [ %s", risc,
411 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
412 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
413 if (risc & (1 << (i + 12)))
414 printk(" %s", bits[i]);
415 printk(" count=%d ]\n", risc & 0xfff);
416 return incr[risc >> 28] ? incr[risc >> 28] : 1;
417 }
418
419 static void cx23885_wakeup(struct cx23885_tsport *port,
420 struct cx23885_dmaqueue *q, u32 count)
421 {
422 struct cx23885_dev *dev = port->dev;
423 struct cx23885_buffer *buf;
424
425 if (list_empty(&q->active))
426 return;
427 buf = list_entry(q->active.next,
428 struct cx23885_buffer, queue);
429
430 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
431 buf->vb.v4l2_buf.sequence = q->count++;
432 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
433 count, q->count);
434 list_del(&buf->queue);
435 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
436 }
437
/* Program one SRAM DMA channel: build its cluster descriptor table (CDT)
 * and CMDS block, then load the channel's pointer/count registers.
 *
 * @ch:   channel description (addresses/registers); cmds_start == 0 means
 *        the channel is unused and is erased instead of configured.
 * @bpl:  bytes per line, rounded up to an 8-byte multiple below.
 * @risc: initial RISC program address written to CMDS when not jumponly.
 *
 * Returns 0. The write order (CDT, then CMDS, then registers) follows the
 * bridge's expected bring-up sequence — do not reorder.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		/* Unused channel: zero its DMA pointer/count registers. */
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl = (bpl + 7) & ~7; /* alignment */
	cdt = ch->cdt;
	/* One CDT entry per FIFO line, hardware-capped at 6 lines,
	 * and at least 2 are required for DMA to make progress. */
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* Tiny bootstrap program at SRAM offset 8: jump (with counter
	 * reset) to address 12 — used by jumponly channels. */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT: each 16-byte entry points at one FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i + 4, 0);
		cx_write(cdt + 16*i + 8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS: initial PC, CDT base/size, IQ base/size */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start + 8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* zero the remainder of the 80-byte CMDS block */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
509
/* Dump a channel's CMDS block, decoded RISC state and DMA registers to the
 * kernel log at KERN_WARNING — diagnostic aid, no side effects on hardware
 * beyond the register reads.
 */
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	/* Labels for the first 14 dwords of the CMDS block, in order. */
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	/* Decode the 4 RISC words that follow the labelled CMDS dwords. */
	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	/* Walk the 64-byte instruction queue; cx23885_risc_decode() returns
	 * each instruction's dword length, which advances i past its args. */
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
571
/* Disassemble a RISC program from host memory to the kernel log, stopping
 * at the terminating JUMP instruction (or end of the allocation).
 */
static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct btcx_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk(KERN_INFO "%s: %04d: ", dev->name, i);
		/* decode returns the instruction length; skip its args */
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			/* NOTE(review): args printed without le32_to_cpu(),
			 * unlike the opcode above — log-only, but verify. */
			printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
			       dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}
590
/* Quiesce the bridge: stop the RISC controller, halt every DMA engine
 * (IR, video, audio, UART) and mask all interrupt sources. Called from
 * reset and teardown paths.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
620
/* Full bridge reset: shut everything down, acknowledge all pending
 * interrupt status, reprogram every SRAM DMA channel for this bridge's
 * layout, and re-run the board's GPIO setup.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Writing 1s clears any latched interrupt status bits. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	/* Preserve only the top bit of CLK_DELAY. */
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* Settle time after reset; busy-wait, so only call from
	 * sleepable/probe context. */
	mdelay(100);

	/* Channel line sizes: 720*4 video, 188*4 TS packets, 128 for
	 * channels this driver leaves disabled (their tables are zeroed). */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
653
654
655 static int cx23885_pci_quirks(struct cx23885_dev *dev)
656 {
657 dprintk(1, "%s()\n", __func__);
658
659 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
660 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
661 * occur on the cx23887 bridge.
662 */
663 if (dev->bridge == CX23885_BRIDGE_885)
664 cx_clear(RDR_TLCTL0, 1 << 4);
665
666 return 0;
667 }
668
669 static int get_resources(struct cx23885_dev *dev)
670 {
671 if (request_mem_region(pci_resource_start(dev->pci, 0),
672 pci_resource_len(dev->pci, 0),
673 dev->name))
674 return 0;
675
676 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
677 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
678
679 return -EBUSY;
680 }
681
/* Initialise one transport-stream port structure: common DMA/interrupt
 * defaults, locks and queues, then the VID_B (portno 1) or VID_C
 * (portno 2) register map. Any other portno is a programming error (BUG).
 * Returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val = 0x0;
	/* 0x47 = MPEG sync byte, 188 = TS packet length */
	port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		/* Port B register map */
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		/* Port C register map; note no SRC_SEL register here */
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
752
753 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
754 {
755 switch (cx_read(RDR_CFG2) & 0xff) {
756 case 0x00:
757 /* cx23885 */
758 dev->hwrevision = 0xa0;
759 break;
760 case 0x01:
761 /* CX23885-12Z */
762 dev->hwrevision = 0xa1;
763 break;
764 case 0x02:
765 /* CX23885-13Z/14Z */
766 dev->hwrevision = 0xb0;
767 break;
768 case 0x03:
769 if (dev->pci->device == 0x8880) {
770 /* CX23888-21Z/22Z */
771 dev->hwrevision = 0xc0;
772 } else {
773 /* CX23885-14Z */
774 dev->hwrevision = 0xa4;
775 }
776 break;
777 case 0x04:
778 if (dev->pci->device == 0x8880) {
779 /* CX23888-31Z */
780 dev->hwrevision = 0xd0;
781 } else {
782 /* CX23885-15Z, CX23888-31Z */
783 dev->hwrevision = 0xa5;
784 }
785 break;
786 case 0x0e:
787 /* CX23887-15Z */
788 dev->hwrevision = 0xc0;
789 break;
790 case 0x0f:
791 /* CX23887-14Z */
792 dev->hwrevision = 0xb1;
793 break;
794 default:
795 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
796 __func__, dev->hwrevision);
797 }
798 if (dev->hwrevision)
799 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
800 __func__, dev->hwrevision);
801 else
802 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
803 __func__, dev->hwrevision);
804 }
805
806 /* Find the first v4l2_subdev member of the group id in hw */
807 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
808 {
809 struct v4l2_subdev *result = NULL;
810 struct v4l2_subdev *sd;
811
812 spin_lock(&dev->v4l2_dev.lock);
813 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
814 if (sd->grp_id == hw) {
815 result = sd;
816 break;
817 }
818 }
819 spin_unlock(&dev->v4l2_dev.lock);
820 return result;
821 }
822
/* One-time device bring-up, called at probe time: identify the bridge,
 * select a board profile, claim and map MMIO, reset the hardware and
 * register the i2c buses, IR, analog video, DVB and/or encoder blocks
 * that the board profile enables.
 *
 * Returns 0 on success, -ENODEV if the MMIO region cannot be claimed.
 * The statement order is significant (resources -> map -> reset -> i2c ->
 * card setup -> sub-driver registration); do not reorder.
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	/* Unique instance number and name, e.g. "cx23885[0]". */
	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: module parameter wins, then PCI subsystem id match,
	 * else fall back to the "unknown" board and print the card list. */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specific a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	/* Register the baseline PCI interrupt sources we service. */
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	/* Initialise TS port structures for whichever ports the board uses. */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for "
		       "subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	/* Byte-granular alias of the same mapping. */
	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	/* Registration failures below are logged but not fatal. */
	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			printk(KERN_ERR "%s() Failed to register analog "
			       "video adapters on VID_A\n", __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR
				"%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		/* Same MSI workaround for these TeVii boards. */
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}
1026
/* Tear down a device: release the MMIO region, and — once the last
 * reference is dropped — unregister every sub-driver the board profile
 * enabled, then unmap MMIO. Mirrors cx23885_dev_setup() in reverse.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	/* Released unconditionally; the matching claim is per-setup call. */
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	/* NOTE(review): if a board had BOTH portb and portc as ENCODER,
	 * cx23885_417_unregister() would run twice — verify idempotence
	 * or that no such board profile exists. */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1056
/* Emit the RISC instructions for one video field into *rp, walking the
 * scatter-gather list so each scan line's DMA writes are split at
 * SG-chunk boundaries.
 *
 * @offset:    byte offset of this field's first line within the SG buffer
 * @sync_line: line value for the leading RESYNC, or NO_SYNC_LINE for none
 * @bpl:       bytes per line to transfer
 * @padding:   bytes to skip in the destination between lines
 * @lpi:       if non-zero, raise IRQ1 and bump the counter every lpi lines
 * @jump:      emit a placeholder 3-dword JUMP before the program
 *
 * Returns the advanced write pointer (one past the last emitted dword).
 * All emitted instructions are 3 dwords (opcode, addr, upper-32 = 0).
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* advance to the SG chunk containing `offset` */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg++;
		}

		/* every lpi-th line additionally raises IRQ1 + count */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split: SOL write to end of
			 * chunk, middle writes for whole chunks, then a
			 * final EOL write for the remainder */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg++;
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg++;
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1123
1124 int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1125 struct scatterlist *sglist, unsigned int top_offset,
1126 unsigned int bottom_offset, unsigned int bpl,
1127 unsigned int padding, unsigned int lines)
1128 {
1129 u32 instructions, fields;
1130 __le32 *rp;
1131 int rc;
1132
1133 fields = 0;
1134 if (UNSET != top_offset)
1135 fields++;
1136 if (UNSET != bottom_offset)
1137 fields++;
1138
1139 /* estimate risc mem: worst case is one write per page border +
1140 one write per scan line + syncs + jump (all 2 dwords). Padding
1141 can cause next bpl to start close to a page border. First DMA
1142 region may be smaller than PAGE_SIZE */
1143 /* write and jump need and extra dword */
1144 instructions = fields * (1 + ((bpl + padding) * lines)
1145 / PAGE_SIZE + lines);
1146 instructions += 5;
1147 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1148 if (rc < 0)
1149 return rc;
1150
1151 /* write risc instructions */
1152 rp = risc->cpu;
1153 if (UNSET != top_offset)
1154 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1155 bpl, padding, lines, 0, true);
1156 if (UNSET != bottom_offset)
1157 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1158 bpl, padding, lines, 0, UNSET == top_offset);
1159
1160 /* save pointer to jmp instruction address */
1161 risc->jmp = rp;
1162 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1163 return 0;
1164 }
1165
1166 int cx23885_risc_databuffer(struct pci_dev *pci,
1167 struct btcx_riscmem *risc,
1168 struct scatterlist *sglist,
1169 unsigned int bpl,
1170 unsigned int lines, unsigned int lpi)
1171 {
1172 u32 instructions;
1173 __le32 *rp;
1174 int rc;
1175
1176 /* estimate risc mem: worst case is one write per page border +
1177 one write per scan line + syncs + jump (all 2 dwords). Here
1178 there is no padding and no sync. First DMA region may be smaller
1179 than PAGE_SIZE */
1180 /* Jump and write need an extra dword */
1181 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1182 instructions += 4;
1183
1184 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1185 if (rc < 0)
1186 return rc;
1187
1188 /* write risc instructions */
1189 rp = risc->cpu;
1190 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1191 bpl, 0, lines, lpi, lpi == 0);
1192
1193 /* save pointer to jmp instruction address */
1194 risc->jmp = rp;
1195 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1196 return 0;
1197 }
1198
1199 int cx23885_risc_vbibuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1200 struct scatterlist *sglist, unsigned int top_offset,
1201 unsigned int bottom_offset, unsigned int bpl,
1202 unsigned int padding, unsigned int lines)
1203 {
1204 u32 instructions, fields;
1205 __le32 *rp;
1206 int rc;
1207
1208 fields = 0;
1209 if (UNSET != top_offset)
1210 fields++;
1211 if (UNSET != bottom_offset)
1212 fields++;
1213
1214 /* estimate risc mem: worst case is one write per page border +
1215 one write per scan line + syncs + jump (all 2 dwords). Padding
1216 can cause next bpl to start close to a page border. First DMA
1217 region may be smaller than PAGE_SIZE */
1218 /* write and jump need and extra dword */
1219 instructions = fields * (1 + ((bpl + padding) * lines)
1220 / PAGE_SIZE + lines);
1221 instructions += 5;
1222 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1223 if (rc < 0)
1224 return rc;
1225 /* write risc instructions */
1226 rp = risc->cpu;
1227
1228 /* Sync to line 6, so US CC line 21 will appear in line '12'
1229 * in the userland vbi payload */
1230 if (UNSET != top_offset)
1231 rp = cx23885_risc_field(rp, sglist, top_offset, 6,
1232 bpl, padding, lines, 0, true);
1233
1234 if (UNSET != bottom_offset)
1235 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x207,
1236 bpl, padding, lines, 0, UNSET == top_offset);
1237
1238
1239
1240 /* save pointer to jmp instruction address */
1241 risc->jmp = rp;
1242 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1243 return 0;
1244 }
1245
1246
/* Free the RISC program backing @buf.  The BUG_ON enforces process
 * context; this must never run from an interrupt handler. */
void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	BUG_ON(in_interrupt());
	btcx_riscmem_free(dev->pci, &buf->risc);
}
1252
/* Dump the bridge-global and per-port TS registers at debug level 1;
 * diagnostic aid used when starting DMA with debug > 4. */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	/* src_sel is optional; not every port has one */
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1304
1305 int cx23885_start_dma(struct cx23885_tsport *port,
1306 struct cx23885_dmaqueue *q,
1307 struct cx23885_buffer *buf)
1308 {
1309 struct cx23885_dev *dev = port->dev;
1310 u32 reg;
1311
1312 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1313 dev->width, dev->height, dev->field);
1314
1315 /* Stop the fifo and risc engine for this port */
1316 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1317
1318 /* setup fifo + format */
1319 cx23885_sram_channel_setup(dev,
1320 &dev->sram_channels[port->sram_chno],
1321 port->ts_packet_size, buf->risc.dma);
1322 if (debug > 5) {
1323 cx23885_sram_channel_dump(dev,
1324 &dev->sram_channels[port->sram_chno]);
1325 cx23885_risc_disasm(port, &buf->risc);
1326 }
1327
1328 /* write TS length to chip */
1329 cx_write(port->reg_lngth, port->ts_packet_size);
1330
1331 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1332 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1333 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1334 __func__,
1335 cx23885_boards[dev->board].portb,
1336 cx23885_boards[dev->board].portc);
1337 return -EINVAL;
1338 }
1339
1340 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1341 cx23885_av_clk(dev, 0);
1342
1343 udelay(100);
1344
1345 /* If the port supports SRC SELECT, configure it */
1346 if (port->reg_src_sel)
1347 cx_write(port->reg_src_sel, port->src_sel_val);
1348
1349 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1350 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1351 cx_write(port->reg_vld_misc, port->vld_misc_val);
1352 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1353 udelay(100);
1354
1355 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1356 /* reset counter to zero */
1357 cx_write(port->reg_gpcnt_ctl, 3);
1358 q->count = 0;
1359
1360 /* Set VIDB pins to input */
1361 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1362 reg = cx_read(PAD_CTRL);
1363 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1364 cx_write(PAD_CTRL, reg);
1365 }
1366
1367 /* Set VIDC pins to input */
1368 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1369 reg = cx_read(PAD_CTRL);
1370 reg &= ~0x4; /* Clear TS2_SOP_OE */
1371 cx_write(PAD_CTRL, reg);
1372 }
1373
1374 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1375
1376 reg = cx_read(PAD_CTRL);
1377 reg = reg & ~0x1; /* Clear TS1_OE */
1378
1379 /* FIXME, bit 2 writing here is questionable */
1380 /* set TS1_SOP_OE and TS1_OE_HI */
1381 reg = reg | 0xa;
1382 cx_write(PAD_CTRL, reg);
1383
1384 /* FIXME and these two registers should be documented. */
1385 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1386 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1387 }
1388
1389 switch (dev->bridge) {
1390 case CX23885_BRIDGE_885:
1391 case CX23885_BRIDGE_887:
1392 case CX23885_BRIDGE_888:
1393 /* enable irqs */
1394 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1395 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1396 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1397 cx23885_irq_add(dev, port->pci_irqmask);
1398 cx23885_irq_enable_all(dev);
1399 break;
1400 default:
1401 BUG();
1402 }
1403
1404 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1405
1406 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1407 cx23885_av_clk(dev, 1);
1408
1409 if (debug > 4)
1410 cx23885_tsport_reg_dump(port);
1411
1412 return 0;
1413 }
1414
1415 static int cx23885_stop_dma(struct cx23885_tsport *port)
1416 {
1417 struct cx23885_dev *dev = port->dev;
1418 u32 reg;
1419
1420 dprintk(1, "%s()\n", __func__);
1421
1422 /* Stop interrupts and DMA */
1423 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1424 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1425
1426 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1427
1428 reg = cx_read(PAD_CTRL);
1429
1430 /* Set TS1_OE */
1431 reg = reg | 0x1;
1432
1433 /* clear TS1_SOP_OE and TS1_OE_HI */
1434 reg = reg & ~0xa;
1435 cx_write(PAD_CTRL, reg);
1436 cx_write(port->reg_src_sel, 0);
1437 cx_write(port->reg_gen_ctrl, 8);
1438
1439 }
1440
1441 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1442 cx23885_av_clk(dev, 0);
1443
1444 return 0;
1445 }
1446
1447 /* ------------------------------------------------------------------ */
1448
1449 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1450 {
1451 struct cx23885_dev *dev = port->dev;
1452 int size = port->ts_packet_size * port->ts_packet_count;
1453 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
1454 int rc;
1455
1456 dprintk(1, "%s: %p\n", __func__, buf);
1457 if (vb2_plane_size(&buf->vb, 0) < size)
1458 return -EINVAL;
1459 vb2_set_plane_payload(&buf->vb, 0, size);
1460
1461 rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
1462 if (!rc)
1463 return -EIO;
1464
1465 cx23885_risc_databuffer(dev->pci, &buf->risc,
1466 sgt->sgl,
1467 port->ts_packet_size, port->ts_packet_count, 0);
1468 return 0;
1469 }
1470
1471 /*
1472 * The risc program for each buffer works as follows: it starts with a simple
1473 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1474 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1475 * the initial JUMP).
1476 *
1477 * This is the risc program of the first buffer to be queued if the active list
1478 * is empty and it just keeps DMAing this buffer without generating any
1479 * interrupts.
1480 *
1481 * If a new buffer is added then the initial JUMP in the code for that buffer
1482 * will generate an interrupt which signals that the previous buffer has been
1483 * DMAed successfully and that it can be returned to userspace.
1484 *
1485 * It also sets the final jump of the previous buffer to the start of the new
1486 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1487 * atomic u32 write, so there is no race condition.
1488 *
 * The end-result of all this is that you only get an interrupt when a buffer
1490 * is ready, so the control flow is very easy.
1491 */
/*
 * Append a buffer to the port's active DMA queue.
 *
 * Fills in the buffer's leading JUMP (to its own program + 12, skipping
 * that JUMP) and its trailing jump (back to the same place, so a lone
 * buffer loops without interrupts).  When appending behind an existing
 * buffer, the new program's leading JUMP gains IRQ1 so completion of the
 * predecessor is signalled, and the predecessor's trailing jump target
 * is retargeted at the new program -- a single 32-bit store, so there is
 * no race with the running RISC engine.  See the block comment above.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.v4l2_buf.index, __func__);
	} else {
		/* chain behind the current tail and request an IRQ on entry */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			 buf, buf->vb.v4l2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1520
1521 /* ----------------------------------------------------------- */
1522
1523 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1524 {
1525 struct cx23885_dev *dev = port->dev;
1526 struct cx23885_dmaqueue *q = &port->mpegq;
1527 struct cx23885_buffer *buf;
1528 unsigned long flags;
1529
1530 spin_lock_irqsave(&port->slock, flags);
1531 while (!list_empty(&q->active)) {
1532 buf = list_entry(q->active.next, struct cx23885_buffer,
1533 queue);
1534 list_del(&buf->queue);
1535 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
1536 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1537 buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
1538 }
1539 spin_unlock_irqrestore(&port->slock, flags);
1540 }
1541
/* Stop DMA on the port, then return every queued buffer with an error. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1550
/*
 * Service a TS interrupt raised by the cx23417 MPEG encoder port.
 *
 * On error conditions (bad packet, opcode error, sync loss, overflow)
 * the port's DMA is stopped, the SRAM channel is dumped for diagnostics
 * and the encoder state is re-checked; on a normal RISC IRQ1 the
 * completed buffer is handed back via cx23885_wakeup().  Any non-zero
 * status is acked by writing it back to the status register.
 *
 * Returns non-zero when the interrupt was handled.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
		(status & VID_B_MSK_OPC_ERR) ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC) ||
		(status & VID_B_MSK_VBI_SYNC) ||
		(status & VID_B_MSK_OF) ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		/* stop the port and dump state for debugging */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack whatever we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1606
/*
 * Service a TS interrupt for a DVB-configured port.
 *
 * Error conditions (opcode error, bad packet, sync loss, overflow) stop
 * the port's DMA and dump the SRAM channel; a normal RISC IRQ1 wakes the
 * completed buffer via cx23885_wakeup().  Any non-zero status is acked
 * by writing it back to the port's interrupt status register.
 *
 * Returns non-zero when the interrupt was handled.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		/* stop the port and dump state for debugging */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* ack whatever we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1657
/*
 * Top-level shared PCI interrupt handler.
 *
 * Reads every status/mask pair (PCI bridge, video A, audio, TS ports
 * B/C), then dispatches to the per-subsystem handlers: CAM/CI slot
 * status, TS/417 ports, analog video, audio, the IR subdevice, and the
 * AV core (deferred to a workqueue with its interrupt masked).  The
 * handled PCI status bits are acked at the end by writing them back to
 * PCI_INT_STAT.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* NOTE(review): vida/audint statuses are not part of this
	 * early-exit check; presumably they always come with a PCI
	 * status bit set -- confirm. */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Debug-only decode of the individual PCI status bits */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CI slot events, routed via GPIO interrupts on NetUP boards */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* TS ports: DVB ports get the TS handler, encoder ports the 417 one */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	/* IR: let the subdevice service its own interrupt */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* AV core: mask it and defer the work to process context */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1814
1815 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1816 unsigned int notification, void *arg)
1817 {
1818 struct cx23885_dev *dev;
1819
1820 if (sd == NULL)
1821 return;
1822
1823 dev = to_cx23885(sd->v4l2_dev);
1824
1825 switch (notification) {
1826 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1827 if (sd == dev->sd_ir)
1828 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1829 break;
1830 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1831 if (sd == dev->sd_ir)
1832 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1833 break;
1834 }
1835 }
1836
/* Install the work handlers and the v4l2_device notify callback so
 * subdevice notifications (AV core, IR RX/TX) can be serviced outside
 * interrupt context. */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1844
/* True when the board routes the cx23417 MPEG encoder through port B. */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1849
/* True when the board routes the cx23417 MPEG encoder through port C. */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1854
/* Mask represents 32 different GPIOs; GPIOs are split into multiple
 * registers depending on the board configuration (and on whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
1867 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1868 {
1869 if (mask & 0x7)
1870 cx_set(GP0_IO, mask & 0x7);
1871
1872 if (mask & 0x0007fff8) {
1873 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1874 printk(KERN_ERR
1875 "%s: Setting GPIO on encoder ports\n",
1876 dev->name);
1877 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1878 }
1879
1880 /* TODO: 23-19 */
1881 if (mask & 0x00f80000)
1882 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1883 }
1884
1885 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1886 {
1887 if (mask & 0x00000007)
1888 cx_clear(GP0_IO, mask & 0x7);
1889
1890 if (mask & 0x0007fff8) {
1891 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1892 printk(KERN_ERR
1893 "%s: Clearing GPIO moving on encoder ports\n",
1894 dev->name);
1895 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1896 }
1897
1898 /* TODO: 23-19 */
1899 if (mask & 0x00f80000)
1900 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1901 }
1902
/*
 * Read back the GPIO input levels selected by @mask.
 *
 * NOTE(review): unlike cx23885_gpio_set()/clear(), this returns after
 * the first matching bank, so a mask spanning both the bridge (bits
 * 2-0) and the host bus (bits 18-3) reports only the bridge bits --
 * confirm no caller passes a mixed mask before changing this.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	/* Bridge GPIO input levels are read back shifted up by 8 in GP0_IO */
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
1922
1923 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1924 {
1925 if ((mask & 0x00000007) && asoutput)
1926 cx_set(GP0_IO, (mask & 0x7) << 16);
1927 else if ((mask & 0x00000007) && !asoutput)
1928 cx_clear(GP0_IO, (mask & 0x7) << 16);
1929
1930 if (mask & 0x0007fff8) {
1931 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1932 printk(KERN_ERR
1933 "%s: Enabling GPIO on encoder ports\n",
1934 dev->name);
1935 }
1936
1937 /* MC417_OEN is active low for output, write 1 for an input */
1938 if ((mask & 0x0007fff8) && asoutput)
1939 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1940
1941 else if ((mask & 0x0007fff8) && !asoutput)
1942 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1943
1944 /* TODO: 23-19 */
1945 }
1946
1947 static int cx23885_initdev(struct pci_dev *pci_dev,
1948 const struct pci_device_id *pci_id)
1949 {
1950 struct cx23885_dev *dev;
1951 struct v4l2_ctrl_handler *hdl;
1952 int err;
1953
1954 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1955 if (NULL == dev)
1956 return -ENOMEM;
1957
1958 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1959 if (err < 0)
1960 goto fail_free;
1961
1962 hdl = &dev->ctrl_handler;
1963 v4l2_ctrl_handler_init(hdl, 6);
1964 if (hdl->error) {
1965 err = hdl->error;
1966 goto fail_ctrl;
1967 }
1968 dev->v4l2_dev.ctrl_handler = hdl;
1969
1970 /* Prepare to handle notifications from subdevices */
1971 cx23885_v4l2_dev_notify_init(dev);
1972
1973 /* pci init */
1974 dev->pci = pci_dev;
1975 if (pci_enable_device(pci_dev)) {
1976 err = -EIO;
1977 goto fail_ctrl;
1978 }
1979
1980 if (cx23885_dev_setup(dev) < 0) {
1981 err = -EINVAL;
1982 goto fail_ctrl;
1983 }
1984
1985 /* print pci info */
1986 dev->pci_rev = pci_dev->revision;
1987 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
1988 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1989 "latency: %d, mmio: 0x%llx\n", dev->name,
1990 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
1991 dev->pci_lat,
1992 (unsigned long long)pci_resource_start(pci_dev, 0));
1993
1994 pci_set_master(pci_dev);
1995 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
1996 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1997 err = -EIO;
1998 goto fail_irq;
1999 }
2000
2001 err = request_irq(pci_dev->irq, cx23885_irq,
2002 IRQF_SHARED, dev->name, dev);
2003 if (err < 0) {
2004 printk(KERN_ERR "%s: can't get IRQ %d\n",
2005 dev->name, pci_dev->irq);
2006 goto fail_irq;
2007 }
2008
2009 switch (dev->board) {
2010 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2011 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2012 break;
2013 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2014 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2015 break;
2016 }
2017
2018 /*
2019 * The CX2388[58] IR controller can start firing interrupts when
2020 * enabled, so these have to take place after the cx23885_irq() handler
2021 * is hooked up by the call to request_irq() above.
2022 */
2023 cx23885_ir_pci_int_enable(dev);
2024 cx23885_input_init(dev);
2025
2026 return 0;
2027
2028 fail_irq:
2029 cx23885_dev_unregister(dev);
2030 fail_ctrl:
2031 v4l2_ctrl_handler_free(hdl);
2032 v4l2_device_unregister(&dev->v4l2_dev);
2033 fail_free:
2034 kfree(dev);
2035 return err;
2036 }
2037
/*
 * PCI remove: tear down in roughly the reverse order of probe.  Input
 * and IR go first (they can raise interrupts), then the hardware is shut
 * down and the device disabled before the IRQ and v4l2 registrations are
 * released.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	pci_disable_device(pci_dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2058
/* PCI device IDs claimed by this driver (Conexant vendor ID 0x14f1) */
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2077
/* PCI driver registration; power management is not implemented yet */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
2087
/* Module load: announce the driver version, register with the PCI core. */
static int __init cx23885_init(void)
{
	printk(KERN_INFO "cx23885 driver version %s loaded\n",
	       CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}
2094
/* Module unload: detach from the PCI core, triggering per-device remove. */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2099
2100 module_init(cx23885_init);
2101 module_exit(cx23885_fini);
This page took 0.13218 seconds and 4 git commands to generate.