[media] cx23885: Always initialise dev->slock spinlock
drivers/media/pci/cx23885/cx23885-core.c
1/*
2 * Driver for the Conexant CX23885 PCIe bridge
3 *
6d897616 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 */
17
18#include <linux/init.h>
19#include <linux/list.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kmod.h>
23#include <linux/kernel.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <asm/div64.h>
78db8547 28#include <linux/firmware.h>
29
30#include "cx23885.h"
5a23b076 31#include "cimax2.h"
78db8547 32#include "altera-ci.h"
29f8a0a5 33#include "cx23888-ir.h"
f59ad611 34#include "cx23885-ir.h"
e5514f10 35#include "cx23885-av.h"
dbda8f70 36#include "cx23885-input.h"
37
38MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
6d897616 39MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
d19770e5 40MODULE_LICENSE("GPL");
1990d50b 41MODULE_VERSION(CX23885_VERSION);
d19770e5 42
4513fc69 43static unsigned int debug;
44module_param(debug, int, 0644);
45MODULE_PARM_DESC(debug, "enable debug messages");
46
47static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
48module_param_array(card, int, NULL, 0444);
9c8ced51 49MODULE_PARM_DESC(card, "card type");
d19770e5 50
51#define dprintk(level, fmt, arg...)\
52 do { if (debug >= level)\
b5f74050 53 printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
4513fc69 54 } while (0)
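/* Usage example (as seen throughout this file): dprintk(1, "%s()\n", __func__);
 * the message is emitted only when the 'debug' module parameter is >= level. */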
55
56static unsigned int cx23885_devcount;
57
58#define NO_SYNC_LINE (-1U)
59
60/* FIXME, these allocations will change when
 61 * analog arrives. To be reviewed.
62 * CX23887 Assumptions
63 * 1 line = 16 bytes of CDT
64 * cmds size = 80
65 * cdt size = 16 * linesize
66 * iqsize = 64
67 * maxlines = 6
68 *
69 * Address Space:
70 * 0x00000000 0x00008fff FIFO clusters
71 * 0x00010000 0x000104af Channel Management Data Structures
72 * 0x000104b0 0x000104ff Free
73 * 0x00010500 0x000108bf 15 channels * iqsize
74 * 0x000108c0 0x000108ff Free
75 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
76 * 15 channels * (iqsize + (maxlines * linesize))
77 * 0x00010ea0 0x00010xxx Free
78 */
79
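/* Worked example (illustrative, derived from the assumptions above): with
 * iqsize = 64 and 15 channels, the instruction queues need 15 * 64 = 960
 * (0x3c0) bytes, so the region starting at 0x00010500 ends just before
 * 0x000108c0 - matching the "Free" boundary listed in the map.
 */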
7e994302 80static struct sram_channel cx23885_sram_channels[] = {
d19770e5 81 [SRAM_CH01] = {
82 .name = "VID A",
83 .cmds_start = 0x10000,
84 .ctrl_start = 0x10380,
85 .cdt = 0x104c0,
86 .fifo_start = 0x40,
87 .fifo_size = 0x2800,
88 .ptr1_reg = DMA1_PTR1,
89 .ptr2_reg = DMA1_PTR2,
90 .cnt1_reg = DMA1_CNT1,
91 .cnt2_reg = DMA1_CNT2,
92 },
93 [SRAM_CH02] = {
94 .name = "ch2",
95 .cmds_start = 0x0,
96 .ctrl_start = 0x0,
97 .cdt = 0x0,
98 .fifo_start = 0x0,
99 .fifo_size = 0x0,
100 .ptr1_reg = DMA2_PTR1,
101 .ptr2_reg = DMA2_PTR2,
102 .cnt1_reg = DMA2_CNT1,
103 .cnt2_reg = DMA2_CNT2,
104 },
105 [SRAM_CH03] = {
106 .name = "TS1 B",
107 .cmds_start = 0x100A0,
108 .ctrl_start = 0x10400,
109 .cdt = 0x10580,
110 .fifo_start = 0x5000,
111 .fifo_size = 0x1000,
112 .ptr1_reg = DMA3_PTR1,
113 .ptr2_reg = DMA3_PTR2,
114 .cnt1_reg = DMA3_CNT1,
115 .cnt2_reg = DMA3_CNT2,
116 },
117 [SRAM_CH04] = {
118 .name = "ch4",
119 .cmds_start = 0x0,
120 .ctrl_start = 0x0,
121 .cdt = 0x0,
122 .fifo_start = 0x0,
123 .fifo_size = 0x0,
124 .ptr1_reg = DMA4_PTR1,
125 .ptr2_reg = DMA4_PTR2,
126 .cnt1_reg = DMA4_CNT1,
127 .cnt2_reg = DMA4_CNT2,
128 },
129 [SRAM_CH05] = {
130 .name = "ch5",
131 .cmds_start = 0x0,
132 .ctrl_start = 0x0,
133 .cdt = 0x0,
134 .fifo_start = 0x0,
135 .fifo_size = 0x0,
136 .ptr1_reg = DMA5_PTR1,
137 .ptr2_reg = DMA5_PTR2,
138 .cnt1_reg = DMA5_CNT1,
139 .cnt2_reg = DMA5_CNT2,
140 },
141 [SRAM_CH06] = {
142 .name = "TS2 C",
143 .cmds_start = 0x10140,
144 .ctrl_start = 0x10440,
145 .cdt = 0x105e0,
146 .fifo_start = 0x6000,
147 .fifo_size = 0x1000,
148 .ptr1_reg = DMA5_PTR1,
149 .ptr2_reg = DMA5_PTR2,
150 .cnt1_reg = DMA5_CNT1,
151 .cnt2_reg = DMA5_CNT2,
152 },
153 [SRAM_CH07] = {
154 .name = "TV Audio",
155 .cmds_start = 0x10190,
156 .ctrl_start = 0x10480,
157 .cdt = 0x10a00,
158 .fifo_start = 0x7000,
159 .fifo_size = 0x1000,
160 .ptr1_reg = DMA6_PTR1,
161 .ptr2_reg = DMA6_PTR2,
162 .cnt1_reg = DMA6_CNT1,
163 .cnt2_reg = DMA6_CNT2,
164 },
165 [SRAM_CH08] = {
166 .name = "ch8",
167 .cmds_start = 0x0,
168 .ctrl_start = 0x0,
169 .cdt = 0x0,
170 .fifo_start = 0x0,
171 .fifo_size = 0x0,
172 .ptr1_reg = DMA7_PTR1,
173 .ptr2_reg = DMA7_PTR2,
174 .cnt1_reg = DMA7_CNT1,
175 .cnt2_reg = DMA7_CNT2,
176 },
177 [SRAM_CH09] = {
178 .name = "ch9",
179 .cmds_start = 0x0,
180 .ctrl_start = 0x0,
181 .cdt = 0x0,
182 .fifo_start = 0x0,
183 .fifo_size = 0x0,
184 .ptr1_reg = DMA8_PTR1,
185 .ptr2_reg = DMA8_PTR2,
186 .cnt1_reg = DMA8_CNT1,
187 .cnt2_reg = DMA8_CNT2,
188 },
189};
190
191static struct sram_channel cx23887_sram_channels[] = {
192 [SRAM_CH01] = {
193 .name = "VID A",
194 .cmds_start = 0x10000,
195 .ctrl_start = 0x105b0,
196 .cdt = 0x107b0,
197 .fifo_start = 0x40,
198 .fifo_size = 0x2800,
199 .ptr1_reg = DMA1_PTR1,
200 .ptr2_reg = DMA1_PTR2,
201 .cnt1_reg = DMA1_CNT1,
202 .cnt2_reg = DMA1_CNT2,
203 },
204 [SRAM_CH02] = {
205 .name = "VID A (VBI)",
206 .cmds_start = 0x10050,
207 .ctrl_start = 0x105F0,
208 .cdt = 0x10810,
209 .fifo_start = 0x3000,
210 .fifo_size = 0x1000,
211 .ptr1_reg = DMA2_PTR1,
212 .ptr2_reg = DMA2_PTR2,
213 .cnt1_reg = DMA2_CNT1,
214 .cnt2_reg = DMA2_CNT2,
215 },
216 [SRAM_CH03] = {
217 .name = "TS1 B",
218 .cmds_start = 0x100A0,
219 .ctrl_start = 0x10630,
220 .cdt = 0x10870,
221 .fifo_start = 0x5000,
222 .fifo_size = 0x1000,
223 .ptr1_reg = DMA3_PTR1,
224 .ptr2_reg = DMA3_PTR2,
225 .cnt1_reg = DMA3_CNT1,
226 .cnt2_reg = DMA3_CNT2,
227 },
228 [SRAM_CH04] = {
229 .name = "ch4",
230 .cmds_start = 0x0,
231 .ctrl_start = 0x0,
232 .cdt = 0x0,
233 .fifo_start = 0x0,
234 .fifo_size = 0x0,
235 .ptr1_reg = DMA4_PTR1,
236 .ptr2_reg = DMA4_PTR2,
237 .cnt1_reg = DMA4_CNT1,
238 .cnt2_reg = DMA4_CNT2,
239 },
240 [SRAM_CH05] = {
241 .name = "ch5",
242 .cmds_start = 0x0,
243 .ctrl_start = 0x0,
244 .cdt = 0x0,
245 .fifo_start = 0x0,
246 .fifo_size = 0x0,
247 .ptr1_reg = DMA5_PTR1,
248 .ptr2_reg = DMA5_PTR2,
249 .cnt1_reg = DMA5_CNT1,
250 .cnt2_reg = DMA5_CNT2,
251 },
252 [SRAM_CH06] = {
253 .name = "TS2 C",
254 .cmds_start = 0x10140,
255 .ctrl_start = 0x10670,
256 .cdt = 0x108d0,
257 .fifo_start = 0x6000,
258 .fifo_size = 0x1000,
259 .ptr1_reg = DMA5_PTR1,
260 .ptr2_reg = DMA5_PTR2,
261 .cnt1_reg = DMA5_CNT1,
262 .cnt2_reg = DMA5_CNT2,
263 },
264 [SRAM_CH07] = {
265 .name = "TV Audio",
266 .cmds_start = 0x10190,
267 .ctrl_start = 0x106B0,
268 .cdt = 0x10930,
269 .fifo_start = 0x7000,
270 .fifo_size = 0x1000,
271 .ptr1_reg = DMA6_PTR1,
272 .ptr2_reg = DMA6_PTR2,
273 .cnt1_reg = DMA6_CNT1,
274 .cnt2_reg = DMA6_CNT2,
275 },
276 [SRAM_CH08] = {
277 .name = "ch8",
278 .cmds_start = 0x0,
279 .ctrl_start = 0x0,
280 .cdt = 0x0,
281 .fifo_start = 0x0,
282 .fifo_size = 0x0,
283 .ptr1_reg = DMA7_PTR1,
284 .ptr2_reg = DMA7_PTR2,
285 .cnt1_reg = DMA7_CNT1,
286 .cnt2_reg = DMA7_CNT2,
287 },
288 [SRAM_CH09] = {
289 .name = "ch9",
290 .cmds_start = 0x0,
291 .ctrl_start = 0x0,
292 .cdt = 0x0,
293 .fifo_start = 0x0,
294 .fifo_size = 0x0,
295 .ptr1_reg = DMA8_PTR1,
296 .ptr2_reg = DMA8_PTR2,
297 .cnt1_reg = DMA8_CNT1,
298 .cnt2_reg = DMA8_CNT2,
299 },
300};
301
ada73eee 302static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
303{
304 unsigned long flags;
305 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
306
307 dev->pci_irqmask |= mask;
308
309 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
310}
311
312void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
313{
314 unsigned long flags;
315 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
316
317 dev->pci_irqmask |= mask;
318 cx_set(PCI_INT_MSK, mask);
319
320 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
321}
322
323void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
324{
325 u32 v;
326 unsigned long flags;
327 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328
329 v = mask & dev->pci_irqmask;
330 if (v)
331 cx_set(PCI_INT_MSK, v);
332
333 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
334}
335
336static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
337{
338 cx23885_irq_enable(dev, 0xffffffff);
339}
340
341void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
342{
343 unsigned long flags;
344 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
345
346 cx_clear(PCI_INT_MSK, mask);
347
348 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349}
350
351static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
352{
353 cx23885_irq_disable(dev, 0xffffffff);
354}
355
356void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
357{
358 unsigned long flags;
359 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
360
361 dev->pci_irqmask &= ~mask;
362 cx_clear(PCI_INT_MSK, mask);
363
364 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
365}
366
367static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368{
369 u32 v;
370 unsigned long flags;
371 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372
373 v = cx_read(PCI_INT_MSK);
374
375 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376 return v;
377}
378
379static int cx23885_risc_decode(u32 risc)
380{
381 static char *instr[16] = {
382 [RISC_SYNC >> 28] = "sync",
383 [RISC_WRITE >> 28] = "write",
384 [RISC_WRITEC >> 28] = "writec",
385 [RISC_READ >> 28] = "read",
386 [RISC_READC >> 28] = "readc",
387 [RISC_JUMP >> 28] = "jump",
388 [RISC_SKIP >> 28] = "skip",
389 [RISC_WRITERM >> 28] = "writerm",
390 [RISC_WRITECM >> 28] = "writecm",
391 [RISC_WRITECR >> 28] = "writecr",
392 };
393 static int incr[16] = {
394 [RISC_WRITE >> 28] = 3,
395 [RISC_JUMP >> 28] = 3,
396 [RISC_SKIP >> 28] = 1,
397 [RISC_SYNC >> 28] = 1,
398 [RISC_WRITERM >> 28] = 3,
399 [RISC_WRITECM >> 28] = 3,
400 [RISC_WRITECR >> 28] = 4,
401 };
402 static char *bits[] = {
403 "12", "13", "14", "resync",
404 "cnt0", "cnt1", "18", "19",
405 "20", "21", "22", "23",
406 "irq1", "irq2", "eol", "sol",
407 };
408 int i;
409
410 printk("0x%08x [ %s", risc,
411 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
44a6481d 412 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
d19770e5 413 if (risc & (1 << (i + 12)))
44a6481d 414 printk(" %s", bits[i]);
415 printk(" count=%d ]\n", risc & 0xfff);
416 return incr[risc >> 28] ? incr[risc >> 28] : 1;
417}
418
453afdd9 419static void cx23885_wakeup(struct cx23885_tsport *port,
39e75cfe 420 struct cx23885_dmaqueue *q, u32 count)
421{
422 struct cx23885_dev *dev = port->dev;
423 struct cx23885_buffer *buf;
d19770e5 424
9c8ced51 425 if (list_empty(&q->active))
426 return;
427 buf = list_entry(q->active.next,
428 struct cx23885_buffer, queue);
429
430 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
431 buf->vb.v4l2_buf.sequence = q->count++;
432 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
433 count, q->count);
434 list_del(&buf->queue);
435 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
d19770e5 436}
d19770e5 437
7b888014 438int cx23885_sram_channel_setup(struct cx23885_dev *dev,
439 struct sram_channel *ch,
440 unsigned int bpl, u32 risc)
d19770e5 441{
44a6481d 442 unsigned int i, lines;
443 u32 cdt;
444
9c8ced51 445 if (ch->cmds_start == 0) {
22b4e64f 446 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
44a6481d 447 ch->name);
448 cx_write(ch->ptr1_reg, 0);
449 cx_write(ch->ptr2_reg, 0);
450 cx_write(ch->cnt2_reg, 0);
451 cx_write(ch->cnt1_reg, 0);
452 return 0;
453 } else {
22b4e64f 454 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
44a6481d 455 ch->name);
456 }
457
458 bpl = (bpl + 7) & ~7; /* alignment */
459 cdt = ch->cdt;
460 lines = ch->fifo_size / bpl;
461 if (lines > 6)
462 lines = 6;
463 BUG_ON(lines < 2);
464
465 cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
466 cx_write(8 + 4, 12);
86ecc027 467 cx_write(8 + 8, 0);
468
469 /* write CDT */
470 for (i = 0; i < lines; i++) {
22b4e64f 471 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
44a6481d 472 ch->fifo_start + bpl*i);
473 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
474 cx_write(cdt + 16*i + 4, 0);
475 cx_write(cdt + 16*i + 8, 0);
476 cx_write(cdt + 16*i + 12, 0);
477 }
478
479 /* write CMDS */
480 if (ch->jumponly)
9c8ced51 481 cx_write(ch->cmds_start + 0, 8);
d19770e5 482 else
9c8ced51 483 cx_write(ch->cmds_start + 0, risc);
484 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
485 cx_write(ch->cmds_start + 8, cdt);
486 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
487 cx_write(ch->cmds_start + 16, ch->ctrl_start);
488 if (ch->jumponly)
9c8ced51 489 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
490 else
491 cx_write(ch->cmds_start + 20, 64 >> 2);
492 for (i = 24; i < 80; i += 4)
493 cx_write(ch->cmds_start + i, 0);
494
495 /* fill registers */
496 cx_write(ch->ptr1_reg, ch->fifo_start);
497 cx_write(ch->ptr2_reg, cdt);
498 cx_write(ch->cnt2_reg, (lines*16) >> 3);
9c8ced51 499 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
d19770e5 500
9c8ced51 501 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
e133be0f 502 dev->bridge,
503 ch->name,
504 bpl,
505 lines);
506
507 return 0;
508}
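/* Worked example (illustrative): cx23885_reset() sets up the TS channels with
 * bpl = 188 * 4 = 752 (already 8-byte aligned) and those channels have a
 * 0x1000 byte FIFO, so lines = 4096 / 752 = 5, which satisfies the 2..6 bound
 * enforced above; the CDT then describes five clusters of 752 bytes each.
 */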
509
7b888014 510void cx23885_sram_channel_dump(struct cx23885_dev *dev,
39e75cfe 511 struct sram_channel *ch)
d19770e5
ST
512{
513 static char *name[] = {
514 "init risc lo",
515 "init risc hi",
516 "cdt base",
517 "cdt size",
518 "iq base",
519 "iq size",
520 "risc pc lo",
521 "risc pc hi",
522 "iq wr ptr",
523 "iq rd ptr",
524 "cdt current",
525 "pci target lo",
526 "pci target hi",
527 "line / byte",
528 };
529 u32 risc;
44a6481d 530 unsigned int i, j, n;
d19770e5 531
9c8ced51 532 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
d19770e5
ST
533 dev->name, ch->name);
534 for (i = 0; i < ARRAY_SIZE(name); i++)
9c8ced51 535 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
d19770e5
ST
536 dev->name, name[i],
537 cx_read(ch->cmds_start + 4*i));
538
539 for (i = 0; i < 4; i++) {
44a6481d 540 risc = cx_read(ch->cmds_start + 4 * (i + 14));
9c8ced51 541 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
d19770e5
ST
542 cx23885_risc_decode(risc);
543 }
544 for (i = 0; i < (64 >> 2); i += n) {
44a6481d
MK
545 risc = cx_read(ch->ctrl_start + 4 * i);
546 /* No consideration for bits 63-32 */
547
9c8ced51 548 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
44a6481d 549 ch->ctrl_start + 4 * i, i);
d19770e5
ST
550 n = cx23885_risc_decode(risc);
551 for (j = 1; j < n; j++) {
44a6481d 552 risc = cx_read(ch->ctrl_start + 4 * (i + j));
9c8ced51 553 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
d19770e5
ST
554 dev->name, i+j, risc, j);
555 }
556 }
557
9c8ced51 558 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
d19770e5 559 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
9c8ced51 560 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
44a6481d 561 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
9c8ced51 562 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
d19770e5 563 dev->name, cx_read(ch->ptr1_reg));
9c8ced51 564 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
d19770e5 565 dev->name, cx_read(ch->ptr2_reg));
9c8ced51 566 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
d19770e5 567 dev->name, cx_read(ch->cnt1_reg));
9c8ced51 568 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
d19770e5
ST
569 dev->name, cx_read(ch->cnt2_reg));
570}
571
39e75cfe 572static void cx23885_risc_disasm(struct cx23885_tsport *port,
4d63a25c 573 struct cx23885_riscmem *risc)
d19770e5
ST
574{
575 struct cx23885_dev *dev = port->dev;
44a6481d 576 unsigned int i, j, n;
d19770e5 577
9c8ced51 578 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
d19770e5
ST
579 dev->name, risc->cpu, (unsigned long)risc->dma);
580 for (i = 0; i < (risc->size >> 2); i += n) {
9c8ced51 581 printk(KERN_INFO "%s: %04d: ", dev->name, i);
86ecc027 582 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
d19770e5 583 for (j = 1; j < n; j++)
9c8ced51 584 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
44a6481d 585 dev->name, i + j, risc->cpu[i + j], j);
86ecc027 586 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
d19770e5
ST
587 break;
588 }
589}
590
39e75cfe 591static void cx23885_shutdown(struct cx23885_dev *dev)
d19770e5
ST
592{
593 /* disable RISC controller */
594 cx_write(DEV_CNTRL2, 0);
595
596 /* Disable all IR activity */
597 cx_write(IR_CNTRL_REG, 0);
598
599 /* Disable Video A/B activity */
600 cx_write(VID_A_DMA_CTL, 0);
601 cx_write(VID_B_DMA_CTL, 0);
602 cx_write(VID_C_DMA_CTL, 0);
603
604 /* Disable Audio activity */
605 cx_write(AUD_INT_DMA_CTL, 0);
606 cx_write(AUD_EXT_DMA_CTL, 0);
607
608 /* Disable Serial port */
609 cx_write(UART_CTL, 0);
610
611 /* Disable Interrupts */
dbe83a3b 612 cx23885_irq_disable_all(dev);
d19770e5
ST
613 cx_write(VID_A_INT_MSK, 0);
614 cx_write(VID_B_INT_MSK, 0);
615 cx_write(VID_C_INT_MSK, 0);
616 cx_write(AUDIO_INT_INT_MSK, 0);
617 cx_write(AUDIO_EXT_INT_MSK, 0);
618
619}
620
39e75cfe 621static void cx23885_reset(struct cx23885_dev *dev)
d19770e5 622{
22b4e64f 623 dprintk(1, "%s()\n", __func__);
d19770e5
ST
624
625 cx23885_shutdown(dev);
626
627 cx_write(PCI_INT_STAT, 0xffffffff);
628 cx_write(VID_A_INT_STAT, 0xffffffff);
629 cx_write(VID_B_INT_STAT, 0xffffffff);
630 cx_write(VID_C_INT_STAT, 0xffffffff);
631 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
632 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
633 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
ecda5966 634 cx_write(PAD_CTRL, 0x00500300);
d19770e5
ST
635
636 mdelay(100);
637
7b888014
ST
638 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
639 720*4, 0);
640 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
641 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
642 188*4, 0);
643 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
644 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
645 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
646 188*4, 0);
647 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
648 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
649 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
d19770e5 650
a6a3f140 651 cx23885_gpio_setup(dev);
d19770e5
ST
652}
653
654
655static int cx23885_pci_quirks(struct cx23885_dev *dev)
656{
22b4e64f 657 dprintk(1, "%s()\n", __func__);
d19770e5 658
2df9a4c2
ST
659 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
660 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
661 * occur on the cx23887 bridge.
662 */
9c8ced51 663 if (dev->bridge == CX23885_BRIDGE_885)
d19770e5 664 cx_clear(RDR_TLCTL0, 1 << 4);
4823e9ee 665
d19770e5
ST
666 return 0;
667}
668
669static int get_resources(struct cx23885_dev *dev)
670{
9c8ced51
ST
671 if (request_mem_region(pci_resource_start(dev->pci, 0),
672 pci_resource_len(dev->pci, 0),
44a6481d 673 dev->name))
d19770e5
ST
674 return 0;
675
676 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
9c8ced51 677 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
d19770e5
ST
678
679 return -EBUSY;
680}
681
9c8ced51
ST
682static int cx23885_init_tsport(struct cx23885_dev *dev,
683 struct cx23885_tsport *port, int portno)
d19770e5 684{
22b4e64f 685 dprintk(1, "%s(portno=%d)\n", __func__, portno);
a6a3f140
ST
686
687 /* Transport bus init dma queue - Common settings */
688 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
689 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
b1b81f1d
ST
690 port->vld_misc_val = 0x0;
691 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
a6a3f140
ST
692
693 spin_lock_init(&port->slock);
694 port->dev = dev;
695 port->nr = portno;
696
697 INIT_LIST_HEAD(&port->mpegq.active);
d782ffa2 698 mutex_init(&port->frontends.lock);
7bdf84fc 699 INIT_LIST_HEAD(&port->frontends.felist);
d782ffa2
ST
700 port->frontends.active_fe_id = 0;
701
 702 /* This should be hardcoded to allow a single frontend
703 * attachment to this tsport, keeping the -dvb.c
704 * code clean and safe.
705 */
9c8ced51 706 if (!port->num_frontends)
a739a7e4
ST
707 port->num_frontends = 1;
708
9c8ced51 709 switch (portno) {
a6a3f140
ST
710 case 1:
711 port->reg_gpcnt = VID_B_GPCNT;
712 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
713 port->reg_dma_ctl = VID_B_DMA_CTL;
714 port->reg_lngth = VID_B_LNGTH;
715 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
716 port->reg_gen_ctrl = VID_B_GEN_CTL;
717 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
718 port->reg_sop_status = VID_B_SOP_STATUS;
719 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
720 port->reg_vld_misc = VID_B_VLD_MISC;
721 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
722 port->reg_src_sel = VID_B_SRC_SEL;
723 port->reg_ts_int_msk = VID_B_INT_MSK;
b1b81f1d 724 port->reg_ts_int_stat = VID_B_INT_STAT;
a6a3f140
ST
725 port->sram_chno = SRAM_CH03; /* VID_B */
726 port->pci_irqmask = 0x02; /* VID_B bit1 */
727 break;
728 case 2:
729 port->reg_gpcnt = VID_C_GPCNT;
730 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
731 port->reg_dma_ctl = VID_C_DMA_CTL;
732 port->reg_lngth = VID_C_LNGTH;
733 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
734 port->reg_gen_ctrl = VID_C_GEN_CTL;
735 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
736 port->reg_sop_status = VID_C_SOP_STATUS;
737 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
738 port->reg_vld_misc = VID_C_VLD_MISC;
739 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
740 port->reg_src_sel = 0;
741 port->reg_ts_int_msk = VID_C_INT_MSK;
742 port->reg_ts_int_stat = VID_C_INT_STAT;
743 port->sram_chno = SRAM_CH06; /* VID_C */
744 port->pci_irqmask = 0x04; /* VID_C bit2 */
d19770e5 745 break;
a6a3f140
ST
746 default:
747 BUG();
d19770e5
ST
748 }
749
750 return 0;
751}
752
0ac5881a
ST
753static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
754{
755 switch (cx_read(RDR_CFG2) & 0xff) {
756 case 0x00:
757 /* cx23885 */
758 dev->hwrevision = 0xa0;
759 break;
760 case 0x01:
761 /* CX23885-12Z */
762 dev->hwrevision = 0xa1;
763 break;
764 case 0x02:
25ea66e2 765 /* CX23885-13Z/14Z */
0ac5881a
ST
766 dev->hwrevision = 0xb0;
767 break;
768 case 0x03:
25ea66e2
ST
769 if (dev->pci->device == 0x8880) {
770 /* CX23888-21Z/22Z */
771 dev->hwrevision = 0xc0;
772 } else {
773 /* CX23885-14Z */
774 dev->hwrevision = 0xa4;
775 }
776 break;
777 case 0x04:
778 if (dev->pci->device == 0x8880) {
779 /* CX23888-31Z */
780 dev->hwrevision = 0xd0;
781 } else {
782 /* CX23885-15Z, CX23888-31Z */
783 dev->hwrevision = 0xa5;
784 }
0ac5881a
ST
785 break;
786 case 0x0e:
787 /* CX23887-15Z */
788 dev->hwrevision = 0xc0;
abe1def4 789 break;
0ac5881a
ST
790 case 0x0f:
791 /* CX23887-14Z */
792 dev->hwrevision = 0xb1;
793 break;
794 default:
795 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
22b4e64f 796 __func__, dev->hwrevision);
0ac5881a
ST
797 }
798 if (dev->hwrevision)
799 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
22b4e64f 800 __func__, dev->hwrevision);
0ac5881a
ST
801 else
802 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
22b4e64f 803 __func__, dev->hwrevision);
0ac5881a
ST
804}
805
29f8a0a5
AW
806/* Find the first v4l2_subdev member of the group id in hw */
807struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
808{
809 struct v4l2_subdev *result = NULL;
810 struct v4l2_subdev *sd;
811
812 spin_lock(&dev->v4l2_dev.lock);
813 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
814 if (sd->grp_id == hw) {
815 result = sd;
816 break;
817 }
818 }
819 spin_unlock(&dev->v4l2_dev.lock);
820 return result;
821}
822
d19770e5
ST
823static int cx23885_dev_setup(struct cx23885_dev *dev)
824{
825 int i;
826
dbe83a3b 827 spin_lock_init(&dev->pci_irqmask_lock);
af7f388e 828 spin_lock_init(&dev->slock);
dbe83a3b 829
d19770e5 830 mutex_init(&dev->lock);
8386c27f 831 mutex_init(&dev->gpio_lock);
d19770e5
ST
832
833 atomic_inc(&dev->refcount);
834
835 dev->nr = cx23885_devcount++;
579f1163
ST
836 sprintf(dev->name, "cx23885[%d]", dev->nr);
837
579f1163 838 /* Configure the internal memory */
9c8ced51 839 if (dev->pci->device == 0x8880) {
25ea66e2 840 /* Could be 887 or 888, assume a default */
579f1163 841 dev->bridge = CX23885_BRIDGE_887;
c7712613
ST
842 /* Apply a sensible clock frequency for the PCIe bridge */
843 dev->clk_freq = 25000000;
7e994302 844 dev->sram_channels = cx23887_sram_channels;
579f1163 845 } else
9c8ced51 846 if (dev->pci->device == 0x8852) {
579f1163 847 dev->bridge = CX23885_BRIDGE_885;
c7712613
ST
848 /* Apply a sensible clock frequency for the PCIe bridge */
849 dev->clk_freq = 28000000;
7e994302 850 dev->sram_channels = cx23885_sram_channels;
579f1163
ST
851 } else
852 BUG();
853
854 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
22b4e64f 855 __func__, dev->bridge);
579f1163
ST
856
857 /* board config */
858 dev->board = UNSET;
859 if (card[dev->nr] < cx23885_bcount)
860 dev->board = card[dev->nr];
861 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
862 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
863 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
864 dev->board = cx23885_subids[i].card;
865 if (UNSET == dev->board) {
866 dev->board = CX23885_BOARD_UNKNOWN;
867 cx23885_card_list(dev);
868 }
869
 870 /* If the user specified a clk freq override, apply it */
871 if (cx23885_boards[dev->board].clk_freq > 0)
872 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
873
d19770e5
ST
874 dev->pci_bus = dev->pci->bus->number;
875 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
dbe83a3b 876 cx23885_irq_add(dev, 0x001f00);
d19770e5
ST
877
878 /* External Master 1 Bus */
879 dev->i2c_bus[0].nr = 0;
880 dev->i2c_bus[0].dev = dev;
881 dev->i2c_bus[0].reg_stat = I2C1_STAT;
882 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
883 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
884 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
885 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
886 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
887
888 /* External Master 2 Bus */
889 dev->i2c_bus[1].nr = 1;
890 dev->i2c_bus[1].dev = dev;
891 dev->i2c_bus[1].reg_stat = I2C2_STAT;
892 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
893 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
894 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
895 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
896 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
897
898 /* Internal Master 3 Bus */
899 dev->i2c_bus[2].nr = 2;
900 dev->i2c_bus[2].dev = dev;
901 dev->i2c_bus[2].reg_stat = I2C3_STAT;
902 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
a2129af5 903 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
d19770e5
ST
904 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
905 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
906 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
907
b1b81f1d
ST
908 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
909 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
a6a3f140 910 cx23885_init_tsport(dev, &dev->ts1, 1);
579f1163 911
b1b81f1d
ST
912 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
913 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
a6a3f140 914 cx23885_init_tsport(dev, &dev->ts2, 2);
d19770e5 915
d19770e5
ST
916 if (get_resources(dev) < 0) {
917 printk(KERN_ERR "CORE %s No more PCIe resources for "
44a6481d
MK
918 "subsystem: %04x:%04x\n",
919 dev->name, dev->pci->subsystem_vendor,
920 dev->pci->subsystem_device);
d19770e5
ST
921
922 cx23885_devcount--;
fcf94c89 923 return -ENODEV;
d19770e5
ST
924 }
925
d19770e5 926 /* PCIe stuff */
9c8ced51
ST
927 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
928 pci_resource_len(dev->pci, 0));
d19770e5
ST
929
930 dev->bmmio = (u8 __iomem *)dev->lmmio;
931
d19770e5 932 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
44a6481d
MK
933 dev->name, dev->pci->subsystem_vendor,
934 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
935 dev->board, card[dev->nr] == dev->board ?
936 "insmod option" : "autodetected");
d19770e5 937
4823e9ee
ST
938 cx23885_pci_quirks(dev);
939
7b888014
ST
940 /* Assume some sensible defaults */
941 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
942 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
557f48d5 943 dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
7b888014
ST
944 dev->radio_type = cx23885_boards[dev->board].radio_type;
945 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
946
557f48d5
IL
947 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
948 __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
7b888014 949 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
22b4e64f 950 __func__, dev->radio_type, dev->radio_addr);
7b888014 951
f659c513
ST
952 /* The cx23417 encoder has GPIO's that need to be initialised
953 * before DVB, so that demodulators and tuners are out of
954 * reset before DVB uses them.
955 */
956 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
957 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
958 cx23885_mc417_init(dev);
959
d19770e5
ST
960 /* init hardware */
961 cx23885_reset(dev);
962
963 cx23885_i2c_register(&dev->i2c_bus[0]);
964 cx23885_i2c_register(&dev->i2c_bus[1]);
965 cx23885_i2c_register(&dev->i2c_bus[2]);
d19770e5 966 cx23885_card_setup(dev);
622b828a 967 call_all(dev, core, s_power, 0);
d19770e5
ST
968 cx23885_ir_init(dev);
969
7b888014
ST
970 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
971 if (cx23885_video_register(dev) < 0) {
972 printk(KERN_ERR "%s() Failed to register analog "
22b4e64f 973 "video adapters on VID_A\n", __func__);
7b888014
ST
974 }
975 }
976
977 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
10d0dcd7
IL
978 if (cx23885_boards[dev->board].num_fds_portb)
979 dev->ts1.num_frontends =
980 cx23885_boards[dev->board].num_fds_portb;
a6a3f140
ST
981 if (cx23885_dvb_register(&dev->ts1) < 0) {
982 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
22b4e64f 983 __func__);
a6a3f140 984 }
b1b81f1d
ST
985 } else
986 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
987 if (cx23885_417_register(dev) < 0) {
988 printk(KERN_ERR
989 "%s() Failed to register 417 on VID_B\n",
990 __func__);
991 }
579f1163
ST
992 }
993
7b888014 994 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
10d0dcd7
IL
995 if (cx23885_boards[dev->board].num_fds_portc)
996 dev->ts2.num_frontends =
997 cx23885_boards[dev->board].num_fds_portc;
a6a3f140 998 if (cx23885_dvb_register(&dev->ts2) < 0) {
b1b81f1d
ST
999 printk(KERN_ERR
1000 "%s() Failed to register dvb on VID_C\n",
1001 __func__);
1002 }
1003 } else
1004 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1005 if (cx23885_417_register(dev) < 0) {
1006 printk(KERN_ERR
1007 "%s() Failed to register 417 on VID_C\n",
22b4e64f 1008 __func__);
a6a3f140 1009 }
d19770e5
ST
1010 }
1011
0ac5881a
ST
1012 cx23885_dev_checkrevision(dev);
1013
702dd790
IL
1014 /* disable MSI for NetUP cards, otherwise CI is not working */
1015 if (cx23885_boards[dev->board].ci_type > 0)
1016 cx_clear(RDR_RDRCTL1, 1 << 8);
1017
7b134e85
IL
1018 switch (dev->board) {
1019 case CX23885_BOARD_TEVII_S470:
1020 case CX23885_BOARD_TEVII_S471:
1021 cx_clear(RDR_RDRCTL1, 1 << 8);
1022 break;
1023 }
1024
d19770e5 1025 return 0;
d19770e5
ST
1026}
1027
39e75cfe 1028static void cx23885_dev_unregister(struct cx23885_dev *dev)
d19770e5 1029{
9c8ced51
ST
1030 release_mem_region(pci_resource_start(dev->pci, 0),
1031 pci_resource_len(dev->pci, 0));
d19770e5
ST
1032
1033 if (!atomic_dec_and_test(&dev->refcount))
1034 return;
1035
7b888014
ST
1036 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1037 cx23885_video_unregister(dev);
1038
b1b81f1d 1039 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
a6a3f140
ST
1040 cx23885_dvb_unregister(&dev->ts1);
1041
b1b81f1d
ST
1042 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1043 cx23885_417_unregister(dev);
1044
1045 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
a6a3f140
ST
1046 cx23885_dvb_unregister(&dev->ts2);
1047
b1b81f1d
ST
1048 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1049 cx23885_417_unregister(dev);
1050
d19770e5
ST
1051 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1052 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1053 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1054
1055 iounmap(dev->lmmio);
1056}
1057
9c8ced51 1058static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
44a6481d
MK
1059 unsigned int offset, u32 sync_line,
1060 unsigned int bpl, unsigned int padding,
453afdd9 1061 unsigned int lines, unsigned int lpi, bool jump)
d19770e5
ST
1062{
1063 struct scatterlist *sg;
9e44d632 1064 unsigned int line, todo, sol;
d19770e5 1065
453afdd9
HV
1066
1067 if (jump) {
1068 *(rp++) = cpu_to_le32(RISC_JUMP);
1069 *(rp++) = cpu_to_le32(0);
1070 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1071 }
1072
d19770e5
ST
1073 /* sync instruction */
1074 if (sync_line != NO_SYNC_LINE)
1075 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1076
1077 /* scan lines */
1078 sg = sglist;
1079 for (line = 0; line < lines; line++) {
1080 while (offset && offset >= sg_dma_len(sg)) {
1081 offset -= sg_dma_len(sg);
7675fe99 1082 sg = sg_next(sg);
d19770e5 1083 }
9e44d632
MM
1084
1085 if (lpi && line > 0 && !(line % lpi))
1086 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
1087 else
1088 sol = RISC_SOL;
1089
d19770e5
ST
1090 if (bpl <= sg_dma_len(sg)-offset) {
1091 /* fits into current chunk */
9e44d632 1092 *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
9c8ced51
ST
1093 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1094 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1095 offset += bpl;
d19770e5
ST
1096 } else {
1097 /* scanline needs to be split */
1098 todo = bpl;
9e44d632 1099 *(rp++) = cpu_to_le32(RISC_WRITE|sol|
d19770e5 1100 (sg_dma_len(sg)-offset));
9c8ced51
ST
1101 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1102 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5
ST
1103 todo -= (sg_dma_len(sg)-offset);
1104 offset = 0;
7675fe99 1105 sg = sg_next(sg);
d19770e5 1106 while (todo > sg_dma_len(sg)) {
9c8ced51 1107 *(rp++) = cpu_to_le32(RISC_WRITE|
d19770e5 1108 sg_dma_len(sg));
9c8ced51
ST
1109 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1110 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5 1111 todo -= sg_dma_len(sg);
7675fe99 1112 sg = sg_next(sg);
d19770e5 1113 }
9c8ced51
ST
1114 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1115 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1116 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5
ST
1117 offset += todo;
1118 }
1119 offset += padding;
1120 }
1121
1122 return rp;
1123}
1124
4d63a25c 1125int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
7b888014
ST
1126 struct scatterlist *sglist, unsigned int top_offset,
1127 unsigned int bottom_offset, unsigned int bpl,
1128 unsigned int padding, unsigned int lines)
1129{
1130 u32 instructions, fields;
d8eaa58b 1131 __le32 *rp;
7b888014
ST
1132
1133 fields = 0;
1134 if (UNSET != top_offset)
1135 fields++;
1136 if (UNSET != bottom_offset)
1137 fields++;
1138
1139 /* estimate risc mem: worst case is one write per page border +
1140 one write per scan line + syncs + jump (all 2 dwords). Padding
1141 can cause next bpl to start close to a page border. First DMA
1142 region may be smaller than PAGE_SIZE */
 1143 /* write and jump need an extra dword */
1144 instructions = fields * (1 + ((bpl + padding) * lines)
1145 / PAGE_SIZE + lines);
453afdd9 1146 instructions += 5;
4d63a25c
HV
1147 risc->size = instructions * 12;
1148 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1149 if (risc->cpu == NULL)
1150 return -ENOMEM;
7b888014
ST
1151
1152 /* write risc instructions */
1153 rp = risc->cpu;
1154 if (UNSET != top_offset)
1155 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
453afdd9 1156 bpl, padding, lines, 0, true);
7b888014
ST
1157 if (UNSET != bottom_offset)
1158 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
453afdd9 1159 bpl, padding, lines, 0, UNSET == top_offset);
7b888014
ST
1160
1161 /* save pointer to jmp instruction address */
1162 risc->jmp = rp;
9c8ced51 1163 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
7b888014
ST
1164 return 0;
1165}
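/* Worked example (illustrative numbers): for a single field (fields = 1) with
 * bpl = 1440, padding = 0, lines = 288 and PAGE_SIZE = 4096, the estimate above
 * gives 1 + (1440 * 288) / 4096 + 288 = 390 instructions, plus 5 for the
 * sync/jump overhead, so risc->size = 395 * 12 bytes.
 */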
d19770e5 1166
9e44d632 1167int cx23885_risc_databuffer(struct pci_dev *pci,
4d63a25c 1168 struct cx23885_riscmem *risc,
39e75cfe
AB
1169 struct scatterlist *sglist,
1170 unsigned int bpl,
9e44d632 1171 unsigned int lines, unsigned int lpi)
d19770e5
ST
1172{
1173 u32 instructions;
d8eaa58b 1174 __le32 *rp;
d19770e5
ST
1175
1176 /* estimate risc mem: worst case is one write per page border +
1177 one write per scan line + syncs + jump (all 2 dwords). Here
1178 there is no padding and no sync. First DMA region may be smaller
1179 than PAGE_SIZE */
1180 /* Jump and write need an extra dword */
1181 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
453afdd9 1182 instructions += 4;
d19770e5 1183
4d63a25c
HV
1184 risc->size = instructions * 12;
1185 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1186 if (risc->cpu == NULL)
1187 return -ENOMEM;
d19770e5
ST
1188
1189 /* write risc instructions */
1190 rp = risc->cpu;
9e44d632 1191 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
453afdd9 1192 bpl, 0, lines, lpi, lpi == 0);
d19770e5
ST
1193
1194 /* save pointer to jmp instruction address */
1195 risc->jmp = rp;
9c8ced51 1196 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
d19770e5
ST
1197 return 0;
1198}
1199
4d63a25c 1200int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
5ab27e6d
ST
1201 struct scatterlist *sglist, unsigned int top_offset,
1202 unsigned int bottom_offset, unsigned int bpl,
1203 unsigned int padding, unsigned int lines)
1204{
1205 u32 instructions, fields;
1206 __le32 *rp;
5ab27e6d
ST
1207
1208 fields = 0;
1209 if (UNSET != top_offset)
1210 fields++;
1211 if (UNSET != bottom_offset)
1212 fields++;
1213
1214 /* estimate risc mem: worst case is one write per page border +
1215 one write per scan line + syncs + jump (all 2 dwords). Padding
1216 can cause next bpl to start close to a page border. First DMA
1217 region may be smaller than PAGE_SIZE */
 1218 /* write and jump need an extra dword */
1219 instructions = fields * (1 + ((bpl + padding) * lines)
1220 / PAGE_SIZE + lines);
453afdd9 1221 instructions += 5;
4d63a25c
HV
1222 risc->size = instructions * 12;
1223 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1224 if (risc->cpu == NULL)
1225 return -ENOMEM;
5ab27e6d
ST
1226 /* write risc instructions */
1227 rp = risc->cpu;
1228
1229 /* Sync to line 6, so US CC line 21 will appear in line '12'
1230 * in the userland vbi payload */
1231 if (UNSET != top_offset)
420b2176 1232 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
453afdd9 1233 bpl, padding, lines, 0, true);
5ab27e6d
ST
1234
1235 if (UNSET != bottom_offset)
420b2176 1236 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
453afdd9 1237 bpl, padding, lines, 0, UNSET == top_offset);
5ab27e6d
ST
1238
1239
1240
1241 /* save pointer to jmp instruction address */
1242 risc->jmp = rp;
1243 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1244 return 0;
1245}
1246
1247
453afdd9 1248void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
d19770e5 1249{
4d63a25c
HV
1250 struct cx23885_riscmem *risc = &buf->risc;
1251
d19770e5 1252 BUG_ON(in_interrupt());
4d63a25c 1253 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
d19770e5
ST
1254}
1255
7b888014
ST
1256static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1257{
1258 struct cx23885_dev *dev = port->dev;
1259
22b4e64f
HH
1260 dprintk(1, "%s() Register Dump\n", __func__);
1261 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
7b888014 1262 cx_read(DEV_CNTRL2));
22b4e64f 1263 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
dbe83a3b 1264 cx23885_irq_get_mask(dev));
22b4e64f 1265 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
7b888014 1266 cx_read(AUDIO_INT_INT_MSK));
22b4e64f 1267 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
7b888014 1268 cx_read(AUD_INT_DMA_CTL));
22b4e64f 1269 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
7b888014 1270 cx_read(AUDIO_EXT_INT_MSK));
22b4e64f 1271 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
7b888014 1272 cx_read(AUD_EXT_DMA_CTL));
22b4e64f 1273 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
7b888014 1274 cx_read(PAD_CTRL));
22b4e64f 1275 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
7b888014 1276 cx_read(ALT_PIN_OUT_SEL));
22b4e64f 1277 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
7b888014 1278 cx_read(GPIO2));
22b4e64f 1279 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
7b888014 1280 port->reg_gpcnt, cx_read(port->reg_gpcnt));
22b4e64f 1281 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
7b888014 1282 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
22b4e64f 1283 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
7b888014 1284 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
7b913908
ST
1285 if (port->reg_src_sel)
1286 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1287 port->reg_src_sel, cx_read(port->reg_src_sel));
22b4e64f 1288 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
7b888014 1289 port->reg_lngth, cx_read(port->reg_lngth));
22b4e64f 1290 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
7b888014 1291 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
22b4e64f 1292 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
7b888014 1293 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
22b4e64f 1294 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
7b888014 1295 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
22b4e64f 1296 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
7b888014 1297 port->reg_sop_status, cx_read(port->reg_sop_status));
22b4e64f 1298 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
7b888014 1299 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
22b4e64f 1300 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
7b888014 1301 port->reg_vld_misc, cx_read(port->reg_vld_misc));
22b4e64f 1302 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
7b888014 1303 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
22b4e64f 1304 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
7b888014
ST
1305 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1306}
1307
453afdd9 1308int cx23885_start_dma(struct cx23885_tsport *port,
44a6481d
MK
1309 struct cx23885_dmaqueue *q,
1310 struct cx23885_buffer *buf)
d19770e5
ST
1311{
1312 struct cx23885_dev *dev = port->dev;
a589b665 1313 u32 reg;
d19770e5 1314
22b4e64f 1315 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
453afdd9 1316 dev->width, dev->height, dev->field);
d19770e5 1317
d8d12b43
ST
1318 /* Stop the fifo and risc engine for this port */
1319 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1320
d19770e5
ST
1321 /* setup fifo + format */
1322 cx23885_sram_channel_setup(dev,
9c8ced51 1323 &dev->sram_channels[port->sram_chno],
44a6481d 1324 port->ts_packet_size, buf->risc.dma);
9c8ced51
ST
1325 if (debug > 5) {
1326 cx23885_sram_channel_dump(dev,
1327 &dev->sram_channels[port->sram_chno]);
d19770e5 1328 cx23885_risc_disasm(port, &buf->risc);
3328e4fb 1329 }
d19770e5
ST
1330
1331 /* write TS length to chip */
453afdd9 1332 cx_write(port->reg_lngth, port->ts_packet_size);
d19770e5 1333
9c8ced51
ST
1334 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1335 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1336 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
22b4e64f 1337 __func__,
661c7e44 1338 cx23885_boards[dev->board].portb,
9c8ced51 1339 cx23885_boards[dev->board].portc);
d19770e5
ST
1340 return -EINVAL;
1341 }
1342
a589b665
ST
1343 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1344 cx23885_av_clk(dev, 0);
1345
d19770e5
ST
1346 udelay(100);
1347
579f1163 1348 /* If the port supports SRC SELECT, configure it */
9c8ced51 1349 if (port->reg_src_sel)
579f1163
ST
1350 cx_write(port->reg_src_sel, port->src_sel_val);
1351
b1b81f1d 1352 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
d19770e5 1353 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
b1b81f1d 1354 cx_write(port->reg_vld_misc, port->vld_misc_val);
d19770e5
ST
1355 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1356 udelay(100);
1357
9c8ced51 1358 /* NOTE: this is 2 (reserved) for portb, does it matter? */
d19770e5
ST
1359 /* reset counter to zero */
1360 cx_write(port->reg_gpcnt_ctl, 3);
453afdd9 1361 q->count = 0;
d19770e5 1362
52ce27bf
ST
1363 /* Set VIDB pins to input */
1364 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1365 reg = cx_read(PAD_CTRL);
1366 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1367 cx_write(PAD_CTRL, reg);
1368 }
1369
1370 /* Set VIDC pins to input */
1371 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1372 reg = cx_read(PAD_CTRL);
1373 reg &= ~0x4; /* Clear TS2_SOP_OE */
1374 cx_write(PAD_CTRL, reg);
1375 }
1376
1377 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
a589b665
ST
1378
1379 reg = cx_read(PAD_CTRL);
1380 reg = reg & ~0x1; /* Clear TS1_OE */
1381
1382 /* FIXME, bit 2 writing here is questionable */
1383 /* set TS1_SOP_OE and TS1_OE_HI */
1384 reg = reg | 0xa;
1385 cx_write(PAD_CTRL, reg);
1386
1387 /* FIXME and these two registers should be documented. */
1388 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1389 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1390 }
1391
9c8ced51 1392 switch (dev->bridge) {
d19770e5 1393 case CX23885_BRIDGE_885:
3bd40659 1394 case CX23885_BRIDGE_887:
25ea66e2 1395 case CX23885_BRIDGE_888:
d19770e5 1396 /* enable irqs */
9c8ced51 1397 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
d19770e5
ST
1398 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1399 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
dbe83a3b
AW
1400 cx23885_irq_add(dev, port->pci_irqmask);
1401 cx23885_irq_enable_all(dev);
d19770e5 1402 break;
d19770e5 1403 default:
579f1163 1404 BUG();
d19770e5
ST
1405 }
1406
d19770e5
ST
1407 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1408
a589b665
ST
1409 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1410 cx23885_av_clk(dev, 1);
1411
7b888014
ST
1412 if (debug > 4)
1413 cx23885_tsport_reg_dump(port);
1414
d19770e5
ST
1415 return 0;
1416}
1417
1418static int cx23885_stop_dma(struct cx23885_tsport *port)
1419{
1420 struct cx23885_dev *dev = port->dev;
a589b665
ST
1421 u32 reg;
1422
22b4e64f 1423 dprintk(1, "%s()\n", __func__);
d19770e5
ST
1424
1425 /* Stop interrupts and DMA */
1426 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1427 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1428
52ce27bf 1429 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
a589b665
ST
1430
1431 reg = cx_read(PAD_CTRL);
1432
1433 /* Set TS1_OE */
1434 reg = reg | 0x1;
1435
1436 /* clear TS1_SOP_OE and TS1_OE_HI */
1437 reg = reg & ~0xa;
1438 cx_write(PAD_CTRL, reg);
1439 cx_write(port->reg_src_sel, 0);
1440 cx_write(port->reg_gen_ctrl, 8);
1441
1442 }
1443
1444 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1445 cx23885_av_clk(dev, 0);
1446
d19770e5
ST
1447 return 0;
1448}
1449
d19770e5
ST
1450/* ------------------------------------------------------------------ */
1451
453afdd9 1452int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
d19770e5
ST
1453{
1454 struct cx23885_dev *dev = port->dev;
1455 int size = port->ts_packet_size * port->ts_packet_count;
453afdd9 1456 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
d19770e5 1457
22b4e64f 1458 dprintk(1, "%s: %p\n", __func__, buf);
453afdd9 1459 if (vb2_plane_size(&buf->vb, 0) < size)
d19770e5 1460 return -EINVAL;
453afdd9 1461 vb2_set_plane_payload(&buf->vb, 0, size);
d19770e5 1462
453afdd9
HV
1463 cx23885_risc_databuffer(dev->pci, &buf->risc,
1464 sgt->sgl,
1465 port->ts_packet_size, port->ts_packet_count, 0);
1466 return 0;
d19770e5
ST
1467}
1468
453afdd9
HV
1469/*
1470 * The risc program for each buffer works as follows: it starts with a simple
1471 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1472 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1473 * the initial JUMP).
1474 *
1475 * This is the risc program of the first buffer to be queued if the active list
1476 * is empty and it just keeps DMAing this buffer without generating any
1477 * interrupts.
1478 *
1479 * If a new buffer is added then the initial JUMP in the code for that buffer
1480 * will generate an interrupt which signals that the previous buffer has been
1481 * DMAed successfully and that it can be returned to userspace.
1482 *
1483 * It also sets the final jump of the previous buffer to the start of the new
1484 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1485 * atomic u32 write, so there is no race condition.
1486 *
 1487 * The end-result of all this is that you only get an interrupt when a buffer
1488 * is ready, so the control flow is very easy.
1489 */
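/* Illustrative sketch of the chaining described above (not literal code):
 *
 *   buffer A alone:   [JUMP -> A+12][ writes ... ][JUMP -> A+12]   (loops on A)
 *   after queueing B:  A's final JUMP is repointed at B's start (a single u32
 *   write) and B's initial JUMP carries RISC_IRQ1, so completing A raises the
 *   interrupt:
 *
 *   buffer A:         [JUMP -> A+12][ writes ... ][JUMP -> B]
 *   buffer B:         [JUMP|IRQ1 -> B+12][ writes ... ][JUMP -> B+12]
 */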
1490void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1491{
1492 struct cx23885_buffer *prev;
1493 struct cx23885_dev *dev = port->dev;
1494 struct cx23885_dmaqueue *cx88q = &port->mpegq;
453afdd9 1495 unsigned long flags;
d19770e5 1496
453afdd9
HV
1497 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
1498 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
1499 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
d19770e5
ST
1500 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1501
453afdd9 1502 spin_lock_irqsave(&dev->slock, flags);
d19770e5 1503 if (list_empty(&cx88q->active)) {
453afdd9 1504 list_add_tail(&buf->queue, &cx88q->active);
44a6481d 1505 dprintk(1, "[%p/%d] %s - first active\n",
453afdd9 1506 buf, buf->vb.v4l2_buf.index, __func__);
d19770e5 1507 } else {
453afdd9 1508 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
44a6481d 1509 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
453afdd9
HV
1510 queue);
1511 list_add_tail(&buf->queue, &cx88q->active);
d19770e5 1512 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
9c8ced51 1513 dprintk(1, "[%p/%d] %s - append to active\n",
453afdd9 1514 buf, buf->vb.v4l2_buf.index, __func__);
d19770e5 1515 }
453afdd9 1516 spin_unlock_irqrestore(&dev->slock, flags);
d19770e5
ST
1517}
1518
1519/* ----------------------------------------------------------- */
1520
453afdd9 1521static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
d19770e5
ST
1522{
1523 struct cx23885_dev *dev = port->dev;
1524 struct cx23885_dmaqueue *q = &port->mpegq;
1525 struct cx23885_buffer *buf;
1526 unsigned long flags;
1527
44a6481d 1528 spin_lock_irqsave(&port->slock, flags);
d19770e5 1529 while (!list_empty(&q->active)) {
44a6481d 1530 buf = list_entry(q->active.next, struct cx23885_buffer,
453afdd9
HV
1531 queue);
1532 list_del(&buf->queue);
1533 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
44a6481d 1534 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
453afdd9 1535 buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
d19770e5 1536 }
44a6481d 1537 spin_unlock_irqrestore(&port->slock, flags);
d19770e5
ST
1538}
1539
b1b81f1d
ST
1540void cx23885_cancel_buffers(struct cx23885_tsport *port)
1541{
1542 struct cx23885_dev *dev = port->dev;
d19770e5 1543
9c8ced51 1544 dprintk(1, "%s()\n", __func__);
d19770e5 1545 cx23885_stop_dma(port);
453afdd9 1546 do_cancel_buffers(port, "cancel");
d19770e5
ST
1547}
1548
b1b81f1d
ST
1549int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1550{
1551 /* FIXME: port1 assumption here. */
1552 struct cx23885_tsport *port = &dev->ts1;
1553 int count = 0;
1554 int handled = 0;
1555
1556 if (status == 0)
1557 return handled;
1558
1559 count = cx_read(port->reg_gpcnt);
1560 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1561 status, cx_read(port->reg_ts_int_msk), count);
1562
1563 if ((status & VID_B_MSK_BAD_PKT) ||
1564 (status & VID_B_MSK_OPC_ERR) ||
1565 (status & VID_B_MSK_VBI_OPC_ERR) ||
1566 (status & VID_B_MSK_SYNC) ||
1567 (status & VID_B_MSK_VBI_SYNC) ||
1568 (status & VID_B_MSK_OF) ||
1569 (status & VID_B_MSK_VBI_OF)) {
1570 printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1571 "= 0x%x\n", dev->name, status);
1572 if (status & VID_B_MSK_BAD_PKT)
1573 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1574 if (status & VID_B_MSK_OPC_ERR)
1575 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1576 if (status & VID_B_MSK_VBI_OPC_ERR)
1577 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1578 if (status & VID_B_MSK_SYNC)
1579 dprintk(1, " VID_B_MSK_SYNC\n");
1580 if (status & VID_B_MSK_VBI_SYNC)
1581 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1582 if (status & VID_B_MSK_OF)
1583 dprintk(1, " VID_B_MSK_OF\n");
1584 if (status & VID_B_MSK_VBI_OF)
1585 dprintk(1, " VID_B_MSK_VBI_OF\n");
1586
1587 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1588 cx23885_sram_channel_dump(dev,
1589 &dev->sram_channels[port->sram_chno]);
1590 cx23885_417_check_encoder(dev);
1591 } else if (status & VID_B_MSK_RISCI1) {
1592 dprintk(7, " VID_B_MSK_RISCI1\n");
1593 spin_lock(&port->slock);
1594 cx23885_wakeup(port, &port->mpegq, count);
1595 spin_unlock(&port->slock);
b1b81f1d
ST
1596 }
1597 if (status) {
1598 cx_write(port->reg_ts_int_stat, status);
1599 handled = 1;
1600 }
1601
1602 return handled;
1603}
1604
a6a3f140
ST
1605static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1606{
1607 struct cx23885_dev *dev = port->dev;
1608 int handled = 0;
1609 u32 count;
1610
b1b81f1d
ST
1611 if ((status & VID_BC_MSK_OPC_ERR) ||
1612 (status & VID_BC_MSK_BAD_PKT) ||
1613 (status & VID_BC_MSK_SYNC) ||
9c8ced51
ST
1614 (status & VID_BC_MSK_OF)) {
1615
a6a3f140 1616 if (status & VID_BC_MSK_OPC_ERR)
9c8ced51
ST
1617 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1618 VID_BC_MSK_OPC_ERR);
1619
a6a3f140 1620 if (status & VID_BC_MSK_BAD_PKT)
9c8ced51
ST
1621 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1622 VID_BC_MSK_BAD_PKT);
1623
a6a3f140 1624 if (status & VID_BC_MSK_SYNC)
9c8ced51
ST
1625 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1626 VID_BC_MSK_SYNC);
1627
a6a3f140 1628 if (status & VID_BC_MSK_OF)
9c8ced51
ST
1629 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1630 VID_BC_MSK_OF);
a6a3f140
ST
1631
1632 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1633
1634 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
b1b81f1d
ST
1635 cx23885_sram_channel_dump(dev,
1636 &dev->sram_channels[port->sram_chno]);
a6a3f140
ST
1637
1638 } else if (status & VID_BC_MSK_RISCI1) {
1639
1640 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1641
1642 spin_lock(&port->slock);
1643 count = cx_read(port->reg_gpcnt);
1644 cx23885_wakeup(port, &port->mpegq, count);
1645 spin_unlock(&port->slock);
1646
a6a3f140
ST
1647 }
1648 if (status) {
1649 cx_write(port->reg_ts_int_stat, status);
1650 handled = 1;
1651 }
1652
1653 return handled;
1654}
1655
03121f05 1656static irqreturn_t cx23885_irq(int irq, void *dev_id)
d19770e5
ST
1657{
1658 struct cx23885_dev *dev = dev_id;
a6a3f140
ST
1659 struct cx23885_tsport *ts1 = &dev->ts1;
1660 struct cx23885_tsport *ts2 = &dev->ts2;
d19770e5 1661 u32 pci_status, pci_mask;
7b888014 1662 u32 vida_status, vida_mask;
9e44d632 1663 u32 audint_status, audint_mask;
6f074abb 1664 u32 ts1_status, ts1_mask;
d19770e5 1665 u32 ts2_status, ts2_mask;
7b888014 1666 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
9e44d632 1667 int audint_count = 0;
98d109f9 1668 bool subdev_handled;
d19770e5
ST
1669
1670 pci_status = cx_read(PCI_INT_STAT);
dbe83a3b 1671 pci_mask = cx23885_irq_get_mask(dev);
7b888014
ST
1672 vida_status = cx_read(VID_A_INT_STAT);
1673 vida_mask = cx_read(VID_A_INT_MSK);
9e44d632
MM
1674 audint_status = cx_read(AUDIO_INT_INT_STAT);
1675 audint_mask = cx_read(AUDIO_INT_INT_MSK);
6f074abb
ST
1676 ts1_status = cx_read(VID_B_INT_STAT);
1677 ts1_mask = cx_read(VID_B_INT_MSK);
d19770e5
ST
1678 ts2_status = cx_read(VID_C_INT_STAT);
1679 ts2_mask = cx_read(VID_C_INT_MSK);
1680
9c8ced51 1681 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
d19770e5
ST
1682 goto out;
1683
7b888014 1684 vida_count = cx_read(VID_A_GPCNT);
9e44d632 1685 audint_count = cx_read(AUD_INT_A_GPCNT);
a6a3f140
ST
1686 ts1_count = cx_read(ts1->reg_gpcnt);
1687 ts2_count = cx_read(ts2->reg_gpcnt);
7b888014
ST
1688 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1689 pci_status, pci_mask);
1690 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1691 vida_status, vida_mask, vida_count);
9e44d632
MM
1692 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1693 audint_status, audint_mask, audint_count);
7b888014
ST
1694 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1695 ts1_status, ts1_mask, ts1_count);
1696 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1697 ts2_status, ts2_mask, ts2_count);
d19770e5 1698
f59ad611
AW
1699 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1700 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1701 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1702 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1703 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
98d109f9 1704 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
d19770e5
ST
1705
1706 if (pci_status & PCI_MSK_RISC_RD)
9c8ced51
ST
1707 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1708 PCI_MSK_RISC_RD);
1709
d19770e5 1710 if (pci_status & PCI_MSK_RISC_WR)
9c8ced51
ST
1711 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1712 PCI_MSK_RISC_WR);
1713
d19770e5 1714 if (pci_status & PCI_MSK_AL_RD)
9c8ced51
ST
1715 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1716 PCI_MSK_AL_RD);
1717
d19770e5 1718 if (pci_status & PCI_MSK_AL_WR)
9c8ced51
ST
1719 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1720 PCI_MSK_AL_WR);
1721
d19770e5 1722 if (pci_status & PCI_MSK_APB_DMA)
9c8ced51
ST
1723 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1724 PCI_MSK_APB_DMA);
1725
d19770e5 1726 if (pci_status & PCI_MSK_VID_C)
9c8ced51
ST
1727 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1728 PCI_MSK_VID_C);
1729
d19770e5 1730 if (pci_status & PCI_MSK_VID_B)
9c8ced51
ST
1731 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1732 PCI_MSK_VID_B);
1733
d19770e5 1734 if (pci_status & PCI_MSK_VID_A)
9c8ced51
ST
1735 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1736 PCI_MSK_VID_A);
1737
d19770e5 1738 if (pci_status & PCI_MSK_AUD_INT)
9c8ced51
ST
1739 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1740 PCI_MSK_AUD_INT);
1741
d19770e5 1742 if (pci_status & PCI_MSK_AUD_EXT)
9c8ced51
ST
1743 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1744 PCI_MSK_AUD_EXT);
d19770e5 1745
5a23b076
IL
1746 if (pci_status & PCI_MSK_GPIO0)
1747 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1748 PCI_MSK_GPIO0);
1749
1750 if (pci_status & PCI_MSK_GPIO1)
1751 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1752 PCI_MSK_GPIO1);
f59ad611 1753
98d109f9
AW
1754 if (pci_status & PCI_MSK_AV_CORE)
1755 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1756 PCI_MSK_AV_CORE);
1757
f59ad611
AW
1758 if (pci_status & PCI_MSK_IR)
1759 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1760 PCI_MSK_IR);
d19770e5
ST
1761 }
1762
78db8547
IL
1763 if (cx23885_boards[dev->board].ci_type == 1 &&
1764 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1765 handled += netup_ci_slot_status(dev, pci_status);
a26ccc9d 1766
78db8547
IL
1767 if (cx23885_boards[dev->board].ci_type == 2 &&
1768 (pci_status & PCI_MSK_GPIO0))
1769 handled += altera_ci_irq(dev);
5a23b076 1770
7b888014
ST
1771 if (ts1_status) {
1772 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1773 handled += cx23885_irq_ts(ts1, ts1_status);
b1b81f1d
ST
1774 else
1775 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1776 handled += cx23885_irq_417(dev, ts1_status);
7b888014
ST
1777 }
1778
1779 if (ts2_status) {
1780 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1781 handled += cx23885_irq_ts(ts2, ts2_status);
b1b81f1d
ST
1782 else
1783 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1784 handled += cx23885_irq_417(dev, ts2_status);
7b888014 1785 }
6f074abb 1786
7b888014
ST
1787 if (vida_status)
1788 handled += cx23885_video_irq(dev, vida_status);
6f074abb 1789
9e44d632
MM
1790 if (audint_status)
1791 handled += cx23885_audio_irq(dev, audint_status, audint_mask);
1792
f59ad611 1793 if (pci_status & PCI_MSK_IR) {
98d109f9 1794 subdev_handled = false;
260e689b 1795 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
98d109f9
AW
1796 pci_status, &subdev_handled);
1797 if (subdev_handled)
1798 handled++;
1799 }
1800
e5514f10
AW
1801 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1802 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
c21412f5 1803 schedule_work(&dev->cx25840_work);
e5514f10 1804 handled++;
f59ad611
AW
1805 }
1806
6f074abb
ST
1807 if (handled)
1808 cx_write(PCI_INT_STAT, pci_status);
d19770e5
ST
1809out:
1810 return IRQ_RETVAL(handled);
1811}
1812
f59ad611
AW
1813static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1814 unsigned int notification, void *arg)
1815{
1816 struct cx23885_dev *dev;
1817
1818 if (sd == NULL)
1819 return;
1820
1821 dev = to_cx23885(sd->v4l2_dev);
1822
1823 switch (notification) {
e5514f10 1824 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
f59ad611
AW
1825 if (sd == dev->sd_ir)
1826 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1827 break;
e5514f10 1828 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
f59ad611
AW
1829 if (sd == dev->sd_ir)
1830 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1831 break;
1832 }
1833}
1834
1835static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1836{
e5514f10 1837 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
f59ad611
AW
1838 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1839 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1840 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1841}
1842
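/*
 * Illustrative sketch only (not code from this driver): with the notify
 * callback installed above, a sub-device such as the CX23888 IR core can
 * push an event back to the bridge via the generic v4l2_subdev_notify()
 * helper; the event-bit constant below is an assumption based on
 * v4l2-subdev.h.
 *
 *	u32 events = V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ;
 *	v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
 *
 * cx23885_v4l2_dev_notify() then routes the event to
 * cx23885_ir_rx_v4l2_dev_notify() for the matching sd_ir sub-device.
 */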
6de72bd6 1843static inline int encoder_on_portb(struct cx23885_dev *dev)
6f8bee9b
ST
1844{
1845 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1846}
1847
6de72bd6 1848static inline int encoder_on_portc(struct cx23885_dev *dev)
6f8bee9b
ST
1849{
1850 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1851}
1852
1853/* The mask represents 32 different GPIOs; GPIOs are split across multiple
1854 * registers depending on the board configuration (and on whether the 417
1855 * encoder, with its own GPIOs, is present). Each GPIO bit is pushed into
1856 * the correct hardware register, regardless of its physical location.
1857 * Certain registers are shared, so we sanity check and report errors if we
1858 * think we're tampering with a GPIO that might be assigned to the encoder
1859 * (and used for the host bus). A usage sketch follows cx23885_gpio_enable().
1860 *
1861 * GPIO 2 thru 0 - On the cx23885 bridge
1862 * GPIO 18 thru 3 - On the cx23417 host bus interface
1863 * GPIO 23 thru 19 - On the cx25840 a/v core
1864 */
1865void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1866{
1867 if (mask & 0x7)
1868 cx_set(GP0_IO, mask & 0x7);
1869
1870 if (mask & 0x0007fff8) {
1871 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1872 printk(KERN_ERR
1873 "%s: Setting GPIO on encoder ports\n",
1874 dev->name);
1875 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1876 }
1877
1878 /* TODO: 23-19 */
1879 if (mask & 0x00f80000)
1880 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1881}
1882
1883void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1884{
1885 if (mask & 0x00000007)
1886 cx_clear(GP0_IO, mask & 0x7);
1887
1888 if (mask & 0x0007fff8) {
1889 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1890 printk(KERN_ERR
1891 "%s: Clearing GPIO moving on encoder ports\n",
1892 dev->name);
1893 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1894 }
1895
1896 /* TODO: 23-19 */
1897 if (mask & 0x00f80000)
1898 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1899}
1900
09ea33e5
IL
1901u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1902{
1903 if (mask & 0x00000007)
1904 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1905
1906 if (mask & 0x0007fff8) {
1907 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1908 printk(KERN_ERR
1909 "%s: Reading GPIO moving on encoder ports\n",
1910 dev->name);
1911 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1912 }
1913
1914 /* TODO: 23-19 */
1915 if (mask & 0x00f80000)
1916 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1917
1918 return 0;
1919}
1920
6f8bee9b
ST
1921void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1922{
1923 if ((mask & 0x00000007) && asoutput)
1924 cx_set(GP0_IO, (mask & 0x7) << 16);
1925 else if ((mask & 0x00000007) && !asoutput)
1926 cx_clear(GP0_IO, (mask & 0x7) << 16);
1927
1928 if (mask & 0x0007fff8) {
1929 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1930 printk(KERN_ERR
1931 "%s: Enabling GPIO on encoder ports\n",
1932 dev->name);
1933 }
1934
1935 /* MC417_OEN is active low for output, write 1 for an input */
1936 if ((mask & 0x0007fff8) && asoutput)
1937 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1938
1939 else if ((mask & 0x0007fff8) && !asoutput)
1940 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1941
1942 /* TODO: 23-19 */
1943}
1944
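/*
 * Illustrative usage sketch only (not code from this driver): a board
 * attach path could drive a reset line wired to one of the bridge GPIOs
 * with the helpers above. GPIO_0 is assumed to be the 0x00000001 bit mask
 * from cx23885.h; the delay and the active-low polarity are invented for
 * the example.
 *
 *	cx23885_gpio_enable(dev, GPIO_0, 1);	configure GPIO 0 as an output
 *	cx23885_gpio_clear(dev, GPIO_0);	assert reset (active low)
 *	msleep(100);
 *	cx23885_gpio_set(dev, GPIO_0);		release reset
 */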
4c62e976
GKH
1945static int cx23885_initdev(struct pci_dev *pci_dev,
1946 const struct pci_device_id *pci_id)
d19770e5
ST
1947{
1948 struct cx23885_dev *dev;
da59a4de 1949 struct v4l2_ctrl_handler *hdl;
d19770e5
ST
1950 int err;
1951
44a6481d 1952 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
d19770e5
ST
1953 if (NULL == dev)
1954 return -ENOMEM;
1955
c0714f6c
HV
1956 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1957 if (err < 0)
1958 goto fail_free;
1959
da59a4de
HV
1960 hdl = &dev->ctrl_handler;
1961 v4l2_ctrl_handler_init(hdl, 6);
1962 if (hdl->error) {
1963 err = hdl->error;
1964 goto fail_ctrl;
1965 }
1966 dev->v4l2_dev.ctrl_handler = hdl;
1967
f59ad611
AW
1968 /* Prepare to handle notifications from subdevices */
1969 cx23885_v4l2_dev_notify_init(dev);
1970
d19770e5
ST
1971 /* pci init */
1972 dev->pci = pci_dev;
1973 if (pci_enable_device(pci_dev)) {
1974 err = -EIO;
da59a4de 1975 goto fail_ctrl;
d19770e5
ST
1976 }
1977
1978 if (cx23885_dev_setup(dev) < 0) {
1979 err = -EINVAL;
da59a4de 1980 goto fail_ctrl;
d19770e5
ST
1981 }
1982
1983 /* print pci info */
abd34d8d 1984 dev->pci_rev = pci_dev->revision;
d19770e5
ST
1985 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
1986 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1987 "latency: %d, mmio: 0x%llx\n", dev->name,
1988 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
a589b665
ST
1989 dev->pci_lat,
1990 (unsigned long long)pci_resource_start(pci_dev, 0));
d19770e5
ST
1991
1992 pci_set_master(pci_dev);
1993 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
1994 printk(KERN_ERR "%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1995 err = -EIO;
0c3a14c1 1996 goto fail_context;
d19770e5
ST
1997 }
1998
0c3a14c1
HV
1999 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
2000 if (IS_ERR(dev->alloc_ctx)) {
2001 err = PTR_ERR(dev->alloc_ctx);
2002 goto fail_context;
2003 }
d7515b88 2004 err = request_irq(pci_dev->irq, cx23885_irq,
3e018fe4 2005 IRQF_SHARED, dev->name, dev);
d19770e5
ST
2006 if (err < 0) {
2007 printk(KERN_ERR "%s: can't get IRQ %d\n",
2008 dev->name, pci_dev->irq);
2009 goto fail_irq;
2010 }
2011
afd96668
HV
2012 switch (dev->board) {
2013 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
78db8547
IL
2014 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2015 break;
2016 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2017 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
afd96668
HV
2018 break;
2019 }
5a23b076 2020
f59ad611
AW
2021 /*
2022 * The CX2388[58] IR controller can start firing interrupts when
2023 * enabled, so these have to take place after the cx23885_irq() handler
2024 * is hooked up by the call to request_irq() above.
2025 */
2026 cx23885_ir_pci_int_enable(dev);
dbda8f70 2027 cx23885_input_init(dev);
f59ad611 2028
d19770e5
ST
2029 return 0;
2030
2031fail_irq:
0c3a14c1
HV
2032 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2033fail_context:
d19770e5 2034 cx23885_dev_unregister(dev);
da59a4de
HV
2035fail_ctrl:
2036 v4l2_ctrl_handler_free(hdl);
c0714f6c 2037 v4l2_device_unregister(&dev->v4l2_dev);
d19770e5
ST
2038fail_free:
2039 kfree(dev);
2040 return err;
2041}
2042
4c62e976 2043static void cx23885_finidev(struct pci_dev *pci_dev)
d19770e5 2044{
c0714f6c
HV
2045 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2046 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
d19770e5 2047
dbda8f70 2048 cx23885_input_fini(dev);
f59ad611 2049 cx23885_ir_fini(dev);
d19770e5 2050
f59ad611 2051 cx23885_shutdown(dev);
29f8a0a5 2052
d19770e5
ST
2053 /* unregister stuff */
2054 free_irq(pci_dev->irq, dev);
d19770e5 2055
8d4d9329
HV
2056 pci_disable_device(pci_dev);
2057
d19770e5 2058 cx23885_dev_unregister(dev);
0c3a14c1 2059 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
da59a4de 2060 v4l2_ctrl_handler_free(&dev->ctrl_handler);
c0714f6c 2061 v4l2_device_unregister(v4l2_dev);
d19770e5
ST
2062 kfree(dev);
2063}
2064
2065static struct pci_device_id cx23885_pci_tbl[] = {
2066 {
2067 /* CX23885 */
2068 .vendor = 0x14f1,
2069 .device = 0x8852,
2070 .subvendor = PCI_ANY_ID,
2071 .subdevice = PCI_ANY_ID,
9c8ced51 2072 }, {
d19770e5
ST
2073 /* CX23887 Rev 2 */
2074 .vendor = 0x14f1,
2075 .device = 0x8880,
2076 .subvendor = PCI_ANY_ID,
2077 .subdevice = PCI_ANY_ID,
9c8ced51 2078 }, {
d19770e5
ST
2079 /* --- end of list --- */
2080 }
2081};
2082MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2083
2084static struct pci_driver cx23885_pci_driver = {
2085 .name = "cx23885",
2086 .id_table = cx23885_pci_tbl,
2087 .probe = cx23885_initdev,
4c62e976 2088 .remove = cx23885_finidev,
d19770e5
ST
2089 /* TODO */
2090 .suspend = NULL,
2091 .resume = NULL,
2092};
2093
9710e7a7 2094static int __init cx23885_init(void)
d19770e5 2095{
1990d50b
MCC
2096 printk(KERN_INFO "cx23885 driver version %s loaded\n",
2097 CX23885_VERSION);
d19770e5
ST
2098 return pci_register_driver(&cx23885_pci_driver);
2099}
2100
9710e7a7 2101static void __exit cx23885_fini(void)
d19770e5
ST
2102{
2103 pci_unregister_driver(&cx23885_pci_driver);
2104}
2105
2106module_init(cx23885_init);
2107module_exit(cx23885_fini);