/*
 * netup_unidvb_core.c
 *
 * Main module for NetUP Universal Dual DVB-CI
 *
 * Copyright (C) 2014 NetUP Inc.
 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "netup_unidvb.h"
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
#include "lnbh25.h"

static int spi_enable;
module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
MODULE_AUTHOR("info@netup.ru");
MODULE_VERSION(NETUP_UNIDVB_VERSION);
MODULE_LICENSE("GPL");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Avalon-MM PCI-E registers */
#define AVL_PCIE_IENR		0x50
#define AVL_PCIE_ISR		0x40
#define AVL_IRQ_ENABLE		0x80
#define AVL_IRQ_ASSERTED	0x80
/* GPIO registers */
#define GPIO_REG_IO		0x4880
#define GPIO_REG_IO_TOGGLE	0x4882
#define GPIO_REG_IO_SET		0x4884
#define GPIO_REG_IO_CLEAR	0x4886
/* GPIO bits */
#define GPIO_FEA_RESET		(1 << 0)
#define GPIO_FEB_RESET		(1 << 1)
#define GPIO_RFA_CTL		(1 << 2)
#define GPIO_RFB_CTL		(1 << 3)
#define GPIO_FEA_TU_RESET	(1 << 4)
#define GPIO_FEB_TU_RESET	(1 << 5)
/* DMA base address */
#define NETUP_DMA0_ADDR		0x4900
#define NETUP_DMA1_ADDR		0x4940
/* 8 DMA blocks * 128 packets * 188 bytes */
#define NETUP_DMA_BLOCKS_COUNT	8
#define NETUP_DMA_PACKETS_COUNT	128
/* DMA status bits */
#define BIT_DMA_RUN		1
#define BIT_DMA_ERROR		2
#define BIT_DMA_IRQ		0x200

/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set: Control register, write to set control bits
 * @ctrlstat_clear: Control register, write to clear control bits
 * @start_addr_lo: DMA ring buffer start address, lower part
 * @start_addr_hi: DMA ring buffer start address, higher part
 * @size: DMA ring buffer size register
 *	Bits [0-7]: DMA packet size, 188 bytes
 *	Bits [16-23]: packets count in block, 128 packets
 *	Bits [24-31]: blocks count, 8 blocks
 * @timeout: DMA timeout in units of 8ns
 *	For example, a value of 375000000 equals 3 seconds
 * @curr_addr_lo: Current ring buffer head address, lower part
 * @curr_addr_hi: Current ring buffer head address, higher part
 * @stat_pkt_received: Statistic register, not tested
 * @stat_pkt_accepted: Statistic register, not tested
 * @stat_pkt_overruns: Statistic register, not tested
 * @stat_pkt_underruns: Statistic register, not tested
 * @stat_fifo_overruns: Statistic register, not tested
 */
struct netup_dma_regs {
	__le32	ctrlstat_set;
	__le32	ctrlstat_clear;
	__le32	start_addr_lo;
	__le32	start_addr_hi;
	__le32	size;
	__le32	timeout;
	__le32	curr_addr_lo;
	__le32	curr_addr_hi;
	__le32	stat_pkt_received;
	__le32	stat_pkt_accepted;
	__le32	stat_pkt_overruns;
	__le32	stat_pkt_underruns;
	__le32	stat_fifo_overruns;
} __packed __aligned(1);

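/*
 * Driver-private buffer: the vb2 buffer itself, list linkage for the
 * per-channel free-buffer queue and the number of bytes copied into the
 * buffer so far.
 */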
struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
	u32			size;
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);

static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8
};

static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};

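/*
 * Tuner callback shared by the horus3a and ascot2e configs above: the
 * GPIO_RFA_CTL/GPIO_RFB_CTL bit of the given channel switches its RF
 * input between the DVB-S/S2 path and the DVB-T/T2/C path.
 */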
static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
{
	u8 reg, mask;
	struct netup_dma *dma = priv;
	struct netup_unidvb_dev *ndev;

	if (!priv)
		return -EINVAL;
	ndev = dma->ndev;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
		__func__, dma->num, is_dvb_tc);
	reg = readb(ndev->bmmio0 + GPIO_REG_IO);
	mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
	if (!is_dvb_tc)
		reg |= mask;
	else
		reg &= ~mask;
	writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
	return 0;
}

static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI-E interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* unreset frontends bits[0:1] */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	msleep(100);
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));
}

static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
{
	u32 irq_mask = (dma->num == 0 ?
		NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);

	dev_dbg(&dma->ndev->pci_dev->dev,
		"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
	if (enable) {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
		writew(irq_mask,
			(u16 *)(dma->ndev->bmmio0 + REG_IMASK_SET));
	} else {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
		writew(irq_mask,
			(u16 *)(dma->ndev->bmmio0 + REG_IMASK_CLEAR));
	}
}

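/*
 * DMA interrupt: read the ring-buffer head written back by the hardware,
 * turn the distance from the previous head into a byte count (allowing
 * for wrap-around), account for it in data_size/data_offset and schedule
 * the worker that copies the data out of the ring.
 */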
static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check */
	if (addr_curr < dma->addr_phys ||
	    addr_curr > dma->addr_phys + dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		dev_warn_ratelimited(dev,
			"%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}

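/*
 * Top-level PCIe interrupt handler: interrupts are masked via
 * AVL_PCIE_IENR while the asserted source (I2C, SPI, DMA or CI) is
 * dispatched to its sub-handler, and re-enabled before returning.
 */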
static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
			iret = netup_i2c_interrupt(&ndev->i2c[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
			iret = netup_i2c_interrupt(&ndev->i2c[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
			iret = netup_spi_interrupt(ndev->spi);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
			iret = netup_dma_interrupt(&ndev->dma[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
			iret = netup_dma_interrupt(&ndev->dma[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
			iret = netup_ci_interrupt(ndev);
		} else {
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}

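/*
 * videobuf2 callbacks: each frontend queue uses vmalloc'ed buffers sized
 * (page-aligned) for one DMA block of 128 TS packets of 188 bytes.
 */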
static int netup_unidvb_queue_setup(struct vb2_queue *vq,
				    const struct v4l2_format *fmt,
				    unsigned int *nbuffers,
				    unsigned int *nplanes,
				    unsigned int sizes[],
				    void *alloc_ctxs[])
{
	struct netup_dma *dma = vb2_get_drv_priv(vq);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);

	*nplanes = 1;
	if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
		*nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
	sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
	dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
		__func__, *nbuffers, sizes[0]);
	return 0;
}

static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
{
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
	buf->size = 0;
	return 0;
}

static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
	spin_lock_irqsave(&dma->lock, flags);
	list_add_tail(&buf->list, &dma->free_buffers);
	spin_unlock_irqrestore(&dma->lock, flags);
	mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
}

static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 1);
	return 0;
}

static void netup_unidvb_stop_streaming(struct vb2_queue *q)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 0);
	netup_unidvb_queue_cleanup(dma);
}

static struct vb2_ops dvb_qops = {
	.queue_setup		= netup_unidvb_queue_setup,
	.buf_prepare		= netup_unidvb_buf_prepare,
	.buf_queue		= netup_unidvb_buf_queue,
	.start_streaming	= netup_unidvb_start_streaming,
	.stop_streaming		= netup_unidvb_stop_streaming,
};

static int netup_unidvb_queue_init(struct netup_dma *dma,
				   struct vb2_queue *vb_queue)
{
	int res;

	/* Init videobuf2 queue structure */
	vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	vb_queue->drv_priv = dma;
	vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
	vb_queue->ops = &dvb_qops;
	vb_queue->mem_ops = &vb2_vmalloc_memops;
	vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	res = vb2_queue_init(vb_queue);
	if (res != 0) {
		dev_err(&dma->ndev->pci_dev->dev,
			"%s(): vb2_queue_init failed (%d)\n", __func__, res);
	}
	return res;
}

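/*
 * Register one DVB adapter ("bus") per DMA channel.  Each adapter exposes
 * three frontends backed by the same CXD2841ER demodulator: DVB-S/S2 with
 * a HORUS3A tuner and LNBH25 SEC driver, plus DVB-T/T2 and DVB-C, each
 * with an ASCOT2E tuner.
 */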
static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
				 int num)
{
	struct vb2_dvb_frontend *fe0, *fe1, *fe2;

	if (num < 0 || num > 1) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to init DVB bus %d\n", __func__, num);
		return -ENODEV;
	}
	mutex_init(&ndev->frontends[num].lock);
	INIT_LIST_HEAD(&ndev->frontends[num].felist);
	if (vb2_dvb_alloc_frontend(&ndev->frontends[num], 1) == NULL ||
		vb2_dvb_alloc_frontend(
			&ndev->frontends[num], 2) == NULL ||
		vb2_dvb_alloc_frontend(
			&ndev->frontends[num], 3) == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to allocate vb2_dvb_frontend\n",
			__func__);
		return -ENOMEM;
	}
	fe0 = vb2_dvb_get_frontend(&ndev->frontends[num], 1);
	fe1 = vb2_dvb_get_frontend(&ndev->frontends[num], 2);
	fe2 = vb2_dvb_get_frontend(&ndev->frontends[num], 3);
	if (fe0 == NULL || fe1 == NULL || fe2 == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): frontends have not been allocated\n", __func__);
		return -EINVAL;
	}
	if (netup_unidvb_queue_init(&ndev->dma[num], &fe0->dvb.dvbq) ||
	    netup_unidvb_queue_init(&ndev->dma[num], &fe1->dvb.dvbq) ||
	    netup_unidvb_queue_init(&ndev->dma[num], &fe2->dvb.dvbq)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to init vb2 queues\n", __func__);
		goto frontend_detach;
	}
	fe0->dvb.name = "netup_fe0";
	fe1->dvb.name = "netup_fe1";
	fe2->dvb.name = "netup_fe2";
	fe0->dvb.frontend = dvb_attach(cxd2841er_attach_s,
		&demod_config, &ndev->i2c[num].adap);
	if (fe0->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 frontend\n",
			__func__);
		goto frontend_detach;
	}
	horus3a_conf.set_tuner_priv = &ndev->dma[num];
	if (!dvb_attach(horus3a_attach, fe0->dvb.frontend,
			&horus3a_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 tuner frontend\n",
			__func__);
		goto frontend_detach;
	}
	if (!dvb_attach(lnbh25_attach, fe0->dvb.frontend,
			&lnbh25_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach SEC frontend\n", __func__);
		goto frontend_detach;
	}
	/* DVB-T/T2 frontend */
	fe1->dvb.frontend = dvb_attach(cxd2841er_attach_t,
		&demod_config, &ndev->i2c[num].adap);
	if (fe1->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T frontend\n", __func__);
		goto frontend_detach;
	}
	fe1->dvb.frontend->id = 1;
	ascot2e_conf.set_tuner_priv = &ndev->dma[num];
	if (!dvb_attach(ascot2e_attach, fe1->dvb.frontend,
			&ascot2e_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T tuner frontend\n",
			__func__);
		goto frontend_detach;
	}
	/* DVB-C/C2 frontend */
	fe2->dvb.frontend = dvb_attach(cxd2841er_attach_c,
		&demod_config, &ndev->i2c[num].adap);
	if (fe2->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-C frontend\n", __func__);
		goto frontend_detach;
	}
	fe2->dvb.frontend->id = 2;
	if (!dvb_attach(ascot2e_attach, fe2->dvb.frontend,
			&ascot2e_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T/C tuner frontend\n",
			__func__);
		goto frontend_detach;
	}

	if (vb2_dvb_register_bus(&ndev->frontends[num],
				 THIS_MODULE, NULL,
				 &ndev->pci_dev->dev, adapter_nr, 1)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to register DVB bus %d\n",
			__func__, num);
		goto frontend_detach;
	}
	dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
	return 0;
frontend_detach:
	vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
	return -EINVAL;
}

static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
{
	if (num < 0 || num > 1) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): unable to unregister DVB bus %d\n",
			__func__, num);
		return;
	}
	vb2_dvb_unregister_bus(&ndev->frontends[num]);
	dev_info(&ndev->pci_dev->dev,
		"%s(): DVB bus %d unregistered\n", __func__, num);
}

static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dvb_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res) {
		netup_unidvb_dvb_fini(ndev, 0);
		return res;
	}
	return 0;
}

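/*
 * Copy data from the DMA ring into a vb2 buffer.  The copy is done in up
 * to two chunks so that a region wrapping past the end of the ring is
 * handled correctly.
 */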
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}

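/*
 * Workqueue bottom half: drains the ring buffer into the queued vb2
 * buffers and completes each buffer once it holds a full DMA block.
 */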
static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			v4l2_get_timestamp(&buf->vb.timestamp);
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
{
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	while (!list_empty(&dma->free_buffers)) {
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

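/*
 * Buffer timeout: if no data has arrived within the interval armed in
 * netup_unidvb_buf_queue(), return all pending buffers as errored.
 */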
static void netup_unidvb_dma_timeout(unsigned long data)
{
	struct netup_dma *dma = (struct netup_dma *)data;
	struct netup_unidvb_dev *ndev = dma->ndev;

	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_queue_cleanup(dma);
}

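/*
 * Hardware DMA setup: the single coherent allocation is split in half
 * between the two DMA channels, and each channel's ring geometry
 * (8 blocks x 128 packets x 188 bytes) is programmed into its register
 * block.
 */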
static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	dma->timeout.function = netup_unidvb_dma_timeout;
	dma->timeout.data = (unsigned long)dma;
	init_timer(&dma->timeout);
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io(dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}

static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;

	if (num < 0 || num > 1)
		return;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
	dma = &ndev->dma[num];
	netup_unidvb_dma_enable(dma, 0);
	msleep(50);
	cancel_work_sync(&dma->work);
	del_timer(&dma->timeout);
}

static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dma_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dma_init(ndev, 1);
	if (res) {
		netup_unidvb_dma_fini(ndev, 0);
		return res;
	}
	netup_unidvb_dma_enable(&ndev->dma[0], 0);
	netup_unidvb_dma_enable(&ndev->dma[1], 0);
	return 0;
}

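/*
 * CI (Common Interface) bring-up: unmask the CI interrupt and register
 * both CI slots, unregistering the first if the second fails.
 */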
static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
				 struct pci_dev *pci_dev)
{
	int res;

	writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
	res = netup_unidvb_ci_register(ndev, 0, pci_dev);
	if (res)
		return res;
	res = netup_unidvb_ci_register(ndev, 1, pci_dev);
	if (res)
		netup_unidvb_ci_unregister(ndev, 0);
	return res;
}

static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
{
	if (!request_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 0 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 0));
		return -EBUSY;
	}
	if (!request_mem_region(pci_resource_start(pci_dev, 1),
			pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 1 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 1));
		release_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0));
		return -EBUSY;
	}
	return 0;
}

static int netup_unidvb_request_modules(struct device *dev)
{
	static const char * const modules[] = {
		"lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL
	};
	const char * const *curr_mod = modules;
	int err;

	while (*curr_mod != NULL) {
		err = request_module(*curr_mod);
		if (err) {
			dev_warn(dev, "request_module(%s) failed: %d\n",
				*curr_mod, err);
		}
		++curr_mod;
	}
	return 0;
}

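/*
 * PCI probe: enable the device, map both BARs, allocate the shared DMA
 * ring buffer, then bring up SPI, I2C, DVB, CI and DMA in that order.
 * The error labels below unwind the steps in reverse.
 */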
static int netup_unidvb_initdev(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	u8 board_revision;
	u16 board_vendor;
	struct netup_unidvb_dev *ndev;
	int old_firmware = 0;

	netup_unidvb_request_modules(&pci_dev->dev);

	/* Check card revision */
	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: expected card revision %d, got %d\n",
			NETUP_PCI_DEV_REVISION, pci_dev->revision);
		dev_err(&pci_dev->dev,
			"Please upgrade firmware!\n");
		dev_err(&pci_dev->dev,
			"Instructions on http://www.netup.tv\n");
		old_firmware = 1;
		spi_enable = 1;
	}

	/* allocate device context; kzalloc() returns zeroed memory */
	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto dev_alloc_err;
	ndev->old_fw = old_firmware;
	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
	if (!ndev->wq) {
		dev_err(&pci_dev->dev,
			"%s(): unable to create workqueue\n", __func__);
		goto wq_create_err;
	}
	ndev->pci_dev = pci_dev;
	ndev->pci_bus = pci_dev->bus->number;
	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
	ndev->board_num = ndev->pci_bus * 10 + ndev->pci_slot;
	pci_set_drvdata(pci_dev, ndev);
	/* PCI init */
	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);

	if (pci_enable_device(pci_dev)) {
		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
			__func__);
		goto pci_enable_err;
	}
	/* read PCI info */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
	if (board_vendor != NETUP_VENDOR_ID) {
		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x",
			__func__, board_vendor);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev,
		"%s(): board vendor 0x%x, revision 0x%x\n",
		__func__, board_vendor, board_revision);
	pci_set_master(pci_dev);
	if (!pci_dma_supported(pci_dev, 0xffffffff)) {
		dev_err(&pci_dev->dev,
			"%s(): 32bit PCI DMA is not supported\n", __func__);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
	/* Adjust PCIe completion timeout. */
	pcie_capability_clear_and_set_word(pci_dev,
		PCI_EXP_DEVCTL2, 0xf, 0x2);

	if (netup_unidvb_request_mmio(pci_dev)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO regions\n", __func__);
		goto pci_detect_err;
	}
	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	if (!ndev->lmmio0) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 0\n", __func__);
		goto pci_bar0_error;
	}
	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	if (!ndev->lmmio1) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 1\n", __func__);
		goto pci_bar1_error;
	}
	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
	dev_info(&pci_dev->dev,
		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
		__func__,
		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
		pci_dev->irq);
	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
			"netup_unidvb", pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
		goto irq_request_err;
	}
	ndev->dma_size = 2 * 188 *
		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
	if (!ndev->dma_virt) {
		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
			__func__);
		goto dma_alloc_err;
	}
	netup_unidvb_dev_enable(ndev);
	if (spi_enable && netup_spi_init(ndev)) {
		dev_warn(&pci_dev->dev,
			"netup_unidvb: SPI flash setup failed\n");
		goto spi_setup_err;
	}
	if (old_firmware) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: card initialization was incomplete\n");
		return 0;
	}
	if (netup_i2c_register(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
		goto i2c_setup_err;
	}
	/* enable I2C IRQs */
	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
		ndev->bmmio0 + REG_IMASK_SET);
	usleep_range(5000, 10000);
	if (netup_unidvb_dvb_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
		goto dvb_setup_err;
	}
	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
		goto ci_setup_err;
	}
	if (netup_unidvb_dma_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
		goto dma_setup_err;
	}
	dev_info(&pci_dev->dev,
		"netup_unidvb: device has been initialized\n");
	return 0;
dma_setup_err:
	netup_unidvb_ci_unregister(ndev, 0);
	netup_unidvb_ci_unregister(ndev, 1);
ci_setup_err:
	netup_unidvb_dvb_fini(ndev, 0);
	netup_unidvb_dvb_fini(ndev, 1);
dvb_setup_err:
	netup_i2c_unregister(ndev);
i2c_setup_err:
	if (ndev->spi)
		netup_spi_release(ndev);
spi_setup_err:
	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
	free_irq(pci_dev->irq, pci_dev);
irq_request_err:
	iounmap(ndev->lmmio1);
pci_bar1_error:
	iounmap(ndev->lmmio0);
pci_bar0_error:
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
pci_detect_err:
	pci_disable_device(pci_dev);
pci_enable_err:
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
wq_create_err:
	kfree(ndev);
dev_alloc_err:
	dev_err(&pci_dev->dev,
		"%s(): failed to initialize device\n", __func__);
	return -EIO;
}

static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}

static struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);

static struct pci_driver netup_unidvb_pci_driver = {
	.name		= "netup_unidvb",
	.id_table	= netup_unidvb_pci_tbl,
	.probe		= netup_unidvb_initdev,
	.remove		= netup_unidvb_finidev,
	.suspend	= NULL,
	.resume		= NULL,
};

static int __init netup_unidvb_init(void)
{
	return pci_register_driver(&netup_unidvb_pci_driver);
}

static void __exit netup_unidvb_fini(void)
{
	pci_unregister_driver(&netup_unidvb_pci_driver);
}

module_init(netup_unidvb_init);
module_exit(netup_unidvb_fini);