[media] media: videobuf2: Restructure vb2_buffer
[deliverable/linux.git] drivers/media/pci/cx88/cx88-vbi.c
/*
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

#include "cx88.h"

static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

#define dprintk(level, fmt, arg...) do {				\
	if (vbi_debug >= level)						\
		printk(KERN_DEBUG "%s: " fmt, dev->core->name, ## arg);	\
} while (0)

/* ------------------------------------------------------------------ */

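/*
 * Fill in the raw VBI capture parameters for the current TV norm: grey
 * samples, VBI_LINE_LENGTH samples per line, and norm dependent sampling
 * rate, start lines and per-field line counts (NTSC vs. PAL).
 */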
int cx8800_vbi_fmt(struct file *file, void *priv,
		   struct v4l2_format *f)
{
	struct cx8800_dev *dev = video_drvdata(file);

	f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
	f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
	f->fmt.vbi.offset = 244;

	if (dev->core->tvnorm & V4L2_STD_525_60) {
		/* ntsc */
		f->fmt.vbi.sampling_rate = 28636363;
		f->fmt.vbi.start[0] = 10;
		f->fmt.vbi.start[1] = 273;
		f->fmt.vbi.count[0] = VBI_LINE_NTSC_COUNT;
		f->fmt.vbi.count[1] = VBI_LINE_NTSC_COUNT;
	} else if (dev->core->tvnorm & V4L2_STD_625_50) {
		/* pal */
		f->fmt.vbi.sampling_rate = 35468950;
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
		f->fmt.vbi.count[0] = VBI_LINE_PAL_COUNT;
		f->fmt.vbi.count[1] = VBI_LINE_PAL_COUNT;
	}
	return 0;
}

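/*
 * Program the VBI SRAM/FIFO channel (SRAM_CH24) with the buffer's RISC
 * program, enable VBI capture in MO_VBOS_CONTROL, reset the general
 * purpose counter, unmask the VBI interrupts and start the RISC DMA
 * engine.
 */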
static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
				struct cx88_dmaqueue *q,
				struct cx88_buffer *buf)
{
	struct cx88_core *core = dev->core;

	/* setup fifo + format */
	cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
				VBI_LINE_LENGTH, buf->risc.dma);

	cx_write(MO_VBOS_CONTROL, ((1 << 18) |	/* comb filter delay fixup */
				   (1 << 15) |	/* enable vbi capture */
				   (1 << 11)));

	/* reset counter */
	cx_write(MO_VBI_GPCNTRL, GP_COUNT_CONTROL_RESET);
	q->count = 0;

	/* enable irqs */
	cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);
	cx_set(MO_VID_INTMSK, 0x0f0088);

	/* enable capture */
	cx_set(VID_CAPTURE_CONTROL, 0x18);

	/* start dma */
	cx_set(MO_DEV_CNTRL2, (1 << 5));
	cx_set(MO_VID_DMACNTRL, 0x88);

	return 0;
}

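/*
 * Reverse of cx8800_start_vbi_dma(): stop the DMA engine, disable VBI
 * capture and mask the interrupts again.
 */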
void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
{
	struct cx88_core *core = dev->core;

	/* stop dma */
	cx_clear(MO_VID_DMACNTRL, 0x88);

	/* disable capture */
	cx_clear(VID_CAPTURE_CONTROL, 0x18);

	/* disable irqs */
	cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
	cx_clear(MO_VID_INTMSK, 0x0f0088);
}

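/*
 * Restart DMA from the first buffer still on the active list; a no-op if
 * the queue is empty.  Presumably used by the bridge driver's interrupt
 * handling to recover after a DMA stall or error.
 */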
int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
			     struct cx88_dmaqueue *q)
{
	struct cx88_buffer *buf;

	if (list_empty(&q->active))
		return 0;

	buf = list_entry(q->active.next, struct cx88_buffer, list);
	dprintk(2, "restart_queue [%p/%d]: restart dma\n",
		buf, buf->vb.vb2_buf.index);
	cx8800_start_vbi_dma(dev, q, buf);
	return 0;
}

/* ------------------------------------------------------------------ */

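/*
 * vb2 .queue_setup: one plane per buffer, sized to hold both fields of
 * VBI lines for the current norm (line count * VBI_LINE_LENGTH * 2).
 */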
static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
		       unsigned int *num_buffers, unsigned int *num_planes,
		       unsigned int sizes[], void *alloc_ctxs[])
{
	struct cx8800_dev *dev = q->drv_priv;

	*num_planes = 1;
	if (dev->core->tvnorm & V4L2_STD_525_60)
		sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
	else
		sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
	alloc_ctxs[0] = dev->alloc_ctx;
	return 0;
}

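/*
 * vb2 .buf_prepare: check that the plane is large enough for the current
 * norm, set the payload size, and build the RISC program that DMAs the
 * top field to offset 0 and the bottom field right after it.
 */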
static int buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
	struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	unsigned int lines;
	unsigned int size;

	if (dev->core->tvnorm & V4L2_STD_525_60)
		lines = VBI_LINE_NTSC_COUNT;
	else
		lines = VBI_LINE_PAL_COUNT;
	size = lines * VBI_LINE_LENGTH * 2;
	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);

	cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
			 0, VBI_LINE_LENGTH * lines,
			 VBI_LINE_LENGTH, 0,
			 lines);
	return 0;
}

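/*
 * vb2 .buf_finish: free the buffer's RISC program memory, if it was
 * allocated.
 */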
static void buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
	struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
	struct cx88_riscmem *risc = &buf->risc;

	if (risc->cpu)
		pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
	memset(risc, 0, sizeof(*risc));
}

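/*
 * vb2 .buf_queue: make the buffer's RISC program loop back onto itself,
 * then either start DMA right away (first active buffer) or append it to
 * the chain by pointing the previous buffer's terminating jump at this
 * buffer and flagging this buffer's entry instruction with RISC_IRQ1 so
 * an interrupt is raised when DMA advances into it.
 */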
static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
	struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
	struct cx88_buffer *prev;
	struct cx88_dmaqueue *q = &dev->vbiq;

	/* add jump to start */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);

	if (list_empty(&q->active)) {
		list_add_tail(&buf->list, &q->active);
		cx8800_start_vbi_dma(dev, q, buf);
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.vb2_buf.index);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx88_buffer, list);
		list_add_tail(&buf->list, &q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.vb2_buf.index);
	}
}

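/*
 * vb2 .start_streaming: kick off DMA from the first buffer that
 * buffer_queue() placed on the active list.
 */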
static int start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct cx8800_dev *dev = q->drv_priv;
	struct cx88_dmaqueue *dmaq = &dev->vbiq;
	struct cx88_buffer *buf = list_entry(dmaq->active.next,
					     struct cx88_buffer, list);

	cx8800_start_vbi_dma(dev, dmaq, buf);
	return 0;
}

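/*
 * vb2 .stop_streaming: stop VBI DMA and capture, then hand every buffer
 * still on the active list back to vb2 as VB2_BUF_STATE_ERROR while
 * holding the slock spinlock.
 */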
static void stop_streaming(struct vb2_queue *q)
{
	struct cx8800_dev *dev = q->drv_priv;
	struct cx88_core *core = dev->core;
	struct cx88_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	cx_clear(MO_VID_DMACNTRL, 0x11);
	cx_clear(VID_CAPTURE_CONTROL, 0x06);
	cx8800_stop_vbi_dma(dev);
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx88_buffer *buf = list_entry(dmaq->active.next,
						     struct cx88_buffer, list);

		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

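/*
 * These ops are registered by the bridge driver on its VBI vb2_queue.
 * A minimal sketch of that setup (field names assumed for illustration,
 * not a verbatim copy of cx88-video.c):
 *
 *	struct vb2_queue *q = &dev->vb2_vbiq;
 *
 *	q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct cx88_buffer);
 *	q->ops = &cx8800_vbi_qops;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &core->lock;
 *	err = vb2_queue_init(q);
 */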
const struct vb2_ops cx8800_vbi_qops = {
	.queue_setup = queue_setup,
	.buf_prepare = buffer_prepare,
	.buf_finish = buffer_finish,
	.buf_queue = buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};