Commit | Line | Data |
---|---|---|
44c88924 DD |
1 | /* |
2 | * Brontes PCI frame grabber driver | |
3 | * | |
4 | * Copyright (C) 2008 3M Company | |
ecf47451 JB |
5 | * Contact: Justin Bronder <jsbronder@brontes3d.com> |
6 | * Original Authors: Daniel Drake <ddrake@brontes3d.com> | |
7 | * Duane Griffin <duaneg@dghda.com> | |
44c88924 DD |
8 | * |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License as published by | |
11 | * the Free Software Foundation; either version 2 of the License, or | |
12 | * (at your option) any later version. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, | |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | * GNU General Public License for more details. | |
18 | * | |
19 | * You should have received a copy of the GNU General Public License | |
20 | * along with this program; if not, write to the Free Software | |
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
22 | */ | |
23 | ||
24 | #include <linux/device.h> | |
25 | #include <linux/fs.h> | |
26 | #include <linux/interrupt.h> | |
27 | #include <linux/spinlock.h> | |
28 | #include <linux/ioctl.h> | |
29 | #include <linux/kernel.h> | |
30 | #include <linux/module.h> | |
31 | #include <linux/pci.h> | |
32 | #include <linux/types.h> | |
33 | #include <linux/cdev.h> | |
34 | #include <linux/list.h> | |
35 | #include <linux/poll.h> | |
36 | #include <linux/wait.h> | |
37 | #include <linux/mm.h> | |
ecf47451 | 38 | #include <linux/uaccess.h> |
44c88924 | 39 | |
/*
 * Number of triplet buffers shared with userspace.  Read-only module
 * parameter (mode 0444); the description advertises a minimum of 2, but no
 * enforcement is visible in this part of the file — presumably validated at
 * module init (TODO confirm).
 */
static unsigned int b3dfg_nbuf = 2;

module_param_named(buffer_count, b3dfg_nbuf, uint, 0444);

MODULE_PARM_DESC(buffer_count, "Number of buffers (min 2, default 2)");

MODULE_AUTHOR("Daniel Drake <ddrake@brontes3d.com>");
MODULE_DESCRIPTION("Brontes frame grabber driver");
MODULE_LICENSE("GPL");

#define DRIVER_NAME "b3dfg"

/* Maximum number of board instances this driver will manage. */
#define B3DFG_MAX_DEVS 4

/* Every buffer carries a triplet of three consecutively transferred frames. */
#define B3DFG_FRAMES_PER_BUFFER 3

/* The register window lives in PCI BAR 0. */
#define B3DFG_BAR_REGS 0
#define B3DFG_REGS_LENGTH 0x10000

/*
 * ioctl numbers — this is the userspace ABI.
 * NOTE(review): B3DFG_IOCTNUMBUFS is defined but not handled in b3dfg_ioctl
 * below; possibly a legacy command — confirm before removing.
 */
#define B3DFG_IOC_MAGIC 0xb3 /* dfg :-) */
#define B3DFG_IOCGFRMSZ _IOR(B3DFG_IOC_MAGIC, 1, int)
#define B3DFG_IOCTNUMBUFS _IO(B3DFG_IOC_MAGIC, 2)
#define B3DFG_IOCTTRANS _IO(B3DFG_IOC_MAGIC, 3)
#define B3DFG_IOCTQUEUEBUF _IO(B3DFG_IOC_MAGIC, 4)
#define B3DFG_IOCTPOLLBUF _IOWR(B3DFG_IOC_MAGIC, 5, struct b3dfg_poll)
#define B3DFG_IOCTWAITBUF _IOWR(B3DFG_IOC_MAGIC, 6, struct b3dfg_wait)
#define B3DFG_IOCGWANDSTAT _IOR(B3DFG_IOC_MAGIC, 7, int)
44c88924 DD |
65 | |
/* Hardware register offsets within the BAR 0 window. */
enum {
	/* number of 4kb pages per frame */
	B3D_REG_FRM_SIZE = 0x0,

	/* bit 0: set to enable interrupts
	 * bit 1: set to enable cable status change interrupts */
	B3D_REG_HW_CTRL = 0x4,

	/* bit 0-1 - 1-based ID of next pending frame transfer (0 = none)
	 * bit 2 indicates the previous DMA transfer has completed
	 * bit 3 indicates wand cable status change
	 * bit 8:15 - counter of number of discarded triplets */
	B3D_REG_DMA_STS = 0x8,

	/* bit 0: wand status (1 = present, 0 = disconnected) */
	B3D_REG_WAND_STS = 0xc,

	/* bus address for DMA transfers. lower 2 bits must be zero because DMA
	 * works with 32 bit word size. */
	B3D_REG_EC220_DMA_ADDR = 0x8000,

	/* bit 20:0 - number of 32 bit words to be transferred
	 * bit 21:31 - reserved */
	B3D_REG_EC220_TRF_SIZE = 0x8004,

	/* bit 0 - error bit
	 * bit 1 - interrupt bit (set to generate interrupt at end of transfer)
	 * bit 2 - start bit (set to start transfer)
	 * bit 3 - direction (0 = DMA_TO_DEVICE, 1 = DMA_FROM_DEVICE
	 * bit 4:31 - reserved */
	B3D_REG_EC220_DMA_STS = 0x8008,
};
98 | ||
/*
 * Buffer lifecycle.  A buffer is POLLED (idle, owned by userspace) until
 * queued via B3DFG_IOCTQUEUEBUF, PENDING while on buffer_queue awaiting DMA,
 * and POPULATED once its full triplet has been transferred; polling or
 * waiting on it returns it to POLLED.
 */
enum b3dfg_buffer_state {
	B3DFG_BUFFER_POLLED = 0,
	B3DFG_BUFFER_PENDING,
	B3DFG_BUFFER_POPULATED,
};
104 | ||
/* One triplet buffer; state transitions are protected by dev buffer_lock. */
struct b3dfg_buffer {
	/* kmalloc'd backing storage, one pointer per frame of the triplet */
	unsigned char *frame[B3DFG_FRAMES_PER_BUFFER];
	/* membership in b3dfg_dev.buffer_queue while PENDING */
	struct list_head list;
	/* enum b3dfg_buffer_state */
	u8 state;
};
110 | ||
/* Per-board device state. */
struct b3dfg_dev {

	/* no protection needed: all finalized at initialization time */
	struct pci_dev *pdev;
	struct cdev chardev;
	struct device *dev;
	void __iomem *regs;
	unsigned int frame_size;

	/*
	 * Protects buffer state, including buffer_queue, triplet_ready,
	 * cur_dma_frame_idx & cur_dma_frame_addr.
	 */
	spinlock_t buffer_lock;
	struct b3dfg_buffer *buffers;
	struct list_head buffer_queue;

	/* Last frame in triplet transferred (-1 if none). */
	int cur_dma_frame_idx;

	/* Current frame's address for DMA. */
	dma_addr_t cur_dma_frame_addr;

	/*
	 * Protects cstate_tstamp.
	 * Nests inside buffer_lock.
	 */
	spinlock_t cstate_lock;
	unsigned long cstate_tstamp;

	/*
	 * Protects triplets_dropped.
	 * Nests inside buffer_lock.
	 */
	spinlock_t triplets_dropped_lock;
	unsigned int triplets_dropped;

	/* Woken on triplet completion and on cable state changes. */
	wait_queue_head_t buffer_waitqueue;

	/* Both written under buffer_lock. */
	unsigned int transmission_enabled:1;
	unsigned int triplet_ready:1;
};
153 | ||
154 | static u8 b3dfg_devices[B3DFG_MAX_DEVS]; | |
155 | ||
156 | static struct class *b3dfg_class; | |
157 | static dev_t b3dfg_devt; | |
158 | ||
159 | static const struct pci_device_id b3dfg_ids[] __devinitdata = { | |
160 | { PCI_DEVICE(0x0b3d, 0x0001) }, | |
44c88924 DD |
161 | { }, |
162 | }; | |
163 | ||
5f4e925a DG |
164 | MODULE_DEVICE_TABLE(pci, b3dfg_ids); |
165 | ||
44c88924 DD |
/***** user-visible types *****/

/* Argument for B3DFG_IOCTPOLLBUF. */
struct b3dfg_poll {
	/* in: index of the buffer to check */
	int buffer_idx;
	/* out: triplets dropped by hardware since last successful poll/wait */
	unsigned int triplets_dropped;
};

/* Argument for B3DFG_IOCTWAITBUF. */
struct b3dfg_wait {
	/* in: index of the buffer to wait on */
	int buffer_idx;
	/* in: timeout in milliseconds (0 = wait forever); out: time remaining */
	unsigned int timeout;
	/* out: triplets dropped by hardware since last successful poll/wait */
	unsigned int triplets_dropped;
};
178 | ||
/**** register I/O ****/

/* Read a 32-bit board register at byte offset @reg (see B3D_REG_* enum). */
static u32 b3dfg_read32(struct b3dfg_dev *fgdev, u16 reg)
{
	return ioread32(fgdev->regs + reg);
}
185 | ||
/* Write @value to the 32-bit board register at byte offset @reg. */
static void b3dfg_write32(struct b3dfg_dev *fgdev, u16 reg, u32 value)
{
	iowrite32(value, fgdev->regs + reg);
}
190 | ||
191 | /**** buffer management ****/ | |
192 | ||
5f4e925a DG |
193 | /* |
194 | * Program EC220 for transfer of a specific frame. | |
195 | * Called with buffer_lock held. | |
196 | */ | |
197 | static int setup_frame_transfer(struct b3dfg_dev *fgdev, | |
198 | struct b3dfg_buffer *buf, int frame) | |
44c88924 DD |
199 | { |
200 | unsigned char *frm_addr; | |
201 | dma_addr_t frm_addr_dma; | |
5f4e925a | 202 | unsigned int frm_size = fgdev->frame_size; |
44c88924 DD |
203 | |
204 | frm_addr = buf->frame[frame]; | |
5f4e925a | 205 | frm_addr_dma = pci_map_single(fgdev->pdev, frm_addr, |
ecf47451 JB |
206 | frm_size, PCI_DMA_FROMDEVICE); |
207 | if (pci_dma_mapping_error(fgdev->pdev, frm_addr_dma)) | |
5f4e925a DG |
208 | return -ENOMEM; |
209 | ||
44c88924 DD |
210 | fgdev->cur_dma_frame_addr = frm_addr_dma; |
211 | fgdev->cur_dma_frame_idx = frame; | |
212 | ||
ecf47451 JB |
213 | b3dfg_write32(fgdev, B3D_REG_EC220_DMA_ADDR, |
214 | cpu_to_le32(frm_addr_dma)); | |
215 | b3dfg_write32(fgdev, B3D_REG_EC220_TRF_SIZE, | |
216 | cpu_to_le32(frm_size >> 2)); | |
44c88924 | 217 | b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0xf); |
44c88924 | 218 | |
5f4e925a | 219 | return 0; |
44c88924 DD |
220 | } |
221 | ||
5f4e925a | 222 | /* Caller should hold buffer lock */ |
44c88924 DD |
223 | static void dequeue_all_buffers(struct b3dfg_dev *fgdev) |
224 | { | |
225 | int i; | |
5f4e925a | 226 | for (i = 0; i < b3dfg_nbuf; i++) { |
44c88924 DD |
227 | struct b3dfg_buffer *buf = &fgdev->buffers[i]; |
228 | buf->state = B3DFG_BUFFER_POLLED; | |
229 | list_del_init(&buf->list); | |
230 | } | |
231 | } | |
232 | ||
44c88924 DD |
/*
 * queue a buffer to receive data
 *
 * Userspace hands a buffer to the driver by index (B3DFG_IOCTQUEUEBUF); the
 * buffer is appended to buffer_queue and, if the hardware already has a
 * triplet waiting (triplet_ready), DMA of the buffer's first frame is
 * kicked off immediately.
 *
 * Returns 0 on success, -ENOENT for a bad index, -EINVAL if the buffer is
 * already queued, or -ENOMEM if the immediate DMA mapping failed.
 */
static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx)
{
	struct device *dev = &fgdev->pdev->dev;
	struct b3dfg_buffer *buf;
	unsigned long flags;
	int r = 0;

	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	if (bufidx < 0 || bufidx >= b3dfg_nbuf) {
		dev_dbg(dev, "Invalid buffer index, %d\n", bufidx);
		r = -ENOENT;
		goto out;
	}
	buf = &fgdev->buffers[bufidx];

	/* Double-queueing would corrupt the pending list. */
	if (unlikely(buf->state == B3DFG_BUFFER_PENDING)) {
		dev_dbg(dev, "buffer %d is already queued\n", bufidx);
		r = -EINVAL;
		goto out;
	}

	buf->state = B3DFG_BUFFER_PENDING;
	list_add_tail(&buf->list, &fgdev->buffer_queue);

	/* If a triplet arrived while no buffer was queued, start DMA now. */
	if (fgdev->transmission_enabled && fgdev->triplet_ready) {
		dev_dbg(dev, "triplet is ready, pushing immediately\n");
		fgdev->triplet_ready = 0;
		r = setup_frame_transfer(fgdev, buf, 0);
		if (r)
			dev_err(dev, "unable to map DMA buffer\n");
	}

out:
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
	return r;
}
270 | ||
/* non-blocking buffer poll. returns 1 if data is present in the buffer,
 * 0 otherwise
 *
 * On success (1) the buffer is returned to the POLLED state and the
 * hardware's dropped-triplet count is copied into the user's struct
 * b3dfg_poll and reset.  Other returns: -EFAULT on bad user pointer,
 * -EINVAL if transmission is disabled, -ENOENT for a bad buffer index. */
static int poll_buffer(struct b3dfg_dev *fgdev, void __user *arg)
{
	struct device *dev = &fgdev->pdev->dev;
	struct b3dfg_poll p;
	struct b3dfg_buffer *buf;
	unsigned long flags;
	int r = 1;
	/* only copy the struct back out when we actually consumed a triplet */
	int arg_out = 0;

	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;

	if (unlikely(!fgdev->transmission_enabled)) {
		dev_dbg(dev, "cannot poll, transmission disabled\n");
		return -EINVAL;
	}

	if (p.buffer_idx < 0 || p.buffer_idx >= b3dfg_nbuf)
		return -ENOENT;

	buf = &fgdev->buffers[p.buffer_idx];

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	if (likely(buf->state == B3DFG_BUFFER_POPULATED)) {
		arg_out = 1;
		buf->state = B3DFG_BUFFER_POLLED;

		/* IRQs already disabled by spin_lock_irqsave above. */
		spin_lock(&fgdev->triplets_dropped_lock);
		p.triplets_dropped = fgdev->triplets_dropped;
		fgdev->triplets_dropped = 0;
		spin_unlock(&fgdev->triplets_dropped_lock);
	} else {
		r = 0;
	}

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* copy_to_user must happen outside the spinlock (it may fault). */
	if (arg_out && copy_to_user(arg, &p, sizeof(p)))
		r = -EFAULT;

	return r;
}
317 | ||
5f4e925a DG |
318 | static unsigned long get_cstate_change(struct b3dfg_dev *fgdev) |
319 | { | |
320 | unsigned long flags, when; | |
321 | ||
322 | spin_lock_irqsave(&fgdev->cstate_lock, flags); | |
323 | when = fgdev->cstate_tstamp; | |
324 | spin_unlock_irqrestore(&fgdev->cstate_lock, flags); | |
325 | return when; | |
326 | } | |
327 | ||
/*
 * Wakeup condition for wait_buffer(): true when transmission has been
 * disabled, the watched buffer's triplet is complete, or the cable state
 * timestamp has moved past the snapshot @when taken before sleeping.
 *
 * Takes buffer_lock then cstate_lock (cstate_lock nests inside buffer_lock,
 * matching the documented lock ordering on struct b3dfg_dev).
 */
static int is_event_ready(struct b3dfg_dev *fgdev, struct b3dfg_buffer *buf,
			  unsigned long when)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	spin_lock(&fgdev->cstate_lock);
	result = (!fgdev->transmission_enabled ||
		  buf->state == B3DFG_BUFFER_POPULATED ||
		  when != fgdev->cstate_tstamp);
	spin_unlock(&fgdev->cstate_lock);
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	return result;
}
344 | ||
/* sleep until a specific buffer becomes populated
 *
 * w.timeout is in milliseconds; 0 means wait indefinitely.  On success the
 * buffer is returned to POLLED, the dropped-triplet counter is copied out
 * and reset, and the return value is the remaining timeout in ms (or the
 * original w.timeout if the buffer was already populated).  Errors:
 * -EFAULT, -EINVAL (TX disabled or cable state changed while waiting),
 * -ENOENT (bad index), -ETIMEDOUT, -ERESTARTSYS (interrupted). */
static int wait_buffer(struct b3dfg_dev *fgdev, void __user *arg)
{
	struct device *dev = &fgdev->pdev->dev;
	struct b3dfg_wait w;
	struct b3dfg_buffer *buf;
	unsigned long flags, when;
	int r;

	if (copy_from_user(&w, arg, sizeof(w)))
		return -EFAULT;

	if (!fgdev->transmission_enabled) {
		dev_dbg(dev, "cannot wait, transmission disabled\n");
		return -EINVAL;
	}

	if (w.buffer_idx < 0 || w.buffer_idx >= b3dfg_nbuf)
		return -ENOENT;

	buf = &fgdev->buffers[w.buffer_idx];

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	/* Fast path: already populated, no need to sleep at all. */
	if (buf->state == B3DFG_BUFFER_POPULATED) {
		r = w.timeout;
		goto out_triplets_dropped;
	}

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* Snapshot the cable state so a change during the sleep is detected. */
	when = get_cstate_change(fgdev);
	if (w.timeout > 0) {
		r = wait_event_interruptible_timeout(fgdev->buffer_waitqueue,
			is_event_ready(fgdev, buf, when),
			(w.timeout * HZ) / 1000);

		if (unlikely(r < 0))
			goto out;

		/* Report the remaining time back in milliseconds. */
		w.timeout = r * 1000 / HZ;
	} else {
		r = wait_event_interruptible(fgdev->buffer_waitqueue,
			is_event_ready(fgdev, buf, when));

		if (unlikely(r)) {
			r = -ERESTARTSYS;
			goto out;
		}
	}

	/* TODO: Inform the user via field(s) in w? */
	if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev)) {
		r = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	/* Woken but not populated: the timed wait expired. */
	if (buf->state != B3DFG_BUFFER_POPULATED) {
		r = -ETIMEDOUT;
		goto out_unlock;
	}

	buf->state = B3DFG_BUFFER_POLLED;

out_triplets_dropped:

	/* IRQs already disabled by spin_lock_irqsave above. */
	spin_lock(&fgdev->triplets_dropped_lock);
	w.triplets_dropped = fgdev->triplets_dropped;
	fgdev->triplets_dropped = 0;
	spin_unlock(&fgdev->triplets_dropped_lock);

out_unlock:
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
	if (copy_to_user(arg, &w, sizeof(w)))
		r = -EFAULT;
out:
	return r;
}
426 | ||
5f4e925a | 427 | /* mmap page fault handler */ |
ecf47451 JB |
428 | static int b3dfg_vma_fault(struct vm_area_struct *vma, |
429 | struct vm_fault *vmf) | |
44c88924 DD |
430 | { |
431 | struct b3dfg_dev *fgdev = vma->vm_file->private_data; | |
ecf47451 | 432 | unsigned long off = vmf->pgoff << PAGE_SHIFT; |
44c88924 DD |
433 | unsigned int frame_size = fgdev->frame_size; |
434 | unsigned int buf_size = frame_size * B3DFG_FRAMES_PER_BUFFER; | |
44c88924 DD |
435 | unsigned char *addr; |
436 | ||
437 | /* determine which buffer the offset lies within */ | |
438 | unsigned int buf_idx = off / buf_size; | |
439 | /* and the offset into the buffer */ | |
440 | unsigned int buf_off = off % buf_size; | |
441 | ||
442 | /* determine which frame inside the buffer the offset lies in */ | |
443 | unsigned int frm_idx = buf_off / frame_size; | |
444 | /* and the offset into the frame */ | |
445 | unsigned int frm_off = buf_off % frame_size; | |
446 | ||
5f4e925a | 447 | if (unlikely(buf_idx >= b3dfg_nbuf)) |
ecf47451 | 448 | return VM_FAULT_SIGBUS; |
44c88924 DD |
449 | |
450 | addr = fgdev->buffers[buf_idx].frame[frm_idx] + frm_off; | |
ecf47451 JB |
451 | vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, |
452 | virt_to_phys(addr) >> PAGE_SHIFT); | |
5f4e925a | 453 | |
ecf47451 | 454 | return VM_FAULT_NOPAGE; |
44c88924 DD |
455 | } |
456 | ||
/* VM operations for the buffer mapping; pages are supplied on demand. */
static struct vm_operations_struct b3dfg_vm_ops = {
	.fault = b3dfg_vma_fault,
};
460 | ||
461 | static int get_wand_status(struct b3dfg_dev *fgdev, int __user *arg) | |
462 | { | |
463 | u32 wndstat = b3dfg_read32(fgdev, B3D_REG_WAND_STS); | |
5f4e925a | 464 | dev_dbg(&fgdev->pdev->dev, "wand status %x\n", wndstat); |
44c88924 DD |
465 | return __put_user(wndstat & 0x1, arg); |
466 | } | |
467 | ||
/*
 * Start data transmission from the board.
 *
 * Verifies the wand is connected and bus mastering is on, resets the
 * per-session DMA/drop state under buffer_lock, then enables DMA-complete
 * and cable-status interrupts.  Returns 0 on success (also when another
 * caller raced us and already enabled TX) or -EINVAL without a wand.
 */
static int enable_transmission(struct b3dfg_dev *fgdev)
{
	u16 command;
	unsigned long flags;
	struct device *dev = &fgdev->pdev->dev;

	dev_dbg(dev, "enable transmission\n");

	/* check the cable is plugged in. */
	if (!b3dfg_read32(fgdev, B3D_REG_WAND_STS)) {
		dev_dbg(dev, "cannot start transmission without wand\n");
		return -EINVAL;
	}

	/*
	 * Check we're a bus master.
	 * TODO: I think we can remove this having added the pci_set_master call
	 */
	pci_read_config_word(fgdev->pdev, PCI_COMMAND, &command);
	if (!(command & PCI_COMMAND_MASTER)) {
		dev_err(dev, "not a bus master, force-enabling\n");
		pci_write_config_word(fgdev->pdev, PCI_COMMAND,
			command | PCI_COMMAND_MASTER);
	}

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	/* Handle racing enable_transmission calls. */
	if (fgdev->transmission_enabled) {
		spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
		goto out;
	}

	/* Fresh session: clear the dropped-triplet counter. */
	spin_lock(&fgdev->triplets_dropped_lock);
	fgdev->triplets_dropped = 0;
	spin_unlock(&fgdev->triplets_dropped_lock);

	fgdev->triplet_ready = 0;
	fgdev->cur_dma_frame_idx = -1;
	fgdev->transmission_enabled = 1;

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* Enable DMA and cable status interrupts. */
	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0x03);

out:
	return 0;
}
517 | ||
/*
 * Stop data transmission: clear transmission_enabled and mask all board
 * interrupts under buffer_lock (so the IRQ handler will ignore anything
 * further), drop all queued buffers, then wake sleepers in wait_buffer()
 * so they can observe the disabled state and return.
 */
static void disable_transmission(struct b3dfg_dev *fgdev)
{
	struct device *dev = &fgdev->pdev->dev;
	unsigned long flags;
	u32 tmp;

	dev_dbg(dev, "disable transmission\n");

	/* guarantee that no more interrupts will be serviced */
	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	fgdev->transmission_enabled = 0;

	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);

	/* FIXME: temporary debugging only. if the board stops transmitting,
	 * hitting ctrl+c and seeing this message is useful for determining
	 * the state of the board. */
	tmp = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
	dev_dbg(dev, "DMA_STS reads %x after TX stopped\n", tmp);

	dequeue_all_buffers(fgdev);
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	wake_up_interruptible(&fgdev->buffer_waitqueue);
}
543 | ||
544 | static int set_transmission(struct b3dfg_dev *fgdev, int enabled) | |
545 | { | |
5f4e925a DG |
546 | int res = 0; |
547 | ||
44c88924 | 548 | if (enabled && !fgdev->transmission_enabled) |
5f4e925a | 549 | res = enable_transmission(fgdev); |
44c88924 DD |
550 | else if (!enabled && fgdev->transmission_enabled) |
551 | disable_transmission(fgdev); | |
5f4e925a DG |
552 | |
553 | return res; | |
554 | } | |
555 | ||
/* Called in interrupt context.
 *
 * The wand was unplugged: mask all interrupts, stop transmission, unmap any
 * in-flight DMA frame and reset every queued buffer.  All state changes
 * happen under buffer_lock. */
static void handle_cstate_unplug(struct b3dfg_dev *fgdev)
{
	/* Disable all interrupts. */
	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);

	/* Stop transmission. */
	spin_lock(&fgdev->buffer_lock);
	fgdev->transmission_enabled = 0;

	fgdev->cur_dma_frame_idx = -1;
	fgdev->triplet_ready = 0;
	/* A non-zero address means a frame DMA was in flight; unmap it. */
	if (fgdev->cur_dma_frame_addr) {
		pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
				 fgdev->frame_size, PCI_DMA_FROMDEVICE);
		fgdev->cur_dma_frame_addr = 0;
	}
	dequeue_all_buffers(fgdev);
	spin_unlock(&fgdev->buffer_lock);
}
576 | ||
/* Called in interrupt context.
 *
 * React to a cable-status interrupt: tear down on unplug, bump the
 * cstate timestamp and wake waiters so wait_buffer()/b3dfg_poll() can
 * report the change. */
static void handle_cstate_change(struct b3dfg_dev *fgdev)
{
	u32 cstate = b3dfg_read32(fgdev, B3D_REG_WAND_STS);
	unsigned long when;
	struct device *dev = &fgdev->pdev->dev;

	dev_dbg(dev, "cable state change: %u\n", cstate);

	/*
	 * When the wand is unplugged we reset our state. The hardware will
	 * have done the same internally.
	 *
	 * Note we should never see a cable *plugged* event, as interrupts
	 * should only be enabled when transmitting, which requires the cable
	 * to be plugged. If we do see one it probably means the cable has been
	 * unplugged and re-plugged very rapidly. Possibly because it has a
	 * broken wire and is momentarily losing contact.
	 *
	 * TODO: At the moment if you plug in the cable then enable transmission
	 * the hardware will raise a couple of spurious interrupts, so
	 * just ignore them for now.
	 *
	 * Once the hardware is fixed we should complain and treat it as an
	 * unplug. Or at least track how frequently it is happening and do
	 * so if too many come in.
	 */
	if (cstate) {
		dev_warn(dev, "ignoring unexpected plug event\n");
		return;
	}
	handle_cstate_unplug(fgdev);

	/*
	 * Record cable state change timestamp & wake anyone waiting
	 * on a cable state change. Be paranoid about ensuring events
	 * are not missed if we somehow get two interrupts in a jiffy.
	 */
	spin_lock(&fgdev->cstate_lock);
	when = jiffies_64;
	if (when <= fgdev->cstate_tstamp)
		when = fgdev->cstate_tstamp + 1;
	fgdev->cstate_tstamp = when;
	wake_up_interruptible(&fgdev->buffer_waitqueue);
	spin_unlock(&fgdev->cstate_lock);
}
623 | ||
/* Called with buffer_lock held.
 *
 * A frame DMA finished: unmap it, and if it was the triplet's final frame
 * mark the head buffer POPULATED, remove it from the queue and wake
 * sleepers in wait_buffer()/poll(). */
static void transfer_complete(struct b3dfg_dev *fgdev)
{
	struct b3dfg_buffer *buf;
	struct device *dev = &fgdev->pdev->dev;

	pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
			 fgdev->frame_size, PCI_DMA_FROMDEVICE);
	fgdev->cur_dma_frame_addr = 0;

	/* The buffer being filled is always the head of the queue. */
	buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);

	dev_dbg(dev, "handle frame completion\n");
	if (fgdev->cur_dma_frame_idx == B3DFG_FRAMES_PER_BUFFER - 1) {

		/* last frame of that triplet completed */
		dev_dbg(dev, "triplet completed\n");
		buf->state = B3DFG_BUFFER_POPULATED;
		list_del_init(&buf->list);
		wake_up_interruptible(&fgdev->buffer_waitqueue);
	}
}
646 | ||
/*
 * Called with buffer_lock held.
 *
 * Note that idx is the (1-based) *next* frame to be transferred, while
 * cur_dma_frame_idx is the (0-based) *last* frame to have been transferred (or
 * -1 if none). Thus there should be a difference of 2 between them.
 *
 * Returns whether the caller still needs to acknowledge the interrupt:
 * false once a new transfer was programmed (presumably because the control
 * write in setup_frame_transfer() also acknowledges — TODO confirm against
 * the EC220 documentation), true on a frame-sequence mismatch.
 */
static bool setup_next_frame_transfer(struct b3dfg_dev *fgdev, int idx)
{
	struct b3dfg_buffer *buf;
	struct device *dev = &fgdev->pdev->dev;
	bool need_ack = 1;

	dev_dbg(dev, "program DMA transfer for next frame: %d\n", idx);

	/* The frame lands in the buffer at the head of the queue. */
	buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);
	if (idx == fgdev->cur_dma_frame_idx + 2) {
		if (setup_frame_transfer(fgdev, buf, idx - 1))
			dev_err(dev, "unable to map DMA buffer\n");
		need_ack = 0;
	} else {
		dev_err(dev, "frame mismatch, got %d, expected %d\n",
			idx, fgdev->cur_dma_frame_idx + 2);

		/* FIXME: handle dropped triplets here */
	}

	return need_ack;
}
676 | ||
/*
 * Interrupt handler.
 *
 * Reads DMA_STS once and services, in order: hardware-reported dropped
 * triplets, cable state changes, completed frame transfers, and the start
 * of the next pending frame transfer.  Acknowledges the interrupt at the
 * end unless setup_next_frame_transfer() reported that a newly programmed
 * transfer already did so.
 */
static irqreturn_t b3dfg_intr(int irq, void *dev_id)
{
	struct b3dfg_dev *fgdev = dev_id;
	struct device *dev = &fgdev->pdev->dev;
	u32 sts;
	u8 dropped;
	bool need_ack = 1;
	irqreturn_t res = IRQ_HANDLED;

	sts = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
	/* A zero status means the interrupt was not ours (shared line). */
	if (unlikely(sts == 0)) {
		dev_warn(dev, "ignore interrupt, DMA status is 0\n");
		res = IRQ_NONE;
		goto out;
	}

	/* Late interrupt after disable_transmission() cleared the flag. */
	if (unlikely(!fgdev->transmission_enabled)) {
		dev_warn(dev, "ignore interrupt, TX disabled\n");
		res = IRQ_HANDLED;
		goto out;
	}

	/* Handle dropped frames, as reported by the hardware. */
	dropped = (sts >> 8) & 0xff;
	dev_dbg(dev, "intr: DMA_STS=%08x (drop=%d comp=%d next=%d)\n",
		sts, dropped, !!(sts & 0x4), sts & 0x3);
	if (unlikely(dropped > 0)) {
		spin_lock(&fgdev->triplets_dropped_lock);
		fgdev->triplets_dropped += dropped;
		spin_unlock(&fgdev->triplets_dropped_lock);
	}

	/* Handle a cable state change (i.e. the wand being unplugged). */
	if (sts & 0x08) {
		handle_cstate_change(fgdev);
		goto out;
	}

	spin_lock(&fgdev->buffer_lock);
	/* No buffer queued: remember a triplet is waiting for queue_buffer(). */
	if (unlikely(list_empty(&fgdev->buffer_queue))) {

		/* FIXME need more sanity checking here */
		dev_info(dev, "buffer not ready for next transfer\n");
		fgdev->triplet_ready = 1;
		goto out_unlock;
	}

	/* Has a frame transfer been completed? */
	if (sts & 0x4) {
		u32 dma_status = b3dfg_read32(fgdev, B3D_REG_EC220_DMA_STS);

		/* Check for DMA errors reported by the hardware. */
		if (unlikely(dma_status & 0x1)) {
			dev_err(dev, "EC220 error: %08x\n", dma_status);

			/* FIXME flesh out error handling */
			goto out_unlock;
		}

		/* Sanity check, we should have a frame index at this point. */
		if (unlikely(fgdev->cur_dma_frame_idx == -1)) {
			dev_err(dev, "completed but no last idx?\n");

			/* FIXME flesh out error handling */
			goto out_unlock;
		}

		transfer_complete(fgdev);
	}

	/* Is there another frame transfer pending? */
	if (sts & 0x3)
		need_ack = setup_next_frame_transfer(fgdev, sts & 0x3);
	else
		fgdev->cur_dma_frame_idx = -1;

out_unlock:
	spin_unlock(&fgdev->buffer_lock);
out:
	if (need_ack) {
		dev_dbg(dev, "acknowledging interrupt\n");
		b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0x0b);
	}
	return res;
}
762 | ||
763 | static int b3dfg_open(struct inode *inode, struct file *filp) | |
764 | { | |
765 | struct b3dfg_dev *fgdev = | |
766 | container_of(inode->i_cdev, struct b3dfg_dev, chardev); | |
767 | ||
5f4e925a | 768 | dev_dbg(&fgdev->pdev->dev, "open\n"); |
44c88924 DD |
769 | filp->private_data = fgdev; |
770 | return 0; | |
771 | } | |
772 | ||
/* Last close of the device: stop transmission unconditionally. */
static int b3dfg_release(struct inode *inode, struct file *filp)
{
	struct b3dfg_dev *fgdev = filp->private_data;
	dev_dbg(&fgdev->pdev->dev, "release\n");
	disable_transmission(fgdev);
	return 0;
}
780 | ||
/*
 * ioctl dispatcher; see the B3DFG_IOC* defines for the userspace ABI.
 * Note B3DFG_IOCTNUMBUFS is defined above but not handled here.
 */
static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct b3dfg_dev *fgdev = filp->private_data;

	switch (cmd) {
	case B3DFG_IOCGFRMSZ:
		return __put_user(fgdev->frame_size, (int __user *) arg);
	case B3DFG_IOCGWANDSTAT:
		return get_wand_status(fgdev, (int __user *) arg);
	case B3DFG_IOCTTRANS:
		return set_transmission(fgdev, (int) arg);
	case B3DFG_IOCTQUEUEBUF:
		return queue_buffer(fgdev, (int) arg);
	case B3DFG_IOCTPOLLBUF:
		return poll_buffer(fgdev, (void __user *) arg);
	case B3DFG_IOCTWAITBUF:
		return wait_buffer(fgdev, (void __user *) arg);
	default:
		dev_dbg(&fgdev->pdev->dev, "unrecognised ioctl %x\n", cmd);
		return -EINVAL;
	}
}
803 | ||
/*
 * poll/select support: readable when any buffer holds a complete triplet;
 * POLLERR if transmission is off or the cable state changed while polling.
 */
static unsigned int b3dfg_poll(struct file *filp, poll_table *poll_table)
{
	struct b3dfg_dev *fgdev = filp->private_data;
	unsigned long flags, when;
	int i;
	int r = 0;

	/* Snapshot cable state before registering, to catch changes below. */
	when = get_cstate_change(fgdev);
	poll_wait(filp, &fgdev->buffer_waitqueue, poll_table);

	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	for (i = 0; i < b3dfg_nbuf; i++) {
		if (fgdev->buffers[i].state == B3DFG_BUFFER_POPULATED) {
			r = POLLIN | POLLRDNORM;
			break;
		}
	}
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* TODO: Confirm this is how we want to communicate the change. */
	if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev))
		r = POLLERR;

	return r;
}
829 | ||
830 | static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma) | |
831 | { | |
832 | struct b3dfg_dev *fgdev = filp->private_data; | |
833 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | |
834 | unsigned long vsize = vma->vm_end - vma->vm_start; | |
5f4e925a DG |
835 | unsigned long bufdatalen = b3dfg_nbuf * fgdev->frame_size * 3; |
836 | unsigned long psize = bufdatalen - offset; | |
44c88924 DD |
837 | int r = 0; |
838 | ||
5f4e925a DG |
839 | if (vsize <= psize) { |
840 | vma->vm_flags |= VM_IO | VM_RESERVED | VM_CAN_NONLINEAR | | |
841 | VM_PFNMAP; | |
842 | vma->vm_ops = &b3dfg_vm_ops; | |
843 | } else { | |
44c88924 | 844 | r = -EINVAL; |
44c88924 DD |
845 | } |
846 | ||
44c88924 DD |
847 | return r; |
848 | } | |
849 | ||
850 | static struct file_operations b3dfg_fops = { | |
851 | .owner = THIS_MODULE, | |
852 | .open = b3dfg_open, | |
853 | .release = b3dfg_release, | |
854 | .unlocked_ioctl = b3dfg_ioctl, | |
855 | .poll = b3dfg_poll, | |
856 | .mmap = b3dfg_mmap, | |
857 | }; | |
858 | ||
859 | static void free_all_frame_buffers(struct b3dfg_dev *fgdev) | |
860 | { | |
5f4e925a DG |
861 | int i, j; |
862 | for (i = 0; i < b3dfg_nbuf; i++) | |
863 | for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++) | |
864 | kfree(fgdev->buffers[i].frame[j]); | |
865 | kfree(fgdev->buffers); | |
44c88924 DD |
866 | } |
867 | ||
868 | /* initialize device and any data structures. called before any interrupts | |
869 | * are enabled. */ | |
870 | static int b3dfg_init_dev(struct b3dfg_dev *fgdev) | |
871 | { | |
5f4e925a | 872 | int i, j; |
44c88924 DD |
873 | u32 frm_size = b3dfg_read32(fgdev, B3D_REG_FRM_SIZE); |
874 | ||
5f4e925a DG |
875 | /* Disable interrupts. In abnormal circumstances (e.g. after a crash) |
876 | * the board may still be transmitting from the previous session. If we | |
877 | * ensure that interrupts are disabled before we later enable them, we | |
878 | * are sure to capture a triplet from the start, rather than starting | |
879 | * from frame 2 or 3. Disabling interrupts causes the FG to throw away | |
880 | * all buffered data and stop buffering more until interrupts are | |
881 | * enabled again. | |
882 | */ | |
44c88924 DD |
883 | b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0); |
884 | ||
885 | fgdev->frame_size = frm_size * 4096; | |
5f4e925a DG |
886 | fgdev->buffers = kzalloc(sizeof(struct b3dfg_buffer) * b3dfg_nbuf, |
887 | GFP_KERNEL); | |
888 | if (!fgdev->buffers) | |
889 | goto err_no_buf; | |
890 | for (i = 0; i < b3dfg_nbuf; i++) { | |
891 | struct b3dfg_buffer *buf = &fgdev->buffers[i]; | |
892 | for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++) { | |
893 | buf->frame[j] = kmalloc(fgdev->frame_size, GFP_KERNEL); | |
894 | if (!buf->frame[j]) | |
895 | goto err_no_mem; | |
896 | } | |
897 | INIT_LIST_HEAD(&buf->list); | |
44c88924 DD |
898 | } |
899 | ||
900 | INIT_LIST_HEAD(&fgdev->buffer_queue); | |
901 | init_waitqueue_head(&fgdev->buffer_waitqueue); | |
902 | spin_lock_init(&fgdev->buffer_lock); | |
5f4e925a | 903 | spin_lock_init(&fgdev->cstate_lock); |
44c88924 | 904 | spin_lock_init(&fgdev->triplets_dropped_lock); |
44c88924 DD |
905 | return 0; |
906 | ||
907 | err_no_mem: | |
908 | free_all_frame_buffers(fgdev); | |
5f4e925a | 909 | err_no_buf: |
44c88924 DD |
910 | return -ENOMEM; |
911 | } | |
912 | ||
913 | /* find next free minor number, returns -1 if none are availabile */ | |
914 | static int get_free_minor(void) | |
915 | { | |
916 | int i; | |
917 | for (i = 0; i < B3DFG_MAX_DEVS; i++) { | |
918 | if (b3dfg_devices[i] == 0) | |
919 | return i; | |
920 | } | |
921 | return -1; | |
922 | } | |
923 | ||
/*
 * PCI probe: bring up one frame-grabber board.
 *
 * Acquisition order (each step has a matching rollback label, unwound in
 * strict reverse order on failure): minor number -> cdev -> device node ->
 * PCI enable -> BAR validation -> PCI regions -> DMA mask -> register
 * ioremap -> buffer allocation -> IRQ.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit b3dfg_probe(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	struct b3dfg_dev *fgdev = kzalloc(sizeof(*fgdev), GFP_KERNEL);
	int r = 0;
	int minor = get_free_minor();
	dev_t devno = MKDEV(MAJOR(b3dfg_devt), minor);
	unsigned long res_len;
	resource_size_t res_base;

	if (fgdev == NULL)
		return -ENOMEM;

	if (minor < 0) {
		dev_err(&pdev->dev, "too many devices found!\n");
		r = -EIO;
		goto err_free;
	}

	/* claim the minor before anything that can sleep/fail */
	b3dfg_devices[minor] = 1;
	dev_info(&pdev->dev, "probe device with IRQ %d\n", pdev->irq);

	cdev_init(&fgdev->chardev, &b3dfg_fops);
	fgdev->chardev.owner = THIS_MODULE;

	r = cdev_add(&fgdev->chardev, devno, 1);
	if (r) {
		dev_err(&pdev->dev, "cannot add char device\n");
		goto err_release_minor;
	}

	/* create /dev node via the driver class; NOTE(review): drvdata is
	 * taken from the PCI device, presumably NULL at this point —
	 * confirm this is intentional. */
	fgdev->dev = device_create(
		b3dfg_class,
		&pdev->dev,
		devno,
		dev_get_drvdata(&pdev->dev),
		DRIVER_NAME "%d", minor);

	if (IS_ERR(fgdev->dev)) {
		dev_err(&pdev->dev, "cannot create device\n");
		r = PTR_ERR(fgdev->dev);
		goto err_del_cdev;
	}

	r = pci_enable_device(pdev);
	if (r) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_dev_unreg;
	}

	/* sanity-check the register BAR before mapping it */
	res_len = pci_resource_len(pdev, B3DFG_BAR_REGS);
	if (res_len != B3DFG_REGS_LENGTH) {
		dev_err(&pdev->dev, "invalid register resource size\n");
		r = -EIO;
		goto err_disable;
	}

	if (pci_resource_flags(pdev, B3DFG_BAR_REGS)
			!= (IORESOURCE_MEM | IORESOURCE_SIZEALIGN)) {
		dev_err(&pdev->dev, "invalid resource flags\n");
		r = -EIO;
		goto err_disable;
	}
	r = pci_request_regions(pdev, DRIVER_NAME);
	if (r) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_disable;
	}

	/* enable bus mastering for DMA transfers */
	pci_set_master(pdev);

	r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (r) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_free_res;
	}

	res_base = pci_resource_start(pdev, B3DFG_BAR_REGS);
	fgdev->regs = ioremap_nocache(res_base, res_len);
	if (!fgdev->regs) {
		dev_err(&pdev->dev, "regs ioremap failed\n");
		r = -EIO;
		goto err_free_res;
	}

	fgdev->pdev = pdev;
	pci_set_drvdata(pdev, fgdev);
	/* allocates frame buffers and initializes locks/queues; must run
	 * before request_irq since the handler touches those structures */
	r = b3dfg_init_dev(fgdev);
	if (r < 0) {
		dev_err(&pdev->dev, "failed to initalize device\n");
		goto err_unmap;
	}

	r = request_irq(pdev->irq, b3dfg_intr, IRQF_SHARED, DRIVER_NAME, fgdev);
	if (r) {
		dev_err(&pdev->dev, "couldn't request irq %d\n", pdev->irq);
		goto err_free_bufs;
	}

	return 0;

	/* rollback ladder: each label undoes the step acquired just before
	 * the corresponding goto, in reverse acquisition order */
err_free_bufs:
	free_all_frame_buffers(fgdev);
err_unmap:
	iounmap(fgdev->regs);
err_free_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_dev_unreg:
	device_destroy(b3dfg_class, devno);
err_del_cdev:
	cdev_del(&fgdev->chardev);
err_release_minor:
	b3dfg_devices[minor] = 0;
err_free:
	kfree(fgdev);
	return r;
}
1043 | ||
/*
 * PCI remove: tear down one board in the reverse order of b3dfg_probe.
 * The IRQ is freed first so the interrupt handler cannot run while the
 * registers, regions and buffers are being released.
 */
static void __devexit b3dfg_remove(struct pci_dev *pdev)
{
	struct b3dfg_dev *fgdev = pci_get_drvdata(pdev);
	unsigned int minor = MINOR(fgdev->chardev.dev);

	dev_dbg(&pdev->dev, "remove\n");

	free_irq(pdev->irq, fgdev);
	iounmap(fgdev->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	device_destroy(b3dfg_class, MKDEV(MAJOR(b3dfg_devt), minor));
	cdev_del(&fgdev->chardev);
	free_all_frame_buffers(fgdev);
	kfree(fgdev);
	/* release the minor last, after all per-device state is gone */
	b3dfg_devices[minor] = 0;
}
1061 | ||
/* PCI driver registration table; matched against b3dfg_ids. */
static struct pci_driver b3dfg_driver = {
	.name = DRIVER_NAME,
	.id_table = b3dfg_ids,
	.probe = b3dfg_probe,
	.remove = __devexit_p(b3dfg_remove),
};
1068 | ||
1069 | static int __init b3dfg_module_init(void) | |
1070 | { | |
1071 | int r; | |
1072 | ||
5f4e925a DG |
1073 | if (b3dfg_nbuf < 2) { |
1074 | printk(KERN_ERR DRIVER_NAME | |
ecf47451 | 1075 | ": buffer_count is out of range (must be >= 2)"); |
5f4e925a DG |
1076 | return -EINVAL; |
1077 | } | |
1078 | ||
1079 | printk(KERN_INFO DRIVER_NAME ": loaded\n"); | |
44c88924 DD |
1080 | |
1081 | b3dfg_class = class_create(THIS_MODULE, DRIVER_NAME); | |
1082 | if (IS_ERR(b3dfg_class)) | |
1083 | return PTR_ERR(b3dfg_class); | |
1084 | ||
1085 | r = alloc_chrdev_region(&b3dfg_devt, 0, B3DFG_MAX_DEVS, DRIVER_NAME); | |
1086 | if (r) | |
1087 | goto err1; | |
1088 | ||
1089 | r = pci_register_driver(&b3dfg_driver); | |
1090 | if (r) | |
1091 | goto err2; | |
1092 | ||
1093 | return r; | |
1094 | ||
1095 | err2: | |
1096 | unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS); | |
1097 | err1: | |
1098 | class_destroy(b3dfg_class); | |
1099 | return r; | |
1100 | } | |
1101 | ||
/* Module exit: undo b3dfg_module_init in reverse order. */
static void __exit b3dfg_module_exit(void)
{
	printk(KERN_INFO DRIVER_NAME ": unloaded\n");
	pci_unregister_driver(&b3dfg_driver);
	unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS);
	class_destroy(b3dfg_class);
}

module_init(b3dfg_module_init);
module_exit(b3dfg_module_exit);