/* -*- c-basic-offset: 8 -*-
 *
 * fw-device-cdev.c - Char device for device raw access
 *
 * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307  USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <asm/uaccess.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-device-cdev.h"

/*
 * TODO:
 *
 * - bus resets should send a new packet with the new generation and node id
 */

/* dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct. */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct fw_cdev_event_response response;
};

struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
	struct fw_device *device;
	spinlock_t lock;
	struct list_head handler_list;
	struct list_head request_list;
	u32 request_serial;
	struct list_head event_list;
	struct semaphore event_list_sem;
	wait_queue_head_t wait;

	struct fw_iso_context *iso_context;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
};
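
/* The cdev ABI carries user-space pointers in __u64 fields so the
 * ioctl structures have the same layout for 32-bit and 64-bit user
 * space (see the compat_ioctl handler below); these two helpers
 * convert between the pointer and __u64 representations. */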
static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = container_of(inode->i_cdev, struct fw_device, cdev);

	client = kzalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		return -ENOMEM;

	client->device = fw_device_get(device);
	INIT_LIST_HEAD(&client->event_list);
	sema_init(&client->event_list_sem, 0);
	INIT_LIST_HEAD(&client->handler_list);
	INIT_LIST_HEAD(&client->request_list);
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	return 0;
}
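
/* Append an event to the client's event list and wake up any process
 * sleeping in read() or poll().  The two (data, size) pairs allow a
 * fixed-size event struct plus an optional variable-size payload to
 * be queued without an extra copy. */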
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);

	list_add_tail(&event->link, &client->event_list);

	up(&client->event_list_sem);
	wake_up_interruptible(&client->wait);

	spin_unlock_irqrestore(&client->lock, flags);
}
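
/* Dequeue the oldest event and copy it to the user buffer.  release()
 * calls this with a NULL buffer and count == 0 simply to drain and
 * kfree() the remaining events. */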
static int dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, retval = -EFAULT;

	if (down_interruptible(&client->event_list_sem) < 0)
		return -EINTR;

	spin_lock_irqsave(&client->lock, flags);

	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);

	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size))
			goto out;
		total += size;
	}
	retval = total;

 out:
	kfree(event);

	return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}
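
/* Copy the device's configuration ROM to user space.  The length
 * passed to copy_to_user() is the offset of the end of the used part
 * of rom.data, so only rom.length quadlets plus the header fields are
 * copied, not the whole struct. */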
static int ioctl_config_rom(struct client *client, void __user *arg)
{
	struct fw_cdev_get_config_rom rom;

	rom.length = client->device->config_rom_length;
	memcpy(rom.data, client->device->config_rom, rom.length * 4);
	if (copy_to_user(arg, &rom,
			 (char *)&rom.data[rom.length] - (char *)&rom))
		return -EFAULT;

	return 0;
}
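
/* Completion callback for fw_send_request(): convert the transaction
 * result into an FW_CDEV_EVENT_RESPONSE event and queue it for the
 * client.  The response struct, including its payload buffer, was
 * allocated up front in ioctl_send_request(), so no allocation is
 * needed here, where we may be running in interrupt context. */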
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;

	if (length < response->response.length)
		response->response.length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(response->response.data, payload,
		       response->response.length);

	response->response.type = FW_CDEV_EVENT_RESPONSE;
	response->response.rcode = rcode;
	queue_event(client, &response->event,
		    &response->response, sizeof response->response,
		    response->response.data, response->response.length);
}

static ssize_t ioctl_send_request(struct client *client, void __user *arg)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request request;
	struct response *response;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	/* What is the biggest size we'll accept, really? */
	if (request.length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof *response + request.length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request.length;
	response->response.closure = request.closure;

	if (request.data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request.data), request.length)) {
		kfree(response);
		return -EFAULT;
	}

	fw_send_request(device->card, &response->transaction,
			request.tcode,
			device->node->node_id,
			device->card->generation,
			device->node->max_speed,
			request.offset,
			response->response.data, request.length,
			complete_transaction, response);

	if (request.data)
		return sizeof request + request.length;
	else
		return sizeof request;
}

struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct list_head link;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	u32 serial;
	struct list_head link;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};
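
/* Called by the core for incoming requests that match one of this
 * client's address regions: record the fw_request so that
 * ioctl_send_response() can look it up by serial number later, and
 * queue an FW_CDEV_EVENT_REQUEST event for user space. */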
static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	unsigned long flags;
	struct client *client = handler->client;

	request = kmalloc(sizeof *request, GFP_ATOMIC);
	e = kmalloc(sizeof *e, GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data    = payload;
	request->length  = length;

	spin_lock_irqsave(&client->lock, flags);
	request->serial = client->request_serial++;
	list_add_tail(&request->link, &client->request_list);
	spin_unlock_irqrestore(&client->lock, flags);

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.serial  = request->serial;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof e->request, payload, length);
}
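
/* FW_CDEV_IOC_ALLOCATE: claim the address region
 * [offset, offset + length) for this client and route requests that
 * hit it to handle_request() above. */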
static int ioctl_allocate(struct client *client, void __user *arg)
{
	struct fw_cdev_allocate request;
	struct address_handler *handler;
	unsigned long flags;
	struct fw_address_region region;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	handler = kmalloc(sizeof *handler, GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request.offset;
	region.end = request.offset + request.length;
	handler->handler.length = request.length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request.closure;
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&handler->link, &client->handler_list);
	spin_unlock_irqrestore(&client->lock, flags);

	return 0;
}
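
/* Find the pending request with the serial number user space received
 * in its request event, copy in the response payload and hand it back
 * to the core. */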
static int ioctl_send_response(struct client *client, void __user *arg)
{
	struct fw_cdev_send_response request;
	struct request *r;
	unsigned long flags;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->request_list, link) {
		if (r->serial == request.serial) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&r->link == &client->request_list)
		return -EINVAL;

	if (request.length < r->length)
		r->length = request.length;
	if (copy_from_user(r->data, u64_to_uptr(request.data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request.rcode);

	kfree(r);

	return 0;
}
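
/* Isochronous context callback: queue an FW_CDEV_EVENT_ISO_INTERRUPT
 * event carrying the cycle count at which the interrupt packet
 * completed. */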
static void
iso_callback(struct fw_iso_context *context, int status, u32 cycle, void *data)
{
	struct client *client = data;
	struct iso_interrupt *interrupt;

	interrupt = kzalloc(sizeof *interrupt, GFP_ATOMIC);
	if (interrupt == NULL)
		return;

	interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	interrupt->interrupt.closure = 0;
	interrupt->interrupt.cycle = cycle;
	queue_event(client, &interrupt->event,
		    &interrupt->interrupt, sizeof interrupt->interrupt, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void __user *arg)
{
	struct fw_cdev_create_iso_context request;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	if (request.type > FW_ISO_CONTEXT_RECEIVE)
		return -EINVAL;

	client->iso_context = fw_iso_context_create(client->device->card,
						    request.type,
						    request.header_size,
						    iso_callback, client);
	if (IS_ERR(client->iso_context))
		return PTR_ERR(client->iso_context);

	return 0;
}
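
/* Queue a batch of iso packets, described by an array of
 * fw_cdev_iso_packet structs at request.packets.  On return, request
 * is updated to point just past the packets actually queued, so user
 * space can resubmit the remainder if the context queue filled up. */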
static int ioctl_queue_iso(struct client *client, void __user *arg)
{
	struct fw_cdev_queue_iso request;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	unsigned long payload, payload_end, header_length;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (client->iso_context == NULL)
		return -EINVAL;
	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	/* If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request.data pointer is ignored. */

	payload = (unsigned long)request.data - client->vm_start;
	payload_end = payload + (client->buffer.page_count << PAGE_SHIFT);
	if (request.data == 0 || client->buffer.pages == NULL ||
	    payload >= payload_end) {
		payload = 0;
		payload_end = 0;
	}

	if (!access_ok(VERIFY_READ, request.packets, request.size))
		return -EFAULT;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request.packets);
	end = (void __user *)p + request.size;
	count = 0;
	while (p < end) {
		if (__copy_from_user(&u.packet, p, sizeof *p))
			return -EFAULT;

		if (client->iso_context->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/* We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size. */
			if (u.packet.header_length % client->iso_context->header_size != 0)
				return -EINVAL;
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > payload_end)
			return -EINVAL;

		if (fw_iso_context_queue(client->iso_context,
					 &u.packet, &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request.size    -= uptr_to_u64(p) - request.packets;
	request.packets  = uptr_to_u64(p);
	request.data     = client->vm_start + payload;

	if (copy_to_user(arg, &request, sizeof request))
		return -EFAULT;

	return count;
}

static int ioctl_start_iso(struct client *client, void __user *arg)
{
	struct fw_cdev_start_iso request;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	return fw_iso_context_start(client->iso_context, request.channel,
				    request.speed, request.cycle);
}

static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	switch (cmd) {
	case FW_CDEV_IOC_GET_CONFIG_ROM:
		return ioctl_config_rom(client, arg);
	case FW_CDEV_IOC_SEND_REQUEST:
		return ioctl_send_request(client, arg);
	case FW_CDEV_IOC_ALLOCATE:
		return ioctl_allocate(client, arg);
	case FW_CDEV_IOC_SEND_RESPONSE:
		return ioctl_send_response(client, arg);
	case FW_CDEV_IOC_CREATE_ISO_CONTEXT:
		return ioctl_create_iso_context(client, arg);
	case FW_CDEV_IOC_QUEUE_ISO:
		return ioctl_queue_iso(client, arg);
	case FW_CDEV_IOC_START_ISO:
		return ioctl_start_iso(client, arg);
	default:
		return -EINVAL;
	}
}

static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif
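
/* mmap() of the device maps the client's single iso DMA buffer.  A
 * writable mapping implies user space produces the data
 * (DMA_TO_DEVICE, transmit contexts); a read-only mapping implies the
 * device fills the buffer (DMA_FROM_DEVICE, receive contexts). */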
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;

	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
}
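
/* Tear down everything the client leaves behind: the iso buffer and
 * context, its address handlers, unanswered requests (answered here
 * with RCODE_CONFLICT_ERROR) and any events still queued. */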
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct address_handler *h, *next;
	struct request *r, *next_r;

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	list_for_each_entry_safe(h, next, &client->handler_list, link) {
		fw_core_remove_address_handler(&h->handler);
		kfree(h);
	}

	list_for_each_entry_safe(r, next_r, &client->request_list, link) {
		fw_send_response(client->device->card, r->request,
				 RCODE_CONFLICT_ERROR);
		kfree(r);
	}

	/* TODO: wait for all transactions to finish so
	 * complete_transaction doesn't try to queue up responses
	 * after we free client. */
	while (!list_empty(&client->event_list))
		dequeue_event(client, NULL, 0);

	fw_device_put(client->device);
	kfree(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;

	poll_wait(file, &client->wait, pt);

	if (!list_empty(&client->event_list))
		return POLLIN | POLLRDNORM;
	else
		return 0;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};