/* -*- c-basic-offset: 8 -*-
 *
 * fw-device-cdev.c - Char device for device raw access
 *
 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <asm/uaccess.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-device-cdev.h"
/* TODO: a bus reset sends a new packet with a new generation and node id. */
/* dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct. */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};
struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};
struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct fw_cdev_event_response response;
};
struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};
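
/*
 * All of the event types above follow the same pattern: a type-specific
 * struct embeds "struct event" as its first member, so the generic queue
 * code can link any of them into client->event_list and dequeue_event()
 * can kfree() the containing object through the embedded event.  A minimal
 * sketch of the pattern (illustrative only, the names are made up):
 *
 *	struct my_event {
 *		struct event event;	// must stay the first member
 *		struct my_payload payload;
 *	};
 *
 *	struct my_event *e = kzalloc(sizeof *e, GFP_ATOMIC);
 *	queue_event(client, &e->event, &e->payload, sizeof e->payload,
 *		    NULL, 0);
 *
 * Because the embedded event sits at offset 0, the pointer handed to
 * queue_event() and the pointer later passed to kfree() are the same
 * address.
 */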
struct client {
	struct fw_device *device;
	spinlock_t lock;
	struct list_head handler_list;
	struct list_head request_list;
	u32 request_serial;
	struct list_head event_list;
	struct semaphore event_list_sem;
	wait_queue_head_t wait;

	struct fw_iso_context *iso_context;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
};
static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}
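
/*
 * The two helpers above exist because the character-device ABI carries
 * user pointers in __u64 fields, so 32-bit and 64-bit userspace see the
 * same structure layout and the compat ioctl path needs no per-command
 * translation.  A userspace client fills such a field by casting through
 * an integer type; illustrative sketch only (the payload buffer is an
 * assumption, not part of this file):
 *
 *	__u32 payload[4];
 *	struct fw_cdev_send_request req;
 *
 *	memset(&req, 0, sizeof req);
 *	req.data = (__u64)(unsigned long)payload;	// pointer -> __u64
 *
 * The kernel side converts back with u64_to_uptr() before calling
 * copy_from_user() or copy_to_user().
 */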
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;
	unsigned long flags;

	device = container_of(inode->i_cdev, struct fw_device, cdev);

	client = kzalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		return -ENOMEM;

	client->device = fw_device_get(device);
	INIT_LIST_HEAD(&client->event_list);
	sema_init(&client->event_list_sem, 0);
	INIT_LIST_HEAD(&client->handler_list);
	INIT_LIST_HEAD(&client->request_list);
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&client->link, &device->client_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	return 0;
}
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);

	list_add_tail(&event->link, &client->event_list);

	up(&client->event_list_sem);
	wake_up_interruptible(&client->wait);

	spin_unlock_irqrestore(&client->lock, flags);
}
static int dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, retval = -EFAULT;

	if (down_interruptible(&client->event_list_sem) < 0)
		return -EINTR;

	spin_lock_irqsave(&client->lock, flags);

	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);

	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size))
			goto out;
		total += size;
	}
	retval = total;

 out:
	kfree(event);

	return retval;
}
static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}
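
/*
 * From userspace, the events queued above are consumed with poll()
 * followed by read(); each read() returns exactly one event, and the
 * event kind is told apart by its type field.  Illustrative sketch only
 * (it assumes, as the structures in fw-device-cdev.h lay it out, that the
 * type field sits at the same offset in every event struct; error
 * handling omitted and handle_bus_reset() stands in for application code):
 *
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	if (read(fd, buf, sizeof buf) > 0) {
 *		struct fw_cdev_event_bus_reset *reset = (void *) buf;
 *
 *		if (reset->type == FW_CDEV_EVENT_BUS_RESET)
 *			handle_bus_reset(reset);
 *	}
 */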
static void
queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;
	struct fw_device *device = client->device;
	struct fw_card *card = device->card;

	bus_reset = kzalloc(sizeof *bus_reset, GFP_ATOMIC);
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	bus_reset->reset.type          = FW_CDEV_EVENT_BUS_RESET;
	bus_reset->reset.node_id       = device->node_id;
	bus_reset->reset.local_node_id = card->local_node->node_id;
	bus_reset->reset.bm_node_id    = 0; /* FIXME: We don't track the BM. */
	bus_reset->reset.irm_node_id   = card->irm_node->node_id;
	bus_reset->reset.root_node_id  = card->root_node->node_id;
	bus_reset->reset.generation    = card->generation;

	queue_event(client, &bus_reset->event,
		    &bus_reset->reset, sizeof bus_reset->reset, NULL, 0);
}
void fw_device_cdev_update(struct fw_device *device)
{
	struct fw_card *card = device->card;
	struct client *c;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(c, &device->client_list, link)
		queue_bus_reset_event(c);

	spin_unlock_irqrestore(&card->lock, flags);
}
static int ioctl_config_rom(struct client *client, void __user *arg)
{
	struct fw_cdev_get_config_rom rom;

	rom.length = client->device->config_rom_length;
	memcpy(rom.data, client->device->config_rom, rom.length * 4);
	if (copy_to_user(arg, &rom,
			 (char *)&rom.data[rom.length] - (char *)&rom))
		return -EFAULT;

	return 0;
}
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;

	if (length < response->response.length)
		response->response.length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(response->response.data, payload,
		       response->response.length);

	response->response.type  = FW_CDEV_EVENT_RESPONSE;
	response->response.rcode = rcode;
	queue_event(client, &response->event,
		    &response->response, sizeof response->response,
		    response->response.data, response->response.length);
}
static ssize_t
ioctl_send_request(struct client *client, void __user *arg)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request request;
	struct response *response;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	/* What is the biggest size we'll accept, really? */
	if (request.length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof *response + request.length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request.length;
	response->response.closure = request.closure;

	if (request.data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request.data), request.length)) {
		kfree(response);
		return -EFAULT;
	}

	fw_send_request(device->card, &response->transaction,
			request.tcode,
			device->node->node_id,
			device->card->generation,
			device->node->max_speed,
			request.offset,
			response->response.data, request.length,
			complete_transaction, response);

	if (request.data)
		return sizeof request + request.length;
	else
		return sizeof request;
}
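
/*
 * The complete round trip for an asynchronous transaction, as seen from
 * userspace: fill in a struct fw_cdev_send_request, issue
 * FW_CDEV_IOC_SEND_REQUEST, then pick up the matching
 * FW_CDEV_EVENT_RESPONSE from read() and correlate it through the closure
 * value echoed back above.  Illustrative sketch only; the tcode and
 * offset are example values, not defaults:
 *
 *	struct fw_cdev_send_request req;
 *
 *	memset(&req, 0, sizeof req);
 *	req.tcode   = TCODE_READ_QUADLET_REQUEST;
 *	req.offset  = 0xfffff0000400ULL;	// config ROM area of the target
 *	req.length  = 4;
 *	req.closure = 0xc0ffee;
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &req);
 *
 * A later read() returns a struct fw_cdev_event_response whose closure
 * field equals req.closure and whose rcode and data carry the result
 * filled in by complete_transaction() above.
 */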
struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct list_head link;
};
struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	u32 serial;
	struct list_head link;
};
struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};
static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	unsigned long flags;
	struct client *client = handler->client;

	request = kmalloc(sizeof *request, GFP_ATOMIC);
	e = kmalloc(sizeof *e, GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data    = payload;
	request->length  = length;

	spin_lock_irqsave(&client->lock, flags);
	request->serial = client->request_serial++;
	list_add_tail(&request->link, &client->request_list);
	spin_unlock_irqrestore(&client->lock, flags);

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.serial  = request->serial;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof e->request, payload, length);
}
static int ioctl_allocate(struct client *client, void __user *arg)
{
	struct fw_cdev_allocate request;
	struct address_handler *handler;
	unsigned long flags;
	struct fw_address_region region;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	handler = kmalloc(sizeof *handler, GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request.offset;
	region.end = request.offset + request.length;
	handler->handler.length = request.length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request.closure;
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&handler->link, &client->handler_list);
	spin_unlock_irqrestore(&client->lock, flags);

	return 0;
}
static int ioctl_send_response(struct client *client, void __user *arg)
{
	struct fw_cdev_send_response request;
	struct request *r;
	unsigned long flags;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->request_list, link) {
		if (r->serial == request.serial) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&r->link == &client->request_list)
		return -EINVAL;

	if (request.length < r->length)
		r->length = request.length;
	if (copy_from_user(r->data, u64_to_uptr(request.data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request.rcode);

	kfree(r);

	return 0;
}
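
/*
 * ioctl_allocate() and ioctl_send_response() together let a process act
 * as the responder for a range of 1394 address space: allocate the range,
 * wait for FW_CDEV_EVENT_REQUEST events (generated by handle_request()
 * above), and answer each one by its serial number.  Illustrative sketch
 * only; the offset, length and rcode are example values:
 *
 *	struct fw_cdev_allocate alloc;
 *	struct fw_cdev_send_response resp;
 *
 *	memset(&alloc, 0, sizeof alloc);
 *	alloc.offset = 0xffff12340000ULL;	// hypothetical address range
 *	alloc.length = 0x1000;
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc);
 *
 *	// ... on a FW_CDEV_EVENT_REQUEST event carrying serial number s:
 *	memset(&resp, 0, sizeof resp);
 *	resp.serial = s;
 *	resp.rcode  = RCODE_COMPLETE;
 *	resp.length = 0;			// no response payload
 *	ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &resp);
 */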
static void
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *interrupt;

	interrupt = kzalloc(sizeof *interrupt + header_length, GFP_ATOMIC);
	if (interrupt == NULL)
		return;

	interrupt->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
	interrupt->interrupt.closure       = 0;
	interrupt->interrupt.cycle         = cycle;
	interrupt->interrupt.header_length = header_length;
	memcpy(interrupt->interrupt.header, header, header_length);
	queue_event(client, &interrupt->event,
		    &interrupt->interrupt,
		    sizeof interrupt->interrupt + header_length, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, void __user *arg)
{
	struct fw_cdev_create_iso_context request;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	if (request.type > FW_ISO_CONTEXT_RECEIVE)
		return -EINVAL;

	if (request.channel > 63)
		return -EINVAL;

	if (request.sync > 15)
		return -EINVAL;

	if (request.tags == 0 || request.tags > 15)
		return -EINVAL;

	if (request.speed > SCODE_3200)
		return -EINVAL;
	client->iso_context = fw_iso_context_create(client->device->card,
						    /* ... */
						    iso_callback, client);
	if (IS_ERR(client->iso_context))
		return PTR_ERR(client->iso_context);

	return 0;
}
static int ioctl_queue_iso(struct client *client, void __user *arg)
{
	struct fw_cdev_queue_iso request;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, payload_end, header_length;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL)
		return -EINVAL;
	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	/* If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request.data pointer is ignored. */

	payload = (unsigned long)request.data - client->vm_start;
	payload_end = payload + (client->buffer.page_count << PAGE_SHIFT);
	if (request.data == 0 || client->buffer.pages == NULL ||
	    payload >= payload_end) {
		payload = 0;
		payload_end = 0;
	}

	if (!access_ok(VERIFY_READ, request.packets, request.size))
		return -EFAULT;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request.packets);
	end = (void __user *)p + request.size;
	while (p < end) {
		if (__copy_from_user(&u.packet, p, sizeof *p))
			return -EFAULT;

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/* We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size. */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > payload_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
	}

	request.size   -= uptr_to_u64(p) - request.packets;
	request.packets = uptr_to_u64(p);
	request.data    = client->vm_start + payload;

	if (copy_to_user(arg, &request, sizeof request))
		return -EFAULT;

	return 0;
}
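
/*
 * The payload handling above relies on the buffer set up by
 * fw_device_op_mmap() below: packet payloads are not copied through the
 * ioctl; instead request.data points into the mmap()'ed iso buffer and is
 * translated into a buffer offset via client->vm_start.  Illustrative
 * userspace sketch only (the size and the packet array are assumptions,
 * to be checked against fw-device-cdev.h):
 *
 *	size_t buf_size = 16 * 4096;
 *	void *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	struct fw_cdev_queue_iso qi;
 *
 *	memset(&qi, 0, sizeof qi);
 *	qi.packets = (__u64)(unsigned long)packet_array;
 *	qi.size    = packet_array_bytes;
 *	qi.data    = (__u64)(unsigned long)buf;	// first payload goes here
 *	ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &qi);
 *
 * On return the kernel has advanced qi.packets, qi.size and qi.data past
 * what it accepted, so the call can simply be repeated to queue the rest.
 */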
static int ioctl_start_iso(struct client *client, void __user *arg)
{
	struct fw_cdev_start_iso request;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	return fw_iso_context_start(client->iso_context, request.cycle);
}
static int ioctl_stop_iso(struct client *client, void __user *arg)
{
	return fw_iso_context_stop(client->iso_context);
}
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	switch (cmd) {
	case FW_CDEV_IOC_GET_CONFIG_ROM:
		return ioctl_config_rom(client, arg);
	case FW_CDEV_IOC_SEND_REQUEST:
		return ioctl_send_request(client, arg);
	case FW_CDEV_IOC_ALLOCATE:
		return ioctl_allocate(client, arg);
	case FW_CDEV_IOC_SEND_RESPONSE:
		return ioctl_send_response(client, arg);
	case FW_CDEV_IOC_CREATE_ISO_CONTEXT:
		return ioctl_create_iso_context(client, arg);
	case FW_CDEV_IOC_QUEUE_ISO:
		return ioctl_queue_iso(client, arg);
	case FW_CDEV_IOC_START_ISO:
		return ioctl_start_iso(client, arg);
	case FW_CDEV_IOC_STOP_ISO:
		return ioctl_stop_iso(client, arg);
	default:
		return -EINVAL;
	}
}
static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}
#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;

	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct address_handler *h, *next;
	struct request *r, *next_r;
	unsigned long flags;

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	list_for_each_entry_safe(h, next, &client->handler_list, link) {
		fw_core_remove_address_handler(&h->handler);
		kfree(h);
	}

	list_for_each_entry_safe(r, next_r, &client->request_list, link) {
		fw_send_response(client->device->card, r->request,
				 RCODE_CONFLICT_ERROR);
		kfree(r);
	}

	/* TODO: wait for all transactions to finish so
	 * complete_transaction doesn't try to queue up responses
	 * after we free client. */
	while (!list_empty(&client->event_list))
		dequeue_event(client, NULL, 0);

	spin_lock_irqsave(&client->device->card->lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&client->device->card->lock, flags);

	fw_device_put(client->device);
	kfree(client);

	return 0;
}
static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;

	poll_wait(file, &client->wait, pt);

	if (!list_empty(&client->event_list))
		return POLLIN | POLLRDNORM;
	else
		return 0;
}
const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};