/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"
#define DRV_NAME	"rio_mport"
#define DRV_PREFIX	DRV_NAME ": "
#define DEV_NAME	"rio_mport"
#define DRV_VERSION	"1.0.0"
/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
	DBG_ALL		= ~0,
};
#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
	void		*ib_base;
	dma_addr_t	ib_phys;
	u32		ib_size;
	u64		ib_rio_base;
	bool		ib_map;
	struct file	*filp;
};
/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u32 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};

struct rio_mport_dma_map {
	int valid;
	uint64_t length;
	void *vaddr;
	dma_addr_t paddr;
};
#define MPORT_MAX_DMA_BUFS	16
#define MPORT_EVENT_DEPTH	10

/*
 * mport_dev  driver-specific structure that represents mport device
 * @active     mport device status flag
 * @node       list node to maintain list of registered mports
 * @cdev       character device
 * @dev        associated device object
 * @mport      associated subsystem's master port device object
 * @buf_mutex  lock for buffer handling
 * @file_mutex lock for open files list
 * @file_list  list of open files on given mport
 * @properties properties of this mport
 * @portwrites queue of inbound portwrites
 * @pw_lock    lock for port write queue
 * @mappings   queue for memory mappings
 * @dma_chan   DMA channels associated with this device
 */
struct mport_dev {
	atomic_t		active;
	struct list_head	node;
	struct cdev		cdev;
	struct device		dev;
	struct rio_mport	*mport;
	struct mutex		buf_mutex;
	struct mutex		file_mutex;
	struct list_head	file_list;
	struct rio_mport_properties	properties;
	struct list_head	doorbells;
	spinlock_t		db_lock;
	struct list_head	portwrites;
	spinlock_t		pw_lock;
	struct list_head	mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dma_chan;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};
/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 */
struct mport_cdev_priv {
	struct mport_dev	*md;
	struct fasync_struct	*async_queue;
	struct list_head	list;
	struct list_head	db_filters;
	struct list_head	pw_filters;
	struct kfifo		event_fifo;
	wait_queue_head_t	event_rx_wait;
	spinlock_t		fifo_lock;
	unsigned int		event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dmach;
	struct list_head	async_list;
	struct list_head	pend_list;
	spinlock_t		req_lock;
	struct mutex		dma_lock;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};
/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};
/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node reference to device node
 * @priv_node node in private data
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};
static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static struct workqueue_struct *dma_wq;
static void mport_release_mapping(struct kref *ref);
static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4))
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}
static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4))
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}
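
/*
 * Illustrative user-space sketch (an assumption, not part of this file):
 * the two helpers above back the RIO_MPORT_MAINT_*_LOCAL/_REMOTE ioctls,
 * so a caller fills struct rio_mport_maint_io with a 4-byte-aligned offset
 * and length plus a user buffer, e.g. to read 16 bytes of config space
 * from a remote device:
 *
 *	uint32_t regs[4];
 *	struct rio_mport_maint_io io = {
 *		.rioid = destid, .hopcount = hc,
 *		.offset = 0, .length = sizeof(regs), .buffer = regs,
 *	};
 *	if (ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io) < 0)
 *		perror("maint read");
 */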
/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u32 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}
static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u32 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}
static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err= %d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap))))
		return -EFAULT;

	return 0;
}
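
/*
 * Illustrative user-space sketch (an assumption, not part of this file):
 * RIO_MAP_OUTBOUND returns the window's physical handle in map.handle,
 * which is then passed as the mmap() offset so mport_cdev_mmap() below
 * can locate the mapping:
 *
 *	struct rio_mmap map = {
 *		.rioid = destid, .rio_addr = raddr, .length = win_size,
 *	};
 *	if (ioctl(fd, RIO_MAP_OUTBOUND, &map) == 0) {
 *		void *ptr = mmap(NULL, win_size, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, map.handle);
 *		// stores to ptr now reach the remote device's memory
 *	}
 */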
/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 * @priv: driver private data
 * @arg:  buffer handle returned by allocation routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(u64)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg:  Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	uint16_t hdid;

	if (copy_from_user(&hdid, arg, sizeof(uint16_t)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}
/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg:  Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	uint32_t comptag;

	if (copy_from_user(&comptag, arg, sizeof(uint32_t)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status status;
	struct completion req_comp;
};

struct mport_faf_work {
	struct work_struct work;
	struct mport_dma_req *req;
};
static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}
static void dma_req_free(struct mport_dma_req *req)
{
	struct mport_cdev_priv *priv = req->priv;
	unsigned int i;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		for (i = 0; i < req->nr_pages; i++)
			put_page(req->page_list[i]);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}
static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
}
static void dma_faf_cleanup(struct work_struct *_work)
{
	struct mport_faf_work *work = container_of(_work,
						struct mport_faf_work, work);
	struct mport_dma_req *req = work->req;

	dma_req_free(req);
	kfree(work);
}

static void dma_faf_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_faf_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	INIT_WORK(&work->work, dma_faf_cleanup);
	work->req = req;
	queue_work(dma_wq, &work->work);
}
/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns pointer to DMA transaction descriptor allocated by DMA driver on
 * success or ERR_PTR (and/or NULL) if failed. Caller must check returned
 * non-NULL pointer using IS_ERR macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}
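
/*
 * Illustrative sketch (an assumption, not part of this file): because
 * prep_dma_xfer() may fail with either a NULL pointer or an ERR_PTR()
 * value, a caller has to test both forms, as do_dma_request() below does:
 *
 *	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
 *			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -EIO;		// no descriptor from DMA driver
 *	else if (IS_ERR(tx))
 *		return PTR_ERR(tx);	// explicit error code
 */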
/* Request DMA channel associated with this mport device.
 * Try to request DMA channel for every new process that opened given
 * mport. If a new DMA channel is not available use default channel
 * which is the first DMA channel opened on mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}
/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		goto err_out;
	}

	if (sync == RIO_TRANSFER_FAF)
		tx->callback = dma_faf_callback;
	else
		tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->dmach = chan;
	req->sync = sync;
	req->status = DMA_IN_PROGRESS;
	init_completion(&req->req_comp);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
		       current->comm, task_pid_nr(current),
		       (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			current->comm, task_pid_nr(current),
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}
/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int i, ret;
	int nents;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned long offset;
		long pinned;

		offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK;
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		down_read(&current->mm->mmap_sem);
		pinned = get_user_pages(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages, dir == DMA_FROM_DEVICE, 0,
				page_list, NULL);
		up_read(&current->mm->mmap_sem);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("get_user_pages err=%ld", pinned);
				nr_pages = 0;
			} else
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
			ret = -EFAULT;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
				offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   map->virt_addr + (baddr - map->phys_addr) +
				xfer->offset, xfer->length);
	}

	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	chan = priv->dmach;

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		return -EFAULT;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_SYNC)
			goto sync_out;
		return ret; /* return ASYNC cookie */
	}

	if (ret == -ETIMEDOUT || ret == -EINTR) {
		/*
		 * This can happen only in case of SYNC transfer.
		 * Do not free unfinished request structure immediately.
		 * Place it into pending list and deal with it later
		 */
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->pend_list);
		spin_unlock(&priv->req_lock);
		return ret;
	}

	rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
sync_out:
	dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
	sg_free_table(&req->sgt);
err_pg:
	if (!req->page_list) {
		for (i = 0; i < nr_pages; i++)
			put_page(page_list[i]);
		kfree(page_list);
	}
err_req:
	if (req->map) {
		mutex_lock(&md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
	}
	put_dma_channel(priv);
	kfree(req);
	return ret;
}
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1)
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io));
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer, transaction.block,
			transaction.count * sizeof(struct rio_transfer_io)))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
			transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user(transaction.block, transfer,
			transaction.count * sizeof(struct rio_transfer_io))))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}
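
/*
 * Illustrative user-space sketch (an assumption, not part of this file):
 * a synchronous write of one user buffer through the RIO_TRANSFER ioctl
 * fills one struct rio_transfer_io block and a struct rio_transaction
 * with count == 1, as required by the check above:
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid = destid, .rio_addr = raddr,
 *		.loc_addr = (uintptr_t)buf, .length = len,
 *		.method = RIO_EXCHANGE_DEFAULT,
 *	};
 *	struct rio_transaction tr = {
 *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *		.sync = RIO_TRANSFER_SYNC, .dir = RIO_TRANSFER_DIR_WRITE,
 *		.count = 1, .block = &xfer,
 *	};
 *	if (ioctl(fd, RIO_TRANSFER, &tr) < 0)
 *		perror("RIO_TRANSFER");
 */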
static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct mport_dev *md;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;
	md = priv->md;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
		       current->comm, task_pid_nr(current),
		       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be still in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
			   req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		dma_req_free(req);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}
static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
			uint64_t size, struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}
static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}
static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(u64)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				u64 raddr, u32 size,
				struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}
static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u32 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;
get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}
static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}
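
/*
 * Illustrative user-space sketch (an assumption, not part of this file):
 * passing RIO_MAP_ANY_ADDR in rio_addr lets the driver place the inbound
 * window; the chosen RapidIO address comes back in map.rio_addr and the
 * mmap() offset in map.handle:
 *
 *	struct rio_mmap map = {
 *		.rio_addr = RIO_MAP_ANY_ADDR, .length = ib_size,
 *	};
 *	if (ioctl(fd, RIO_MAP_INBOUND, &map) == 0) {
 *		void *ib = mmap(NULL, ib_size, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, fd, map.handle);
 *		// remote peers can now write to map.rio_addr
 *	}
 */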
/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                    previously allocated inbound DMA coherent buffer
 * @priv: driver private data
 * @arg:  buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(u64)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg:  port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	uint32_t port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}
static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}
static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == 0xffffffff ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			"%s: spurious DB received from 0x%x, info=0x%04x\n",
			__func__, src, info);
}
static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}
static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}
static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}
static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
		(msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}
static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}
static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}
static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}

static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}
static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}
/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device struct associated with the kernel device object.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
}

static void rio_release_net(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
}
/*
 * rio_mport_add_riodev - creates a kernel RIO device object
 *
 * Allocates a RIO device data structure and initializes required fields based
 * on device's configuration space contents.
 * If the device has switch capabilities, then a switch specific portion is
 * allocated and configured.
 */
static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
				void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	struct rio_mport *mport;
	size_t size;
	u32 rval;
	u32 swpinfo = 0;
	u16 destid;
	u8 hopcount;
	int err;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;

	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

	if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
		return -EEXIST;
	}

	size = sizeof(struct rio_dev);
	mport = md->mport;
	destid = (u16)dev_info.destid;
	hopcount = (u8)dev_info.hopcount;

	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
		return -EIO;

	if (rval & RIO_PEF_SWITCH) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		size += (RIO_GET_TOTAL_PORTS(swpinfo) *
			 sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;

	if (mport->net == NULL) {
		struct rio_net *net;

		net = rio_alloc_net(mport);
		if (!net) {
			err = -ENOMEM;
			rmcd_debug(RDEV, "failed to allocate net object");
			goto cleanup;
		}

		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_release_net;
		err = rio_add_net(net);
		if (err) {
			rmcd_debug(RDEV, "failed to register net, err=%d", err);
			kfree(net);
			goto cleanup;
		}
	}

	rdev->net = mport->net;
	rdev->pef = rval;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;

	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = rval & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
							 hopcount);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
	}

	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	rdev->comp_tag = dev_info.comptag;
	rdev->destid = destid;
	/* hopcount is stored as specified by a caller, regardless of EP or SW */
	rdev->hopcount = hopcount;

	if (rdev->pef & RIO_PEF_SWITCH) {
		rswitch = rdev->rswitch;
		rswitch->route_table = NULL;
	}

	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

	INIT_LIST_HEAD(&rdev->net_list);
	rdev->dev.parent = &mport->net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
	err = rio_add_device(rdev);
	if (err)
		goto cleanup;
	rio_dev_get(rdev);

	return 0;
cleanup:
	kfree(rdev);
	return err;
}
static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
{
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev = NULL;
	struct device *dev;
	struct rio_mport *mport;
	struct rio_net *net;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;

	mport = priv->md->mport;

	/* If device name is specified, removal by name has priority */
	if (strlen(dev_info.name)) {
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
		if (dev)
			rdev = to_rio_dev(dev);
	} else {
		do {
			rdev = rio_get_comptag(dev_info.comptag, rdev);
			if (rdev && rdev->dev.parent == &mport->net->dev &&
			    rdev->destid == (u16)dev_info.destid &&
			    rdev->hopcount == (u8)dev_info.hopcount)
				break;
		} while (rdev);
	}

	if (!rdev) {
		rmcd_debug(RDEV,
			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			dev_info.name, dev_info.comptag, dev_info.destid,
			dev_info.hopcount);
		return -ENODEV;
	}

	net = rdev->net;
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
		rio_free_net(net);
		mport->net = NULL;
	}

	return 0;
}
/*
 * Mport cdev management
 */

/*
 * mport_cdev_open() - Open character device (mport)
 */
static int mport_cdev_open(struct inode *inode, struct file *filp)
{
	int ret;
	int minor = iminor(inode);
	struct mport_dev *chdev;
	struct mport_cdev_priv *priv;

	/* Test for valid device */
	if (minor >= RIO_MAX_MPORTS) {
		rmcd_error("Invalid minor device number");
		return -EINVAL;
	}

	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);

	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		put_device(&chdev->dev);
		return -ENOMEM;
	}

	priv->md = chdev;

	mutex_lock(&chdev->file_mutex);
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);

	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
	spin_lock_init(&priv->fifo_lock);
	init_waitqueue_head(&priv->event_rx_wait);
	ret = kfifo_alloc(&priv->event_fifo,
			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
			  GFP_KERNEL);
	if (ret < 0) {
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
		ret = -ENOMEM;
		goto err_fifo;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	INIT_LIST_HEAD(&priv->async_list);
	INIT_LIST_HEAD(&priv->pend_list);
	spin_lock_init(&priv->req_lock);
	mutex_init(&priv->dma_lock);
#endif

	filp->private_data = priv;
	goto out;
err_fifo:
	kfree(priv);
out:
	return ret;
}
static int mport_cdev_fasync(int fd, struct file *filp, int mode)
{
	struct mport_cdev_priv *priv = filp->private_data;

	return fasync_helper(fd, filp, mode, &priv->async_queue);
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
static void mport_cdev_release_dma(struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	struct mport_dma_req *req, *req_next;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	long wret;
	LIST_HEAD(list);

	rmcd_debug(EXIT, "from filp=%p %s(%d)",
		   filp, current->comm, task_pid_nr(current));

	if (!priv->dmach) {
		rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
		return;
	}

	md = priv->md;

	flush_workqueue(dma_wq);

	spin_lock(&priv->req_lock);
	if (!list_empty(&priv->async_list)) {
		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		list_splice_init(&priv->async_list, &list);
	}
	spin_unlock(&priv->req_lock);

	if (!list_empty(&list)) {
		rmcd_debug(EXIT, "temp list not empty");
		list_for_each_entry_safe(req, req_next, &list, node) {
			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
				   req->filp, req->cookie,
				   completion_done(&req->req_comp)?"yes":"no");
			list_del(&req->node);
			dma_req_free(req);
		}
	}

	if (!list_empty(&priv->pend_list)) {
		rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		list_for_each_entry_safe(req,
					 req_next, &priv->pend_list, node) {
			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
				   req->filp, req->cookie,
				   completion_done(&req->req_comp)?"yes":"no");
			list_del(&req->node);
			dma_req_free(req);
		}
	}

	put_dma_channel(priv);
	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);

	if (wret <= 0) {
		rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
			current->comm, task_pid_nr(current), wret);
	}

	spin_lock(&priv->req_lock);

	if (!list_empty(&priv->pend_list)) {
		rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
	}

	spin_unlock(&priv->req_lock);

	if (priv->dmach != priv->md->dma_chan) {
		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		rio_release_dma(priv->dmach);
	} else {
		rmcd_debug(EXIT, "Adjust default DMA channel refcount");
		kref_put(&md->dma_ref, mport_release_def_dma);
	}

	priv->dmach = NULL;
}
#else
#define mport_cdev_release_dma(priv) do {} while (0)
#endif
/*
 * mport_cdev_release() - Release character device
 */
static int mport_cdev_release(struct inode *inode, struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *chdev;
	struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
	struct rio_mport_db_filter *db_filter, *db_filter_next;
	struct rio_mport_mapping *map, *_map;
	unsigned long flags;

	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);

	chdev = priv->md;
	mport_cdev_release_dma(filp);

	priv->event_mask = 0;

	spin_lock_irqsave(&chdev->pw_lock, flags);
	if (!list_empty(&priv->pw_filters)) {
		list_for_each_entry_safe(pw_filter, pw_filter_next,
					 &priv->pw_filters, priv_node)
			rio_mport_delete_pw_filter(pw_filter);
	}
	spin_unlock_irqrestore(&chdev->pw_lock, flags);

	spin_lock_irqsave(&chdev->db_lock, flags);
	list_for_each_entry_safe(db_filter, db_filter_next,
				 &priv->db_filters, priv_node) {
		rio_mport_delete_db_filter(db_filter);
	}
	spin_unlock_irqrestore(&chdev->db_lock, flags);

	kfifo_free(&priv->event_fifo);

	mutex_lock(&chdev->buf_mutex);
	list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
		if (map->filp == filp) {
			rmcd_debug(EXIT, "release mapping %p filp=%p",
				   map->virt_addr, filp);
			kref_put(&map->ref, mport_release_mapping);
		}
	}
	mutex_unlock(&chdev->buf_mutex);

	mport_cdev_fasync(-1, filp, 0);
	filp->private_data = NULL;
	mutex_lock(&chdev->file_mutex);
	list_del(&priv->list);
	mutex_unlock(&chdev->file_mutex);
	put_device(&chdev->dev);
	kfree(priv);
	return 0;
}
/*
 * mport_cdev_ioctl() - IOCTLs for character device
 */
static long mport_cdev_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int err = -EINVAL;
	struct mport_cdev_priv *data = filp->private_data;
	struct mport_dev *md = data->md;

	if (atomic_read(&md->active) == 0)
		return -ENODEV;

	switch (cmd) {
	case RIO_MPORT_MAINT_READ_LOCAL:
		return rio_mport_maint_rd(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_WRITE_LOCAL:
		return rio_mport_maint_wr(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_READ_REMOTE:
		return rio_mport_maint_rd(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_WRITE_REMOTE:
		return rio_mport_maint_wr(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_HDID_SET:
		return maint_hdid_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_COMPTAG_SET:
		return maint_comptag_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_PORT_IDX_GET:
		return maint_port_idx_get(data, (void __user *)arg);
	case RIO_MPORT_GET_PROPERTIES:
		md->properties.hdid = md->mport->host_deviceid;
		if (copy_to_user((void __user *)arg, &(data->md->properties),
				 sizeof(data->md->properties)))
			return -EFAULT;
		return 0;
	case RIO_ENABLE_DOORBELL_RANGE:
		return rio_mport_add_db_filter(data, (void __user *)arg);
	case RIO_DISABLE_DOORBELL_RANGE:
		return rio_mport_remove_db_filter(data, (void __user *)arg);
	case RIO_ENABLE_PORTWRITE_RANGE:
		return rio_mport_add_pw_filter(data, (void __user *)arg);
	case RIO_DISABLE_PORTWRITE_RANGE:
		return rio_mport_remove_pw_filter(data, (void __user *)arg);
	case RIO_SET_EVENT_MASK:
		data->event_mask = arg;
		return 0;
	case RIO_GET_EVENT_MASK:
		if (copy_to_user((void __user *)arg, &data->event_mask,
				 sizeof(data->event_mask)))
			return -EFAULT;
		return 0;
	case RIO_MAP_OUTBOUND:
		return rio_mport_obw_map(filp, (void __user *)arg);
	case RIO_MAP_INBOUND:
		return rio_mport_map_inbound(filp, (void __user *)arg);
	case RIO_UNMAP_OUTBOUND:
		return rio_mport_obw_free(filp, (void __user *)arg);
	case RIO_UNMAP_INBOUND:
		return rio_mport_inbound_free(filp, (void __user *)arg);
	case RIO_ALLOC_DMA:
		return rio_mport_alloc_dma(filp, (void __user *)arg);
	case RIO_FREE_DMA:
		return rio_mport_free_dma(filp, (void __user *)arg);
	case RIO_WAIT_FOR_ASYNC:
		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
	case RIO_TRANSFER:
		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
	case RIO_DEV_ADD:
		return rio_mport_add_riodev(data, (void __user *)arg);
	case RIO_DEV_DEL:
		return rio_mport_del_riodev(data, (void __user *)arg);
	default:
		break;
	}

	return err;
}
/*
 * mport_release_mapping - free mapping resources and info structure
 * @ref: a pointer to the kref within struct rio_mport_mapping
 *
 * NOTE: Shall be called while holding buf_mutex.
 */
static void mport_release_mapping(struct kref *ref)
{
	struct rio_mport_mapping *map =
			container_of(ref, struct rio_mport_mapping, ref);
	struct rio_mport *mport = map->md->mport;

	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
		   map->dir, map->virt_addr,
		   &map->phys_addr, mport->name);

	list_del(&map->node);

	switch (map->dir) {
	case MAP_INBOUND:
		rio_unmap_inb_region(mport, map->phys_addr);
		/* fall through */
	case MAP_DMA:
		dma_free_coherent(mport->dev.parent, map->size,
				  map->virt_addr, map->phys_addr);
		break;
	case MAP_OUTBOUND:
		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
		break;
	}
	kfree(map);
}
static void mport_mm_open(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
	kref_get(&map->ref);
}

static void mport_mm_close(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
	mutex_lock(&map->md->buf_mutex);
	kref_put(&map->ref, mport_release_mapping);
	mutex_unlock(&map->md->buf_mutex);
}

static const struct vm_operations_struct vm_ops = {
	.open =	mport_mm_open,
	.close = mport_mm_close,
};
static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	size_t size = vma->vm_end - vma->vm_start;
	dma_addr_t baddr;
	unsigned long offset;
	int found = 0, ret;
	struct rio_mport_mapping *map;

	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
		   (unsigned int)size, vma->vm_pgoff);

	md = priv->md;
	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (baddr >= map->phys_addr &&
		    baddr < (map->phys_addr + map->size)) {
			found = 1;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (!found)
		return -ENOMEM;

	offset = baddr - map->phys_addr;

	if (size + offset > map->size)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);

	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
				map->virt_addr, map->phys_addr, map->size);
	else if (map->dir == MAP_OUTBOUND) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
	} else {
		rmcd_error("Attempt to mmap unsupported mapping type");
		ret = -EIO;
	}

	if (!ret) {
		vma->vm_private_data = map;
		vma->vm_ops = &vm_ops;
		mport_mm_open(vma);
	} else {
		rmcd_error("MMAP exit with err=%d", ret);
	}

	return ret;
}
static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
{
	struct mport_cdev_priv *priv = filp->private_data;

	poll_wait(filp, &priv->event_rx_wait, wait);
	if (kfifo_len(&priv->event_fifo))
		return POLLIN | POLLRDNORM;

	return 0;
}
static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	int copied;
	ssize_t ret;

	if (!count)
		return 0;

	if (kfifo_is_empty(&priv->event_fifo) &&
	    (filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	if (count % sizeof(struct rio_event))
		return -EINVAL;

	ret = wait_event_interruptible(priv->event_rx_wait,
					kfifo_len(&priv->event_fifo) != 0);
	if (ret)
		return ret;

	while (ret < count) {
		if (kfifo_to_user(&priv->event_fifo, buf,
		      sizeof(struct rio_event), &copied))
			return -EFAULT;
		ret += copied;
		buf += copied;
	}

	return ret;
}
mport_write(struct file
*filp
, const char __user
*buf
,
2355 size_t count
, loff_t
*ppos
)
2357 struct mport_cdev_priv
*priv
= filp
->private_data
;
2358 struct rio_mport
*mport
= priv
->md
->mport
;
2359 struct rio_event event
;
2365 if (count
% sizeof(event
))
2369 while ((count
- len
) >= (int)sizeof(event
)) {
2370 if (copy_from_user(&event
, buf
, sizeof(event
)))
2373 if (event
.header
!= RIO_DOORBELL
)
2376 ret
= rio_mport_send_doorbell(mport
,
2377 (u16
)event
.u
.doorbell
.rioid
,
2378 event
.u
.doorbell
.payload
);
2382 len
+= sizeof(event
);
2383 buf
+= sizeof(event
);
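
/*
 * Illustrative user-space sketch (an assumption, not part of this file):
 * doorbell reception follows the enable/read pattern implemented above:
 * enable a doorbell range plus the matching event mask, then read whole
 * struct rio_event records from the device file:
 *
 *	struct rio_doorbell_filter dbf = {
 *		.rioid = 0xffffffff, .low = 0, .high = 0xffff, // any sender
 *	};
 *	struct rio_event ev;
 *
 *	ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &dbf);
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL);
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("DB 0x%04x from %u\n",
 *		       ev.u.doorbell.payload, ev.u.doorbell.rioid);
 */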
static const struct file_operations mport_fops = {
	.owner		= THIS_MODULE,
	.open		= mport_cdev_open,
	.release	= mport_cdev_release,
	.poll		= mport_cdev_poll,
	.read		= mport_read,
	.write		= mport_write,
	.mmap		= mport_cdev_mmap,
	.fasync		= mport_cdev_fasync,
	.unlocked_ioctl = mport_cdev_ioctl
};
2405 static void mport_device_release(struct device
*dev
)
2407 struct mport_dev
*md
;
2409 rmcd_debug(EXIT
, "%s", dev_name(dev
));
2410 md
= container_of(dev
, struct mport_dev
, dev
);
/*
 * mport_cdev_add() - Create mport_dev from rio_mport
 * @mport:	RapidIO master port
 */
static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
{
	int ret = 0;
	struct mport_dev *md;
	struct rio_mport_attr attr;

	md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL);
	if (!md) {
		rmcd_error("Unable to allocate a device object");
		return NULL;
	}

	md->mport = mport;
	mutex_init(&md->buf_mutex);
	mutex_init(&md->file_mutex);
	INIT_LIST_HEAD(&md->file_list);
	cdev_init(&md->cdev, &mport_fops);
	md->cdev.owner = THIS_MODULE;
	ret = cdev_add(&md->cdev, MKDEV(MAJOR(dev_number), mport->id), 1);
	if (ret < 0) {
		kfree(md);
		rmcd_error("Unable to register a device, err=%d", ret);
		return NULL;
	}

	md->dev.devt = md->cdev.dev;
	md->dev.class = dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);

	ret = device_register(&md->dev);
	if (ret) {
		rmcd_error("Failed to register mport %d (err=%d)",
			   mport->id, ret);
		goto err_cdev;
	}

	get_device(&md->dev);

	INIT_LIST_HEAD(&md->doorbells);
	spin_lock_init(&md->db_lock);
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);
	INIT_LIST_HEAD(&md->mappings);

	md->properties.id = mport->id;
	md->properties.sys_size = mport->sys_size;
	md->properties.hdid = mport->host_deviceid;
	md->properties.index = mport->index;

	/* The transfer_mode property will be returned through mport query
	 * interface
	 */
#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
#else
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif
	ret = rio_query_mport(mport, &attr);
	if (!ret) {
		md->properties.flags = attr.flags;
		md->properties.link_speed = attr.link_speed;
		md->properties.link_width = attr.link_width;
		md->properties.dma_max_sge = attr.dma_max_sge;
		md->properties.dma_max_size = attr.dma_max_size;
		md->properties.dma_align = attr.dma_align;
		md->properties.cap_sys_size = 0;
		md->properties.cap_transfer_mode = 0;
		md->properties.cap_addr_size = 0;
	} else
		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
			mport->name, MAJOR(dev_number), mport->id);

	mutex_lock(&mport_devs_lock);
	list_add_tail(&md->node, &mport_devs);
	mutex_unlock(&mport_devs_lock);

	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
		mport->name, MAJOR(dev_number), mport->id);

	return md;

err_cdev:
	cdev_del(&md->cdev);
	kfree(md);
	return NULL;
}
/*
 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
 *                              associated DMA channels.
 */
static void mport_cdev_terminate_dma(struct mport_dev *md)
{
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct mport_cdev_priv *client;

	rmcd_debug(DMA, "%s", dev_name(&md->dev));

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->dmach) {
			dmaengine_terminate_all(client->dmach);
			rio_release_dma(client->dmach);
		}
	}
	mutex_unlock(&md->file_mutex);

	if (md->dma_chan) {
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}
#endif
}
/*
 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
 *                            device files.
 */
static int mport_cdev_kill_fasync(struct mport_dev *md)
{
	unsigned int files = 0;
	struct mport_cdev_priv *client;

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->async_queue)
			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
		files++;
	}
	mutex_unlock(&md->file_mutex);
	return files;
}
/*
 * mport_cdev_remove() - Remove mport character device
 * @dev:	Mport device to remove
 */
static void mport_cdev_remove(struct mport_dev *md)
{
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
	atomic_set(&md->active, 0);
	mport_cdev_terminate_dma(md);
	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
	cdev_del(&(md->cdev));
	mport_cdev_kill_fasync(md);

	flush_workqueue(dma_wq);

	/* TODO: do we need to give clients some time to close file
	 * descriptors? Simple wait for XX, or kref?
	 */

	/*
	 * Release DMA buffers allocated for the mport device.
	 * Disable associated inbound Rapidio requests mapping if applicable.
	 */
	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		kref_put(&map->ref, mport_release_mapping);
	}
	mutex_unlock(&md->buf_mutex);

	if (!list_empty(&md->mappings))
		rmcd_warn("WARNING: %s pending mappings on removal",
			  md->mport->name);

	rio_release_inb_dbell(md->mport, 0, 0x0fff);

	device_unregister(&md->dev);
	put_device(&md->dev);
}
/*
 * RIO rio_mport_interface driver
 */

/*
 * mport_add_mport() - Add rio_mport from LDM device struct
 * @dev:		Linux device model struct
 * @class_intf:	Linux class_interface
 */
static int mport_add_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev = NULL;

	mport = to_rio_mport(dev);
	if (!mport)
		return -ENODEV;

	chdev = mport_cdev_add(mport);
	if (!chdev)
		return -ENODEV;

	return 0;
}
/*
 * mport_remove_mport() - Remove rio_mport from global list
 * TODO remove device from global mport_dev list
 */
static void mport_remove_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev;
	int found = 0;

	mport = to_rio_mport(dev);
	rmcd_debug(EXIT, "Remove %s", mport->name);

	mutex_lock(&mport_devs_lock);
	list_for_each_entry(chdev, &mport_devs, node) {
		if (chdev->mport->id == mport->id) {
			atomic_set(&chdev->active, 0);
			list_del(&chdev->node);
			found = 1;
			break;
		}
	}
	mutex_unlock(&mport_devs_lock);

	if (found)
		mport_cdev_remove(chdev);
}

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class		= &rio_mport_class,
	.add_dev	= mport_add_mport,
	.remove_dev	= mport_remove_mport,
};
/*
 * Linux kernel module
 */

/*
 * mport_init - Driver module loading
 */
static int __init mport_init(void)
{
	int ret;

	/* Create device class needed by udev */
	dev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(dev_class)) {
		rmcd_error("Unable to create " DRV_NAME " class");
		return PTR_ERR(dev_class);
	}

	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
	if (ret < 0)
		goto err_chr;

	rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));

	/* Register to rio_mport_interface */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		rmcd_error("class_interface_register() failed, err=%d", ret);
		goto err_cli;
	}

	dma_wq = create_singlethread_workqueue("dma_wq");
	if (!dma_wq) {
		rmcd_error("failed to create DMA work queue");
		ret = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	class_interface_unregister(&rio_mport_interface);
err_cli:
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
err_chr:
	class_destroy(dev_class);
	return ret;
}
/*
 * mport_exit - Driver module unloading
 */
static void __exit mport_exit(void)
{
	class_interface_unregister(&rio_mport_interface);
	class_destroy(dev_class);
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
	destroy_workqueue(dma_wq);
}

module_init(mport_init);
module_exit(mport_exit);