 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"
#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	struct ntb_transport_qp *qp;
	struct ntb_payload_header __iomem *tx_hdr;
	struct ntb_payload_header *rx_hdr;

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;

	struct dma_chan *tx_dma_chan;
	struct dma_chan *rx_dma_chan;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;

	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;

	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;
struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;

	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;

	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),

struct ntb_payload_header {
#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
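/*
 * Editorial note, not from the original source: QP_TO_MW() distributes queue
 * pairs across memory windows round-robin. For example, with nt->mw_count == 2
 * and four queue pairs, QPs 0 and 2 map to MW 0 while QPs 1 and 3 map to MW 1.
 */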
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
#define DMA_RETRIES		20
#define DMA_OUT_RESOURCE_TO	50
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	rc = client->probe(dev);

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add_tail(&nt->entry, &ntb_transport_list);

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);

	list_del(&nt->entry);

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);

EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;

	if (list_empty(&ntb_transport_list))

	list_for_each_entry(nt, &ntb_transport_list, entry) {

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);

		list_add_tail(&client_dev->entry, &nt->client_devs);

	ntb_transport_unregister_client_dev(device_name);

EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);
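/*
 * Editorial usage sketch, not part of the original driver: a client module
 * (ntb_netdev, for instance) typically registers both a named client device
 * and its driver during module init, roughly as below. The "my_" names are
 * hypothetical.
 *
 *	static struct ntb_transport_client my_client = {
 *		.driver.name	= "my_ntb_client",
 *		.driver.owner	= THIS_MODULE,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	static int __init my_client_init(void)
 *	{
 *		ntb_transport_register_client_dev("my_ntb_client");
 *		return ntb_transport_register_client(&my_client);
 *	}
 */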
/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
	struct ntb_transport_qp *qp;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)

	buf = kmalloc(out_count, GFP_KERNEL);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nNTB QP stats:\n\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t0x%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t0x%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index (H) - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       qp->remote_rx_info->entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       ntb_transport_tx_free_entry(qp));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA tx prep err - \t%llu\n",
			       qp->dma_tx_prep_err);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA rx prep err - \t%llu\n",
			       qp->dma_rx_prep_err);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using TX DMA - \t%s\n",
			       qp->tx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using RX DMA - \t%s\n",
			       qp->rx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       qp->link_is_up ? "Up" : "Down");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.read = debugfs_read,
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {

	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

	spin_unlock_irqrestore(lock, flags);

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {

	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_move_tail(&entry->entry, to_list);

	spin_unlock_irqrestore(lock, flags);
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);

	mw->virt_addr = NULL;
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)

	ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",

	/*
	 * We must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to set up CMA
	 * for BAR sizes equal to or greater than 4 MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
		ntb_free_mw(nt, num_mw);

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;

	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;

	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->dma_tx_prep_err = 0;
	qp->dma_rx_prep_err = 0;
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	schedule_delayed_work(&qp->link_work,
			      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);

	cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);

	schedule_work(&nt->link_cleanup);
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, upper_32_bits(size));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, lower_32_bits(size));

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)

	for (i = 0; i < nt->mw_count; i++) {
		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);

	for (i = 0; i < nt->mw_count; i++)

	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
	struct ntb_transport_qp *qp;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);

	qp = &nt->qp_vec[qp_num];
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;

	qp->tx_mw_phys = mw_base + qp_offset;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;

	if (ntb_db_is_unsafe(ndev))
			 "doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
			 "scratchpad is unsafe, proceed anyway...\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);

	mw_count = ntb_mw_count(ndev);

	nt->mw_count = mw_count;

	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);

		mw->virt_addr = NULL;

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	ntb_clear_ctx(ndev);

		mw = &nt->mw_vec[i];
static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	for (i = nt->mw_count; i--; ) {
		iounmap(nt->mw_vec[i].vbase);
static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))

		entry->rx_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		cb_data = entry->cb_data;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */

	ntb_rx_copy_callback(entry);

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	if (len < copy_bytes)

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))

	unmap->from_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);

		qp->dma_rx_prep_err++;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	dmaengine_unmap_put(unmap);

	dmaengine_unmap_put(unmap);

	ntb_memcpy_rx(entry, offset);
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;

	entry->rx_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);

		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);

	dev_dbg(&qp->ndev->pdev->dev,
		"RX OK index %u ver %u size %d into buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;

	entry->len = hdr->len;

	ntb_async_rx(entry, offset);

	qp->rx_index %= qp->rx_max_entry;

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);

	if (i && qp->rx_dma_chan)
		dma_async_issue_pending(qp->rx_dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		tasklet_schedule(&qp->rxc_db_work);

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar. Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		qp->tx_handler(qp, qp->cb_data, entry->cb_data,

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */

	ntb_tx_copy_callback(entry);
static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->tx_dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (len < copy_bytes)

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);

	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0],
						     len, DMA_PREP_INTERRUPT);

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);

		qp->dma_tx_prep_err++;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);

	dmaengine_unmap_put(unmap);

	dmaengine_unmap_put(unmap);

	ntb_memcpy_tx(entry, offset);
static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,

	ntb_async_tx(qp, entry);

	qp->tx_index %= qp->tx_max_entry;
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;

	if (!qp->link_is_up)

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);

	entry->cb_data = NULL;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",

	ntb_qp_link_down_reset(qp);
static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue. The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;

	ndev = dev_ntb(client_dev->parent);

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap);

	/* decrement free_queue to make it zero based */

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

		dma_request_channel(dma_mask, ntb_dma_filter_fn,
				    (void *)(unsigned long)node);
	if (!qp->tx_dma_chan)
		dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");

		dma_request_channel(dma_mask, ntb_dma_filter_fn,
				    (void *)(unsigned long)node);
	if (!qp->rx_dma_chan)
		dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");

	qp->tx_dma_chan = NULL;
	qp->rx_dma_chan = NULL;

	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
		qp->tx_dma_chan ? "DMA" : "CPU");

	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
		qp->rx_dma_chan ? "DMA" : "CPU");

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);

		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
	if (qp->tx_dma_chan)
		dma_release_channel(qp->tx_dma_chan);
	if (qp->rx_dma_chan)
		dma_release_channel(qp->rx_dma_chan);
	nt->qp_bitmap_free |= qp_bit;
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
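/*
 * Editorial usage sketch, not taken from this file: a client typically
 * creates its queue pair from its probe() callback, supplying its handlers,
 * and then posts receive buffers and brings the link up. Names prefixed
 * "my_" are hypothetical.
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx_handler,
 *		.tx_handler	= my_tx_handler,
 *		.event_handler	= my_event_handler,
 *	};
 *
 *	static int my_probe(struct device *client_dev)
 *	{
 *		struct ntb_transport_qp *qp;
 *
 *		qp = ntb_transport_create_queue(my_ctx, client_dev,
 *						&my_handlers);
 *		if (!qp)
 *			return -EIO;
 *		ntb_transport_link_up(qp);
 *		return 0;
 *	}
 */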
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	pdev = qp->ndev->pdev;

	if (qp->tx_dma_chan) {
		struct dma_chan *chan = qp->tx_dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->tx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);

	if (qp->rx_dma_chan) {
		struct dma_chan *chan = qp->rx_dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->rx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_disable(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))

	qp->transport->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue. Should only be used during
 * shutdown of the qp.
 *
 * RETURNS: NULL error value on error, or void* for success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;

	if (!qp || qp->client_ready)

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);

	buf = entry->cb_data;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which a NTB
 * payload can be received into.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
	struct ntb_queue_entry *entry;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);

	entry->cb_data = cb;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	tasklet_schedule(&qp->rxc_db_work);
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
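/*
 * Editorial usage sketch, not from the original source: a client keeps the
 * receive ring full by posting buffers up front and re-posting each buffer
 * from its rx_handler once the payload has been consumed. "my_"/"MY_" names
 * are hypothetical.
 *
 *	static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 *				  void *data, int len)
 *	{
 *		// consume 'len' bytes at 'data', then recycle the buffer
 *		ntb_transport_rx_enqueue(qp, data, data, MY_BUF_SIZE);
 *	}
 *
 *	for (i = 0; i < MY_RING_DEPTH; i++)
 *		ntb_transport_rx_enqueue(qp, buf[i], buf[i], MY_BUF_SIZE);
 */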
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which a NTB
 * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
	struct ntb_queue_entry *entry;

	if (!qp || !qp->link_is_up || !len)

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		qp->tx_err_no_buf++;

	entry->cb_data = cb;

	rc = ntb_process_tx(qp, entry);
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
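/*
 * Editorial usage sketch, not from the original source: the transmit path
 * mirrors the receive path. The client hands a buffer to the transport and
 * reclaims it in its tx_handler once transmission has completed. "my_"
 * names are hypothetical.
 *
 *	rc = ntb_transport_tx_enqueue(qp, my_cookie, my_buf, my_len);
 *	if (rc)
 *		// negative errno, e.g. when the transmit ring is full
 *
 *	static void my_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
 *				  void *data, int len)
 *	{
 *		// 'data' (my_buf) may now be freed or reused
 *	}
 */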
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	ntb_send_link_down(qp);

	cancel_delayed_work_sync(&qp->link_work);
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max_size;
	unsigned int copy_align;
	struct dma_chan *rx_chan, *tx_chan;

	rx_chan = qp->rx_dma_chan;
	tx_chan = qp->tx_dma_chan;

	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
			 tx_chan ? tx_chan->device->copy_align : 0);

	/* If DMA engine usage is possible, try to find the max size for that */
	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max_size = round_down(max_size, 1 << copy_align);
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
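/*
 * Editorial note, not from the original source: the value reported above is
 * the usable payload per ring slot. For example, with the default
 * transport_mtu of 0x10000 a frame can be at most 64 KiB, and the payload is
 * that frame size minus sizeof(struct ntb_payload_header), rounded down to
 * the DMA engine's copy alignment when a channel is in use.
 */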
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
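/*
 * Editorial worked example, not from the original source: with
 * tx_max_entry == 100, head (tx_index) == 90 and tail == 10, the ring has
 * wrapped, so the free count is 100 + 10 - 90 = 20 entries; with head == 10
 * and tail == 90 it is simply 90 - 10 = 80.
 */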
static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,

static struct ntb_client ntb_transport_client = {
	.probe = ntb_transport_probe,
	.remove = ntb_transport_free,
static int __init ntb_transport_init(void)
{
	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);

	rc = ntb_register_client(&ntb_transport_client);

	bus_unregister(&ntb_transport_bus);

	debugfs_remove_recursive(nt_debugfs_dir);
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);