NTB: Schedule to receive on QP link up
drivers/ntb/ntb_transport.c
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * BSD LICENSE
15 *
16 * Copyright(c) 2012 Intel Corporation. All rights reserved.
17 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 *
23 * * Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer.
25 * Redistributions in binary form must reproduce the above copyright
26 * notice, this list of conditions and the following disclaimer in
27 * the documentation and/or other materials provided with the
28 * distribution.
29 * * Neither the name of Intel Corporation nor the names of its
30 * contributors may be used to endorse or promote products derived
31 * from this software without specific prior written permission.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44 *
45 * PCIe NTB Transport Linux driver
46 *
47 * Contact Information:
48 * Jon Mason <jon.mason@intel.com>
49 */
50 #include <linux/debugfs.h>
51 #include <linux/delay.h>
52 #include <linux/dmaengine.h>
53 #include <linux/dma-mapping.h>
54 #include <linux/errno.h>
55 #include <linux/export.h>
56 #include <linux/interrupt.h>
57 #include <linux/module.h>
58 #include <linux/pci.h>
59 #include <linux/slab.h>
60 #include <linux/types.h>
61 #include <linux/uaccess.h>
62 #include "linux/ntb.h"
63 #include "linux/ntb_transport.h"
64
65 #define NTB_TRANSPORT_VERSION 4
66 #define NTB_TRANSPORT_VER "4"
67 #define NTB_TRANSPORT_NAME "ntb_transport"
68 #define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"
69
70 MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
71 MODULE_VERSION(NTB_TRANSPORT_VER);
72 MODULE_LICENSE("Dual BSD/GPL");
73 MODULE_AUTHOR("Intel Corporation");
74
75 static unsigned long max_mw_size;
76 module_param(max_mw_size, ulong, 0644);
77 MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
78
79 static unsigned int transport_mtu = 0x10000;
80 module_param(transport_mtu, uint, 0644);
81 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
82
83 static unsigned char max_num_clients;
84 module_param(max_num_clients, byte, 0644);
85 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
86
87 static unsigned int copy_bytes = 1024;
88 module_param(copy_bytes, uint, 0644);
89 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
90
91 static bool use_dma;
92 module_param(use_dma, bool, 0644);
93 MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
94
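/*
 * Note (added commentary, not part of the original file): the parameters
 * above are read when the module loads; the values below are illustrative
 * only.  Loading with, e.g., "modprobe ntb_transport use_dma=1
 * copy_bytes=4096" binds a DMA engine channel per queue and routes payloads
 * of 4096 bytes or more through it, while smaller transfers still take the
 * CPU memcpy path.
 */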
95 static struct dentry *nt_debugfs_dir;
96
97 struct ntb_queue_entry {
98 /* ntb_queue list reference */
99 struct list_head entry;
100 /* pointers to data to be transferred */
101 void *cb_data;
102 void *buf;
103 unsigned int len;
104 unsigned int flags;
105
106 struct ntb_transport_qp *qp;
107 union {
108 struct ntb_payload_header __iomem *tx_hdr;
109 struct ntb_payload_header *rx_hdr;
110 };
111 unsigned int index;
112 };
113
114 struct ntb_rx_info {
115 unsigned int entry;
116 };
117
118 struct ntb_transport_qp {
119 struct ntb_transport_ctx *transport;
120 struct ntb_dev *ndev;
121 void *cb_data;
122 struct dma_chan *dma_chan;
123
124 bool client_ready;
125 bool link_is_up;
126
127 u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
128 u64 qp_bit;
129
130 struct ntb_rx_info __iomem *rx_info;
131 struct ntb_rx_info *remote_rx_info;
132
133 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
134 void *data, int len);
135 struct list_head tx_free_q;
136 spinlock_t ntb_tx_free_q_lock;
137 void __iomem *tx_mw;
138 dma_addr_t tx_mw_phys;
139 unsigned int tx_index;
140 unsigned int tx_max_entry;
141 unsigned int tx_max_frame;
142
143 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
144 void *data, int len);
145 struct list_head rx_post_q;
146 struct list_head rx_pend_q;
147 struct list_head rx_free_q;
148 /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
149 spinlock_t ntb_rx_q_lock;
150 void *rx_buff;
151 unsigned int rx_index;
152 unsigned int rx_max_entry;
153 unsigned int rx_max_frame;
154 dma_cookie_t last_cookie;
155 struct tasklet_struct rxc_db_work;
156
157 void (*event_handler)(void *data, int status);
158 struct delayed_work link_work;
159 struct work_struct link_cleanup;
160
161 struct dentry *debugfs_dir;
162 struct dentry *debugfs_stats;
163
164 /* Stats */
165 u64 rx_bytes;
166 u64 rx_pkts;
167 u64 rx_ring_empty;
168 u64 rx_err_no_buf;
169 u64 rx_err_oflow;
170 u64 rx_err_ver;
171 u64 rx_memcpy;
172 u64 rx_async;
173 u64 tx_bytes;
174 u64 tx_pkts;
175 u64 tx_ring_full;
176 u64 tx_err_no_buf;
177 u64 tx_memcpy;
178 u64 tx_async;
179 };
180
181 struct ntb_transport_mw {
182 phys_addr_t phys_addr;
183 resource_size_t phys_size;
184 resource_size_t xlat_align;
185 resource_size_t xlat_align_size;
186 void __iomem *vbase;
187 size_t xlat_size;
188 size_t buff_size;
189 void *virt_addr;
190 dma_addr_t dma_addr;
191 };
192
193 struct ntb_transport_client_dev {
194 struct list_head entry;
195 struct ntb_transport_ctx *nt;
196 struct device dev;
197 };
198
199 struct ntb_transport_ctx {
200 struct list_head entry;
201 struct list_head client_devs;
202
203 struct ntb_dev *ndev;
204
205 struct ntb_transport_mw *mw_vec;
206 struct ntb_transport_qp *qp_vec;
207 unsigned int mw_count;
208 unsigned int qp_count;
209 u64 qp_bitmap;
210 u64 qp_bitmap_free;
211
212 bool link_is_up;
213 struct delayed_work link_work;
214 struct work_struct link_cleanup;
215
216 struct dentry *debugfs_node_dir;
217 };
218
219 enum {
220 DESC_DONE_FLAG = BIT(0),
221 LINK_DOWN_FLAG = BIT(1),
222 };
223
224 struct ntb_payload_header {
225 unsigned int ver;
226 unsigned int len;
227 unsigned int flags;
228 };
229
230 enum {
231 VERSION = 0,
232 QP_LINKS,
233 NUM_QPS,
234 NUM_MWS,
235 MW0_SZ_HIGH,
236 MW0_SZ_LOW,
237 MW1_SZ_HIGH,
238 MW1_SZ_LOW,
239 MAX_SPAD,
240 };
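/*
 * Note (added commentary, not part of the original file): the enum above is
 * the scratchpad register map used for the peer handshake.  Each memory
 * window consumes a pair of scratchpads, with its 64-bit size split across
 * them:
 *
 *	spad(MW0_SZ_HIGH + i * 2) = upper 32 bits of MW i size
 *	spad(MW0_SZ_LOW  + i * 2) = lower 32 bits of MW i size
 *
 * As written, the map only defines entries for MW0 and MW1, so this protocol
 * version covers at most two memory windows per device.
 */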
241
242 #define dev_client_dev(__dev) \
243 container_of((__dev), struct ntb_transport_client_dev, dev)
244
245 #define drv_client(__drv) \
246 container_of((__drv), struct ntb_transport_client, driver)
247
248 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
249 #define NTB_QP_DEF_NUM_ENTRIES 100
250 #define NTB_LINK_DOWN_TIMEOUT 10
251
252 static void ntb_transport_rxc_db(unsigned long data);
253 static const struct ntb_ctx_ops ntb_transport_ops;
254 static struct ntb_client ntb_transport_client;
255
256 static int ntb_transport_bus_match(struct device *dev,
257 struct device_driver *drv)
258 {
259 return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
260 }
261
262 static int ntb_transport_bus_probe(struct device *dev)
263 {
264 const struct ntb_transport_client *client;
265 int rc = -EINVAL;
266
267 get_device(dev);
268
269 client = drv_client(dev->driver);
270 rc = client->probe(dev);
271 if (rc)
272 put_device(dev);
273
274 return rc;
275 }
276
277 static int ntb_transport_bus_remove(struct device *dev)
278 {
279 const struct ntb_transport_client *client;
280
281 client = drv_client(dev->driver);
282 client->remove(dev);
283
284 put_device(dev);
285
286 return 0;
287 }
288
289 static struct bus_type ntb_transport_bus = {
290 .name = "ntb_transport",
291 .match = ntb_transport_bus_match,
292 .probe = ntb_transport_bus_probe,
293 .remove = ntb_transport_bus_remove,
294 };
295
296 static LIST_HEAD(ntb_transport_list);
297
298 static int ntb_bus_init(struct ntb_transport_ctx *nt)
299 {
300 list_add(&nt->entry, &ntb_transport_list);
301 return 0;
302 }
303
304 static void ntb_bus_remove(struct ntb_transport_ctx *nt)
305 {
306 struct ntb_transport_client_dev *client_dev, *cd;
307
308 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
309 dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
310 dev_name(&client_dev->dev));
311 list_del(&client_dev->entry);
312 device_unregister(&client_dev->dev);
313 }
314
315 list_del(&nt->entry);
316 }
317
318 static void ntb_transport_client_release(struct device *dev)
319 {
320 struct ntb_transport_client_dev *client_dev;
321
322 client_dev = dev_client_dev(dev);
323 kfree(client_dev);
324 }
325
326 /**
327 * ntb_transport_unregister_client_dev - Unregister NTB client device
328 * @device_name: Name of NTB client device
329 *
330 * Unregister an NTB client device with the NTB transport layer
331 */
332 void ntb_transport_unregister_client_dev(char *device_name)
333 {
334 struct ntb_transport_client_dev *client, *cd;
335 struct ntb_transport_ctx *nt;
336
337 list_for_each_entry(nt, &ntb_transport_list, entry)
338 list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
339 if (!strncmp(dev_name(&client->dev), device_name,
340 strlen(device_name))) {
341 list_del(&client->entry);
342 device_unregister(&client->dev);
343 }
344 }
345 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
346
347 /**
348 * ntb_transport_register_client_dev - Register NTB client device
349 * @device_name: Name of NTB client device
350 *
351 * Register an NTB client device with the NTB transport layer
352 */
353 int ntb_transport_register_client_dev(char *device_name)
354 {
355 struct ntb_transport_client_dev *client_dev;
356 struct ntb_transport_ctx *nt;
357 int node;
358 int rc, i = 0;
359
360 if (list_empty(&ntb_transport_list))
361 return -ENODEV;
362
363 list_for_each_entry(nt, &ntb_transport_list, entry) {
364 struct device *dev;
365
366 node = dev_to_node(&nt->ndev->dev);
367
368 client_dev = kzalloc_node(sizeof(*client_dev),
369 GFP_KERNEL, node);
370 if (!client_dev) {
371 rc = -ENOMEM;
372 goto err;
373 }
374
375 dev = &client_dev->dev;
376
377 /* setup and register client devices */
378 dev_set_name(dev, "%s%d", device_name, i);
379 dev->bus = &ntb_transport_bus;
380 dev->release = ntb_transport_client_release;
381 dev->parent = &nt->ndev->dev;
382
383 rc = device_register(dev);
384 if (rc) {
385 kfree(client_dev);
386 goto err;
387 }
388
389 list_add_tail(&client_dev->entry, &nt->client_devs);
390 i++;
391 }
392
393 return 0;
394
395 err:
396 ntb_transport_unregister_client_dev(device_name);
397
398 return rc;
399 }
400 EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
401
402 /**
403 * ntb_transport_register_client - Register NTB client driver
404 * @drv: NTB client driver to be registered
405 *
406 * Register an NTB client driver with the NTB transport layer
407 *
408 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
409 */
410 int ntb_transport_register_client(struct ntb_transport_client *drv)
411 {
412 drv->driver.bus = &ntb_transport_bus;
413
414 if (list_empty(&ntb_transport_list))
415 return -ENODEV;
416
417 return driver_register(&drv->driver);
418 }
419 EXPORT_SYMBOL_GPL(ntb_transport_register_client);
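/*
 * Example (added commentary, not part of the original file): a minimal
 * sketch of client registration.  The "foo" names are placeholders; only
 * the ntb_transport_* and driver-core symbols are real.
 *
 *	static int foo_probe(struct device *client_dev)
 *	{
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct device *client_dev)
 *	{
 *	}
 *
 *	static struct ntb_transport_client foo_client = {
 *		.driver.name	= "foo",
 *		.driver.owner	= THIS_MODULE,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *
 * A client module typically calls ntb_transport_register_client_dev("foo")
 * to create the bus device, then ntb_transport_register_client(&foo_client)
 * so that foo_probe() runs against it; unregistration is the reverse order.
 */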
420
421 /**
422 * ntb_transport_unregister_client - Unregister NTB client driver
423 * @drv: NTB client driver to be unregistered
424 *
425 * Unregister an NTB client driver with the NTB transport layer
428 */
429 void ntb_transport_unregister_client(struct ntb_transport_client *drv)
430 {
431 driver_unregister(&drv->driver);
432 }
433 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
434
435 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
436 loff_t *offp)
437 {
438 struct ntb_transport_qp *qp;
439 char *buf;
440 ssize_t ret, out_offset, out_count;
441
442 qp = filp->private_data;
443
444 if (!qp || !qp->link_is_up)
445 return 0;
446
447 out_count = 1000;
448
449 buf = kmalloc(out_count, GFP_KERNEL);
450 if (!buf)
451 return -ENOMEM;
452
453 out_offset = 0;
454 out_offset += snprintf(buf + out_offset, out_count - out_offset,
455 "NTB QP stats\n");
456 out_offset += snprintf(buf + out_offset, out_count - out_offset,
457 "rx_bytes - \t%llu\n", qp->rx_bytes);
458 out_offset += snprintf(buf + out_offset, out_count - out_offset,
459 "rx_pkts - \t%llu\n", qp->rx_pkts);
460 out_offset += snprintf(buf + out_offset, out_count - out_offset,
461 "rx_memcpy - \t%llu\n", qp->rx_memcpy);
462 out_offset += snprintf(buf + out_offset, out_count - out_offset,
463 "rx_async - \t%llu\n", qp->rx_async);
464 out_offset += snprintf(buf + out_offset, out_count - out_offset,
465 "rx_ring_empty - %llu\n", qp->rx_ring_empty);
466 out_offset += snprintf(buf + out_offset, out_count - out_offset,
467 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
468 out_offset += snprintf(buf + out_offset, out_count - out_offset,
469 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
470 out_offset += snprintf(buf + out_offset, out_count - out_offset,
471 "rx_err_ver - \t%llu\n", qp->rx_err_ver);
472 out_offset += snprintf(buf + out_offset, out_count - out_offset,
473 "rx_buff - \t%p\n", qp->rx_buff);
474 out_offset += snprintf(buf + out_offset, out_count - out_offset,
475 "rx_index - \t%u\n", qp->rx_index);
476 out_offset += snprintf(buf + out_offset, out_count - out_offset,
477 "rx_max_entry - \t%u\n", qp->rx_max_entry);
478
479 out_offset += snprintf(buf + out_offset, out_count - out_offset,
480 "tx_bytes - \t%llu\n", qp->tx_bytes);
481 out_offset += snprintf(buf + out_offset, out_count - out_offset,
482 "tx_pkts - \t%llu\n", qp->tx_pkts);
483 out_offset += snprintf(buf + out_offset, out_count - out_offset,
484 "tx_memcpy - \t%llu\n", qp->tx_memcpy);
485 out_offset += snprintf(buf + out_offset, out_count - out_offset,
486 "tx_async - \t%llu\n", qp->tx_async);
487 out_offset += snprintf(buf + out_offset, out_count - out_offset,
488 "tx_ring_full - \t%llu\n", qp->tx_ring_full);
489 out_offset += snprintf(buf + out_offset, out_count - out_offset,
490 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
491 out_offset += snprintf(buf + out_offset, out_count - out_offset,
492 "tx_mw - \t%p\n", qp->tx_mw);
493 out_offset += snprintf(buf + out_offset, out_count - out_offset,
494 "tx_index - \t%u\n", qp->tx_index);
495 out_offset += snprintf(buf + out_offset, out_count - out_offset,
496 "tx_max_entry - \t%u\n", qp->tx_max_entry);
497
498 out_offset += snprintf(buf + out_offset, out_count - out_offset,
499 "\nQP Link %s\n",
500 qp->link_is_up ? "Up" : "Down");
501 if (out_offset > out_count)
502 out_offset = out_count;
503
504 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
505 kfree(buf);
506 return ret;
507 }
508
509 static const struct file_operations ntb_qp_debugfs_stats = {
510 .owner = THIS_MODULE,
511 .open = simple_open,
512 .read = debugfs_read,
513 };
514
515 static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
516 struct list_head *list)
517 {
518 unsigned long flags;
519
520 spin_lock_irqsave(lock, flags);
521 list_add_tail(entry, list);
522 spin_unlock_irqrestore(lock, flags);
523 }
524
525 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
526 struct list_head *list)
527 {
528 struct ntb_queue_entry *entry;
529 unsigned long flags;
530
531 spin_lock_irqsave(lock, flags);
532 if (list_empty(list)) {
533 entry = NULL;
534 goto out;
535 }
536 entry = list_first_entry(list, struct ntb_queue_entry, entry);
537 list_del(&entry->entry);
538 out:
539 spin_unlock_irqrestore(lock, flags);
540
541 return entry;
542 }
543
544 static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
545 struct list_head *list,
546 struct list_head *to_list)
547 {
548 struct ntb_queue_entry *entry;
549 unsigned long flags;
550
551 spin_lock_irqsave(lock, flags);
552
553 if (list_empty(list)) {
554 entry = NULL;
555 } else {
556 entry = list_first_entry(list, struct ntb_queue_entry, entry);
557 list_move_tail(&entry->entry, to_list);
558 }
559
560 spin_unlock_irqrestore(lock, flags);
561
562 return entry;
563 }
564
565 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
566 unsigned int qp_num)
567 {
568 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
569 struct ntb_transport_mw *mw;
570 unsigned int rx_size, num_qps_mw;
571 unsigned int mw_num, mw_count, qp_count;
572 unsigned int i;
573
574 mw_count = nt->mw_count;
575 qp_count = nt->qp_count;
576
577 mw_num = QP_TO_MW(nt, qp_num);
578 mw = &nt->mw_vec[mw_num];
579
580 if (!mw->virt_addr)
581 return -ENOMEM;
582
583 if (mw_num < qp_count % mw_count)
584 num_qps_mw = qp_count / mw_count + 1;
585 else
586 num_qps_mw = qp_count / mw_count;
587
588 rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
589 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
590 rx_size -= sizeof(struct ntb_rx_info);
591
592 qp->remote_rx_info = qp->rx_buff + rx_size;
593
594 /* Due to housekeeping, there must be at least 2 buffs */
595 qp->rx_max_frame = min(transport_mtu, rx_size / 2);
596 qp->rx_max_entry = rx_size / qp->rx_max_frame;
597 qp->rx_index = 0;
598
599 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
600
601 /* setup the hdr offsets with 0's */
602 for (i = 0; i < qp->rx_max_entry; i++) {
603 void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
604 sizeof(struct ntb_payload_header));
605 memset(offset, 0, sizeof(struct ntb_payload_header));
606 }
607
608 qp->rx_pkts = 0;
609 qp->tx_pkts = 0;
610 qp->tx_index = 0;
611
612 return 0;
613 }
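/*
 * Worked example (added commentary, not part of the original file): assume a
 * 2 MiB translated window shared by two queues.  Then rx_size starts at
 * 1 MiB per queue; after reserving sizeof(struct ntb_rx_info) (4 bytes) at
 * the end for remote_rx_info, rx_size = 1048572.  With the default
 * transport_mtu of 0x10000, rx_max_frame = min(65536, 524286) = 65536 and
 * rx_max_entry = 1048572 / 65536 = 15 ring slots.  Each slot carries its
 * payload followed by a trailing struct ntb_payload_header (12 bytes), so
 * the largest payload per frame is 65536 - 12 = 65524 bytes, which is what
 * ntb_transport_max_size() reports for the CPU copy case.
 */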
614
615 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
616 {
617 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
618 struct pci_dev *pdev = nt->ndev->pdev;
619
620 if (!mw->virt_addr)
621 return;
622
623 ntb_mw_clear_trans(nt->ndev, num_mw);
624 dma_free_coherent(&pdev->dev, mw->buff_size,
625 mw->virt_addr, mw->dma_addr);
626 mw->xlat_size = 0;
627 mw->buff_size = 0;
628 mw->virt_addr = NULL;
629 }
630
631 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
632 unsigned int size)
633 {
634 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
635 struct pci_dev *pdev = nt->ndev->pdev;
636 unsigned int xlat_size, buff_size;
637 int rc;
638
639 xlat_size = round_up(size, mw->xlat_align_size);
640 buff_size = round_up(size, mw->xlat_align);
641
642 /* No need to re-setup */
643 if (mw->xlat_size == xlat_size)
644 return 0;
645
646 if (mw->buff_size)
647 ntb_free_mw(nt, num_mw);
648
649 /* Alloc memory for receiving data. Must be aligned */
650 mw->xlat_size = xlat_size;
651 mw->buff_size = buff_size;
652
653 mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
654 &mw->dma_addr, GFP_KERNEL);
655 if (!mw->virt_addr) {
656 mw->xlat_size = 0;
657 mw->buff_size = 0;
658 dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
659 buff_size);
660 return -ENOMEM;
661 }
662
663 /*
664 * we must ensure that the memory address allocated is BAR size
665 * aligned in order for the XLAT register to take the value. This
666 * is a requirement of the hardware. It is recommended to setup CMA
667 * for BAR sizes equal or greater than 4MB.
668 */
669 if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
670 dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
671 &mw->dma_addr);
672 ntb_free_mw(nt, num_mw);
673 return -ENOMEM;
674 }
675
676 /* Notify HW the memory location of the receive buffer */
677 rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
678 if (rc) {
679 dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
680 ntb_free_mw(nt, num_mw);
681 return -EIO;
682 }
683
684 return 0;
685 }
686
687 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
688 {
689 qp->link_is_up = false;
690
691 qp->tx_index = 0;
692 qp->rx_index = 0;
693 qp->rx_bytes = 0;
694 qp->rx_pkts = 0;
695 qp->rx_ring_empty = 0;
696 qp->rx_err_no_buf = 0;
697 qp->rx_err_oflow = 0;
698 qp->rx_err_ver = 0;
699 qp->rx_memcpy = 0;
700 qp->rx_async = 0;
701 qp->tx_bytes = 0;
702 qp->tx_pkts = 0;
703 qp->tx_ring_full = 0;
704 qp->tx_err_no_buf = 0;
705 qp->tx_memcpy = 0;
706 qp->tx_async = 0;
707 }
708
709 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
710 {
711 struct ntb_transport_ctx *nt = qp->transport;
712 struct pci_dev *pdev = nt->ndev->pdev;
713
714 dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
715
716 cancel_delayed_work_sync(&qp->link_work);
717 ntb_qp_link_down_reset(qp);
718
719 if (qp->event_handler)
720 qp->event_handler(qp->cb_data, qp->link_is_up);
721 }
722
723 static void ntb_qp_link_cleanup_work(struct work_struct *work)
724 {
725 struct ntb_transport_qp *qp = container_of(work,
726 struct ntb_transport_qp,
727 link_cleanup);
728 struct ntb_transport_ctx *nt = qp->transport;
729
730 ntb_qp_link_cleanup(qp);
731
732 if (nt->link_is_up)
733 schedule_delayed_work(&qp->link_work,
734 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
735 }
736
737 static void ntb_qp_link_down(struct ntb_transport_qp *qp)
738 {
739 schedule_work(&qp->link_cleanup);
740 }
741
742 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
743 {
744 struct ntb_transport_qp *qp;
745 u64 qp_bitmap_alloc;
746 int i;
747
748 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
749
750 /* Pass along the info to any clients */
751 for (i = 0; i < nt->qp_count; i++)
752 if (qp_bitmap_alloc & BIT_ULL(i)) {
753 qp = &nt->qp_vec[i];
754 ntb_qp_link_cleanup(qp);
755 cancel_work_sync(&qp->link_cleanup);
756 cancel_delayed_work_sync(&qp->link_work);
757 }
758
759 if (!nt->link_is_up)
760 cancel_delayed_work_sync(&nt->link_work);
761
762 /* The scratchpad registers keep the values if the remote side
763 * goes down, blast them now to give them a sane value the next
764 * time they are accessed
765 */
766 for (i = 0; i < MAX_SPAD; i++)
767 ntb_spad_write(nt->ndev, i, 0);
768 }
769
770 static void ntb_transport_link_cleanup_work(struct work_struct *work)
771 {
772 struct ntb_transport_ctx *nt =
773 container_of(work, struct ntb_transport_ctx, link_cleanup);
774
775 ntb_transport_link_cleanup(nt);
776 }
777
778 static void ntb_transport_event_callback(void *data)
779 {
780 struct ntb_transport_ctx *nt = data;
781
782 if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
783 schedule_delayed_work(&nt->link_work, 0);
784 else
785 schedule_work(&nt->link_cleanup);
786 }
787
788 static void ntb_transport_link_work(struct work_struct *work)
789 {
790 struct ntb_transport_ctx *nt =
791 container_of(work, struct ntb_transport_ctx, link_work.work);
792 struct ntb_dev *ndev = nt->ndev;
793 struct pci_dev *pdev = ndev->pdev;
794 resource_size_t size;
795 u32 val;
796 int rc, i, spad;
797
798 /* send the local info, in the opposite order of the way we read it */
799 for (i = 0; i < nt->mw_count; i++) {
800 size = nt->mw_vec[i].phys_size;
801
802 if (max_mw_size && size > max_mw_size)
803 size = max_mw_size;
804
805 spad = MW0_SZ_HIGH + (i * 2);
806 ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));
807
808 spad = MW0_SZ_LOW + (i * 2);
809 ntb_peer_spad_write(ndev, spad, (u32)size);
810 }
811
812 ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);
813
814 ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);
815
816 ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);
817
818 /* Query the remote side for its info */
819 val = ntb_spad_read(ndev, VERSION);
820 dev_dbg(&pdev->dev, "Remote version = %d\n", val);
821 if (val != NTB_TRANSPORT_VERSION)
822 goto out;
823
824 val = ntb_spad_read(ndev, NUM_QPS);
825 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
826 if (val != nt->qp_count)
827 goto out;
828
829 val = ntb_spad_read(ndev, NUM_MWS);
830 dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
831 if (val != nt->mw_count)
832 goto out;
833
834 for (i = 0; i < nt->mw_count; i++) {
835 u64 val64;
836
837 val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
838 val64 = (u64)val << 32;
839
840 val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
841 val64 |= val;
842
843 dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
844
845 rc = ntb_set_mw(nt, i, val64);
846 if (rc)
847 goto out1;
848 }
849
850 nt->link_is_up = true;
851
852 for (i = 0; i < nt->qp_count; i++) {
853 struct ntb_transport_qp *qp = &nt->qp_vec[i];
854
855 ntb_transport_setup_qp_mw(nt, i);
856
857 if (qp->client_ready)
858 schedule_delayed_work(&qp->link_work, 0);
859 }
860
861 return;
862
863 out1:
864 for (i = 0; i < nt->mw_count; i++)
865 ntb_free_mw(nt, i);
866 out:
867 if (ntb_link_is_up(ndev, NULL, NULL) == 1)
868 schedule_delayed_work(&nt->link_work,
869 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
870 }
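/*
 * Note (added commentary, not part of the original file): the handshake
 * above is symmetric.  Each side writes its memory window sizes, MW count,
 * QP count and protocol VERSION into the *peer's* scratchpads, then reads
 * its own local scratchpads to see what the peer advertised.  Only when
 * version, QP count and MW count all match does it allocate and program the
 * receive buffers with ntb_set_mw() and mark the transport link up;
 * otherwise the work item re-arms every NTB_LINK_DOWN_TIMEOUT (10) msecs
 * for as long as the hardware link stays up.
 */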
871
872 static void ntb_qp_link_work(struct work_struct *work)
873 {
874 struct ntb_transport_qp *qp = container_of(work,
875 struct ntb_transport_qp,
876 link_work.work);
877 struct pci_dev *pdev = qp->ndev->pdev;
878 struct ntb_transport_ctx *nt = qp->transport;
879 int val;
880
881 WARN_ON(!nt->link_is_up);
882
883 val = ntb_spad_read(nt->ndev, QP_LINKS);
884
885 ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));
886
887 /* query remote spad for qp ready bits */
888 ntb_peer_spad_read(nt->ndev, QP_LINKS);
889 dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
890
891 /* See if the remote side is up */
892 if (val & BIT(qp->qp_num)) {
893 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
894 qp->link_is_up = true;
895
896 if (qp->event_handler)
897 qp->event_handler(qp->cb_data, qp->link_is_up);
898
899 tasklet_schedule(&qp->rxc_db_work);
900 } else if (nt->link_is_up)
901 schedule_delayed_work(&qp->link_work,
902 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
903 }
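/*
 * Note (added commentary, not part of the original file): the
 * tasklet_schedule(&qp->rxc_db_work) call on the link-up path above matches
 * the commit subject, "Schedule to receive on QP link up".  The peer may
 * already have posted frames and rung the doorbell before this side
 * considered the queue link up; kicking the receive tasklet here drains
 * anything already sitting in the ring instead of waiting for the next
 * doorbell.
 */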
904
905 static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
906 unsigned int qp_num)
907 {
908 struct ntb_transport_qp *qp;
909 struct ntb_transport_mw *mw;
910 phys_addr_t mw_base;
911 resource_size_t mw_size;
912 unsigned int num_qps_mw, tx_size;
913 unsigned int mw_num, mw_count, qp_count;
914 u64 qp_offset;
915
916 mw_count = nt->mw_count;
917 qp_count = nt->qp_count;
918
919 mw_num = QP_TO_MW(nt, qp_num);
920 mw = &nt->mw_vec[mw_num];
921
922 qp = &nt->qp_vec[qp_num];
923 qp->qp_num = qp_num;
924 qp->transport = nt;
925 qp->ndev = nt->ndev;
926 qp->client_ready = false;
927 qp->event_handler = NULL;
928 ntb_qp_link_down_reset(qp);
929
930 if (mw_num < qp_count % mw_count)
931 num_qps_mw = qp_count / mw_count + 1;
932 else
933 num_qps_mw = qp_count / mw_count;
934
935 mw_base = nt->mw_vec[mw_num].phys_addr;
936 mw_size = nt->mw_vec[mw_num].phys_size;
937
938 tx_size = (unsigned int)mw_size / num_qps_mw;
939 qp_offset = tx_size * (qp_num / mw_count);
940
941 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
942 if (!qp->tx_mw)
943 return -EINVAL;
944
945 qp->tx_mw_phys = mw_base + qp_offset;
946 if (!qp->tx_mw_phys)
947 return -EINVAL;
948
949 tx_size -= sizeof(struct ntb_rx_info);
950 qp->rx_info = qp->tx_mw + tx_size;
951
952 /* Due to housekeeping, there must be at least 2 buffs */
953 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
954 qp->tx_max_entry = tx_size / qp->tx_max_frame;
955
956 if (nt->debugfs_node_dir) {
957 char debugfs_name[8];
958
959 snprintf(debugfs_name, 8, "qp%d", qp_num);
960 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
961 nt->debugfs_node_dir);
962
963 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
964 qp->debugfs_dir, qp,
965 &ntb_qp_debugfs_stats);
966 } else {
967 qp->debugfs_dir = NULL;
968 qp->debugfs_stats = NULL;
969 }
970
971 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
972 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
973
974 spin_lock_init(&qp->ntb_rx_q_lock);
975 spin_lock_init(&qp->ntb_tx_free_q_lock);
976
977 INIT_LIST_HEAD(&qp->rx_post_q);
978 INIT_LIST_HEAD(&qp->rx_pend_q);
979 INIT_LIST_HEAD(&qp->rx_free_q);
980 INIT_LIST_HEAD(&qp->tx_free_q);
981
982 tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
983 (unsigned long)qp);
984
985 return 0;
986 }
987
988 static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
989 {
990 struct ntb_transport_ctx *nt;
991 struct ntb_transport_mw *mw;
992 unsigned int mw_count, qp_count;
993 u64 qp_bitmap;
994 int node;
995 int rc, i;
996
997 if (ntb_db_is_unsafe(ndev))
998 dev_dbg(&ndev->dev,
999 "doorbell is unsafe, proceed anyway...\n");
1000 if (ntb_spad_is_unsafe(ndev))
1001 dev_dbg(&ndev->dev,
1002 "scratchpad is unsafe, proceed anyway...\n");
1003
1004 node = dev_to_node(&ndev->dev);
1005
1006 nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
1007 if (!nt)
1008 return -ENOMEM;
1009
1010 nt->ndev = ndev;
1011
1012 mw_count = ntb_mw_count(ndev);
1013
1014 nt->mw_count = mw_count;
1015
1016 nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
1017 GFP_KERNEL, node);
1018 if (!nt->mw_vec) {
1019 rc = -ENOMEM;
1020 goto err;
1021 }
1022
1023 for (i = 0; i < mw_count; i++) {
1024 mw = &nt->mw_vec[i];
1025
1026 rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
1027 &mw->xlat_align, &mw->xlat_align_size);
1028 if (rc)
1029 goto err1;
1030
1031 mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
1032 if (!mw->vbase) {
1033 rc = -ENOMEM;
1034 goto err1;
1035 }
1036
1037 mw->buff_size = 0;
1038 mw->xlat_size = 0;
1039 mw->virt_addr = NULL;
1040 mw->dma_addr = 0;
1041 }
1042
1043 qp_bitmap = ntb_db_valid_mask(ndev);
1044
1045 qp_count = ilog2(qp_bitmap);
1046 if (max_num_clients && max_num_clients < qp_count)
1047 qp_count = max_num_clients;
1048 else if (mw_count < qp_count)
1049 qp_count = mw_count;
1050
1051 qp_bitmap &= BIT_ULL(qp_count) - 1;
1052
1053 nt->qp_count = qp_count;
1054 nt->qp_bitmap = qp_bitmap;
1055 nt->qp_bitmap_free = qp_bitmap;
1056
1057 nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
1058 GFP_KERNEL, node);
1059 if (!nt->qp_vec) {
1060 rc = -ENOMEM;
1061 goto err2;
1062 }
1063
1064 if (nt_debugfs_dir) {
1065 nt->debugfs_node_dir =
1066 debugfs_create_dir(pci_name(ndev->pdev),
1067 nt_debugfs_dir);
1068 }
1069
1070 for (i = 0; i < qp_count; i++) {
1071 rc = ntb_transport_init_queue(nt, i);
1072 if (rc)
1073 goto err3;
1074 }
1075
1076 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
1077 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
1078
1079 rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
1080 if (rc)
1081 goto err3;
1082
1083 INIT_LIST_HEAD(&nt->client_devs);
1084 rc = ntb_bus_init(nt);
1085 if (rc)
1086 goto err4;
1087
1088 nt->link_is_up = false;
1089 ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
1090 ntb_link_event(ndev);
1091
1092 return 0;
1093
1094 err4:
1095 ntb_clear_ctx(ndev);
1096 err3:
1097 kfree(nt->qp_vec);
1098 err2:
1099 kfree(nt->mw_vec);
1100 err1:
1101 while (i--) {
1102 mw = &nt->mw_vec[i];
1103 iounmap(mw->vbase);
1104 }
1105 err:
1106 kfree(nt);
1107 return rc;
1108 }
1109
1110 static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
1111 {
1112 struct ntb_transport_ctx *nt = ndev->ctx;
1113 struct ntb_transport_qp *qp;
1114 u64 qp_bitmap_alloc;
1115 int i;
1116
1117 ntb_transport_link_cleanup(nt);
1118 cancel_work_sync(&nt->link_cleanup);
1119 cancel_delayed_work_sync(&nt->link_work);
1120
1121 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
1122
1123 /* verify that all the qp's are freed */
1124 for (i = 0; i < nt->qp_count; i++) {
1125 qp = &nt->qp_vec[i];
1126 if (qp_bitmap_alloc & BIT_ULL(i))
1127 ntb_transport_free_queue(qp);
1128 debugfs_remove_recursive(qp->debugfs_dir);
1129 }
1130
1131 ntb_link_disable(ndev);
1132 ntb_clear_ctx(ndev);
1133
1134 ntb_bus_remove(nt);
1135
1136 for (i = nt->mw_count; i--; ) {
1137 ntb_free_mw(nt, i);
1138 iounmap(nt->mw_vec[i].vbase);
1139 }
1140
1141 kfree(nt->qp_vec);
1142 kfree(nt->mw_vec);
1143 kfree(nt);
1144 }
1145
1146 static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1147 {
1148 struct ntb_queue_entry *entry;
1149 void *cb_data;
1150 unsigned int len;
1151 unsigned long irqflags;
1152
1153 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1154
1155 while (!list_empty(&qp->rx_post_q)) {
1156 entry = list_first_entry(&qp->rx_post_q,
1157 struct ntb_queue_entry, entry);
1158 if (!(entry->flags & DESC_DONE_FLAG))
1159 break;
1160
1161 entry->rx_hdr->flags = 0;
1162 iowrite32(entry->index, &qp->rx_info->entry);
1163
1164 cb_data = entry->cb_data;
1165 len = entry->len;
1166
1167 list_move_tail(&entry->entry, &qp->rx_free_q);
1168
1169 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1170
1171 if (qp->rx_handler && qp->client_ready)
1172 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1173
1174 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1175 }
1176
1177 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1178 }
1179
1180 static void ntb_rx_copy_callback(void *data)
1181 {
1182 struct ntb_queue_entry *entry = data;
1183
1184 entry->flags |= DESC_DONE_FLAG;
1185
1186 ntb_complete_rxc(entry->qp);
1187 }
1188
1189 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1190 {
1191 void *buf = entry->buf;
1192 size_t len = entry->len;
1193
1194 memcpy(buf, offset, len);
1195
1196 /* Ensure that the data is fully copied out before clearing the flag */
1197 wmb();
1198
1199 ntb_rx_copy_callback(entry);
1200 }
1201
1202 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1203 {
1204 struct dma_async_tx_descriptor *txd;
1205 struct ntb_transport_qp *qp = entry->qp;
1206 struct dma_chan *chan = qp->dma_chan;
1207 struct dma_device *device;
1208 size_t pay_off, buff_off, len;
1209 struct dmaengine_unmap_data *unmap;
1210 dma_cookie_t cookie;
1211 void *buf = entry->buf;
1212
1213 len = entry->len;
1214
1215 if (!chan)
1216 goto err;
1217
1218 if (len < copy_bytes)
1219 goto err_wait;
1220
1221 device = chan->device;
1222 pay_off = (size_t)offset & ~PAGE_MASK;
1223 buff_off = (size_t)buf & ~PAGE_MASK;
1224
1225 if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
1226 goto err_wait;
1227
1228 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
1229 if (!unmap)
1230 goto err_wait;
1231
1232 unmap->len = len;
1233 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
1234 pay_off, len, DMA_TO_DEVICE);
1235 if (dma_mapping_error(device->dev, unmap->addr[0]))
1236 goto err_get_unmap;
1237
1238 unmap->to_cnt = 1;
1239
1240 unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
1241 buff_off, len, DMA_FROM_DEVICE);
1242 if (dma_mapping_error(device->dev, unmap->addr[1]))
1243 goto err_get_unmap;
1244
1245 unmap->from_cnt = 1;
1246
1247 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
1248 unmap->addr[0], len,
1249 DMA_PREP_INTERRUPT);
1250 if (!txd)
1251 goto err_get_unmap;
1252
1253 txd->callback = ntb_rx_copy_callback;
1254 txd->callback_param = entry;
1255 dma_set_unmap(txd, unmap);
1256
1257 cookie = dmaengine_submit(txd);
1258 if (dma_submit_error(cookie))
1259 goto err_set_unmap;
1260
1261 dmaengine_unmap_put(unmap);
1262
1263 qp->last_cookie = cookie;
1264
1265 qp->rx_async++;
1266
1267 return;
1268
1269 err_set_unmap:
1270 dmaengine_unmap_put(unmap);
1271 err_get_unmap:
1272 dmaengine_unmap_put(unmap);
1273 err_wait:
1274 /* If the callbacks come out of order, the writing of the index to the
1275 * last completed will be out of order. This may result in the
1276 * receive stalling forever.
1277 */
1278 dma_sync_wait(chan, qp->last_cookie);
1279 err:
1280 ntb_memcpy_rx(entry, offset);
1281 qp->rx_memcpy++;
1282 }
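/*
 * Note (added commentary, not part of the original file): the receive path
 * falls back to the CPU memcpy whenever no DMA channel is bound, the payload
 * is shorter than the copy_bytes threshold, the buffers fail the engine's
 * alignment check, or descriptor/mapping setup fails.  The dma_sync_wait()
 * on the fallback path waits for the last submitted DMA copy so that a CPU
 * copy cannot complete (and advance the consumer index) ahead of an earlier
 * in-flight DMA copy, which would otherwise stall the ring.
 */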
1283
1284 static int ntb_process_rxc(struct ntb_transport_qp *qp)
1285 {
1286 struct ntb_payload_header *hdr;
1287 struct ntb_queue_entry *entry;
1288 void *offset;
1289
1290 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1291 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
1292
1293 dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
1294 qp->qp_num, hdr->ver, hdr->len, hdr->flags);
1295
1296 if (!(hdr->flags & DESC_DONE_FLAG)) {
1297 dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
1298 qp->rx_ring_empty++;
1299 return -EAGAIN;
1300 }
1301
1302 if (hdr->flags & LINK_DOWN_FLAG) {
1303 dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
1304 ntb_qp_link_down(qp);
1305 hdr->flags = 0;
1306 return -EAGAIN;
1307 }
1308
1309 if (hdr->ver != (u32)qp->rx_pkts) {
1310 dev_dbg(&qp->ndev->pdev->dev,
1311 "version mismatch, expected %llu - got %u\n",
1312 qp->rx_pkts, hdr->ver);
1313 qp->rx_err_ver++;
1314 return -EIO;
1315 }
1316
1317 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
1318 if (!entry) {
1319 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
1320 qp->rx_err_no_buf++;
1321 return -EAGAIN;
1322 }
1323
1324 entry->rx_hdr = hdr;
1325 entry->index = qp->rx_index;
1326
1327 if (hdr->len > entry->len) {
1328 dev_dbg(&qp->ndev->pdev->dev,
1329 "receive buffer overflow! Wanted %d got %d\n",
1330 hdr->len, entry->len);
1331 qp->rx_err_oflow++;
1332
1333 entry->len = -EIO;
1334 entry->flags |= DESC_DONE_FLAG;
1335
1336 ntb_complete_rxc(qp);
1337 } else {
1338 dev_dbg(&qp->ndev->pdev->dev,
1339 "RX OK index %u ver %u size %d into buf size %d\n",
1340 qp->rx_index, hdr->ver, hdr->len, entry->len);
1341
1342 qp->rx_bytes += hdr->len;
1343 qp->rx_pkts++;
1344
1345 entry->len = hdr->len;
1346
1347 ntb_async_rx(entry, offset);
1348 }
1349
1350 qp->rx_index++;
1351 qp->rx_index %= qp->rx_max_entry;
1352
1353 return 0;
1354 }
1355
1356 static void ntb_transport_rxc_db(unsigned long data)
1357 {
1358 struct ntb_transport_qp *qp = (void *)data;
1359 int rc, i;
1360
1361 dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
1362 __func__, qp->qp_num);
1363
1364 /* Limit the number of packets processed in a single interrupt to
1365 * provide fairness to others
1366 */
1367 for (i = 0; i < qp->rx_max_entry; i++) {
1368 rc = ntb_process_rxc(qp);
1369 if (rc)
1370 break;
1371 }
1372
1373 if (i && qp->dma_chan)
1374 dma_async_issue_pending(qp->dma_chan);
1375
1376 if (i == qp->rx_max_entry) {
1377 /* there is more work to do */
1378 tasklet_schedule(&qp->rxc_db_work);
1379 } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
1380 /* the doorbell bit is set: clear it */
1381 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
1382 /* ntb_db_read ensures ntb_db_clear write is committed */
1383 ntb_db_read(qp->ndev);
1384
1385 /* an interrupt may have arrived between finishing
1386 * ntb_process_rxc and clearing the doorbell bit:
1387 * there might be some more work to do.
1388 */
1389 tasklet_schedule(&qp->rxc_db_work);
1390 }
1391 }
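/*
 * Note (added commentary, not part of the original file): doorbell bit N is
 * dedicated to queue pair N (qp_bit = BIT_ULL(qp_num)), so the peer's
 * ntb_peer_db_set() from the TX completion path lands here, and the vector
 * callback further below schedules only the owning queue's tasklet.
 * Processing is capped at rx_max_entry frames per tasklet run for fairness;
 * the tasklet re-schedules itself whenever the ring may still hold work.
 */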
1392
1393 static void ntb_tx_copy_callback(void *data)
1394 {
1395 struct ntb_queue_entry *entry = data;
1396 struct ntb_transport_qp *qp = entry->qp;
1397 struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
1398
1399 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1400
1401 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
1402
1403 /* The entry length can only be zero if the packet is intended to be a
1404 * "link down" or similar. Since no payload is being sent in these
1405 * cases, there is nothing to add to the completion queue.
1406 */
1407 if (entry->len > 0) {
1408 qp->tx_bytes += entry->len;
1409
1410 if (qp->tx_handler)
1411 qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1412 entry->len);
1413 }
1414
1415 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1416 }
1417
1418 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
1419 {
1420 #ifdef ARCH_HAS_NOCACHE_UACCESS
1421 /*
1422 * Using non-temporal mov to improve performance on non-cached
1423 * writes, even though we aren't actually copying from user space.
1424 */
1425 __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
1426 #else
1427 memcpy_toio(offset, entry->buf, entry->len);
1428 #endif
1429
1430 /* Ensure that the data is fully copied out before setting the flags */
1431 wmb();
1432
1433 ntb_tx_copy_callback(entry);
1434 }
1435
1436 static void ntb_async_tx(struct ntb_transport_qp *qp,
1437 struct ntb_queue_entry *entry)
1438 {
1439 struct ntb_payload_header __iomem *hdr;
1440 struct dma_async_tx_descriptor *txd;
1441 struct dma_chan *chan = qp->dma_chan;
1442 struct dma_device *device;
1443 size_t dest_off, buff_off;
1444 struct dmaengine_unmap_data *unmap;
1445 dma_addr_t dest;
1446 dma_cookie_t cookie;
1447 void __iomem *offset;
1448 size_t len = entry->len;
1449 void *buf = entry->buf;
1450
1451 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1452 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1453 entry->tx_hdr = hdr;
1454
1455 iowrite32(entry->len, &hdr->len);
1456 iowrite32((u32)qp->tx_pkts, &hdr->ver);
1457
1458 if (!chan)
1459 goto err;
1460
1461 if (len < copy_bytes)
1462 goto err;
1463
1464 device = chan->device;
1465 dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
1466 buff_off = (size_t)buf & ~PAGE_MASK;
1467 dest_off = (size_t)dest & ~PAGE_MASK;
1468
1469 if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
1470 goto err;
1471
1472 unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
1473 if (!unmap)
1474 goto err;
1475
1476 unmap->len = len;
1477 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
1478 buff_off, len, DMA_TO_DEVICE);
1479 if (dma_mapping_error(device->dev, unmap->addr[0]))
1480 goto err_get_unmap;
1481
1482 unmap->to_cnt = 1;
1483
1484 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
1485 DMA_PREP_INTERRUPT);
1486 if (!txd)
1487 goto err_get_unmap;
1488
1489 txd->callback = ntb_tx_copy_callback;
1490 txd->callback_param = entry;
1491 dma_set_unmap(txd, unmap);
1492
1493 cookie = dmaengine_submit(txd);
1494 if (dma_submit_error(cookie))
1495 goto err_set_unmap;
1496
1497 dmaengine_unmap_put(unmap);
1498
1499 dma_async_issue_pending(chan);
1500 qp->tx_async++;
1501
1502 return;
1503 err_set_unmap:
1504 dmaengine_unmap_put(unmap);
1505 err_get_unmap:
1506 dmaengine_unmap_put(unmap);
1507 err:
1508 ntb_memcpy_tx(entry, offset);
1509 qp->tx_memcpy++;
1510 }
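/*
 * Note (added commentary, not part of the original file): the frame header
 * is filled in two steps.  len and ver are written into the peer-visible
 * header up front, but DESC_DONE_FLAG is only set from
 * ntb_tx_copy_callback() after the payload copy (CPU or DMA) has completed,
 * followed by the peer doorbell.  The receiver polls that flag in
 * ntb_process_rxc(), so a frame is never consumed before its payload is
 * fully in the memory window.
 */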
1511
1512 static int ntb_process_tx(struct ntb_transport_qp *qp,
1513 struct ntb_queue_entry *entry)
1514 {
1515 if (qp->tx_index == qp->remote_rx_info->entry) {
1516 qp->tx_ring_full++;
1517 return -EAGAIN;
1518 }
1519
1520 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1521 if (qp->tx_handler)
1522 qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
1523
1524 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1525 &qp->tx_free_q);
1526 return 0;
1527 }
1528
1529 ntb_async_tx(qp, entry);
1530
1531 qp->tx_index++;
1532 qp->tx_index %= qp->tx_max_entry;
1533
1534 qp->tx_pkts++;
1535
1536 return 0;
1537 }
1538
1539 static void ntb_send_link_down(struct ntb_transport_qp *qp)
1540 {
1541 struct pci_dev *pdev = qp->ndev->pdev;
1542 struct ntb_queue_entry *entry;
1543 int i, rc;
1544
1545 if (!qp->link_is_up)
1546 return;
1547
1548 dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
1549
1550 for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1551 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1552 if (entry)
1553 break;
1554 msleep(100);
1555 }
1556
1557 if (!entry)
1558 return;
1559
1560 entry->cb_data = NULL;
1561 entry->buf = NULL;
1562 entry->len = 0;
1563 entry->flags = LINK_DOWN_FLAG;
1564
1565 rc = ntb_process_tx(qp, entry);
1566 if (rc)
1567 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
1568 qp->qp_num);
1569
1570 ntb_qp_link_down_reset(qp);
1571 }
1572
1573 static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
1574 {
1575 return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
1576 }
1577
1578 /**
1579 * ntb_transport_create_queue - Create a new NTB transport layer queue
1580 * @data: pointer for callback data
1581 * @client_dev: &struct device pointer of the NTB client device
1582 * @handlers: pointers to the receive, transmit, and event callback functions
1583 *
1584 * Create a new NTB transport layer queue and provide the queue with a callback
1585 * routine for both transmit and receive. The receive callback routine will be
1586 * used to pass up data when the transport has received it on the queue. The
1587 * transmit callback routine will be called when the transport has completed the
1588 * transmission of the data on the queue and the data is ready to be freed.
1589 *
1590 * RETURNS: pointer to newly created ntb_queue, NULL on error.
1591 */
1592 struct ntb_transport_qp *
1593 ntb_transport_create_queue(void *data, struct device *client_dev,
1594 const struct ntb_queue_handlers *handlers)
1595 {
1596 struct ntb_dev *ndev;
1597 struct pci_dev *pdev;
1598 struct ntb_transport_ctx *nt;
1599 struct ntb_queue_entry *entry;
1600 struct ntb_transport_qp *qp;
1601 u64 qp_bit;
1602 unsigned int free_queue;
1603 dma_cap_mask_t dma_mask;
1604 int node;
1605 int i;
1606
1607 ndev = dev_ntb(client_dev->parent);
1608 pdev = ndev->pdev;
1609 nt = ndev->ctx;
1610
1611 node = dev_to_node(&ndev->dev);
1612
1613 free_queue = ffs(nt->qp_bitmap_free);
1614 if (!free_queue)
1615 goto err;
1616
1617 /* decrement free_queue to make it zero based */
1618 free_queue--;
1619
1620 qp = &nt->qp_vec[free_queue];
1621 qp_bit = BIT_ULL(qp->qp_num);
1622
1623 nt->qp_bitmap_free &= ~qp_bit;
1624
1625 qp->cb_data = data;
1626 qp->rx_handler = handlers->rx_handler;
1627 qp->tx_handler = handlers->tx_handler;
1628 qp->event_handler = handlers->event_handler;
1629
1630 dma_cap_zero(dma_mask);
1631 dma_cap_set(DMA_MEMCPY, dma_mask);
1632
1633 if (use_dma) {
1634 qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
1635 (void *)(unsigned long)node);
1636 if (!qp->dma_chan)
1637 dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
1638 } else {
1639 qp->dma_chan = NULL;
1640 }
1641 dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");
1642
1643 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1644 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
1645 if (!entry)
1646 goto err1;
1647
1648 entry->qp = qp;
1649 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
1650 &qp->rx_free_q);
1651 }
1652
1653 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1654 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
1655 if (!entry)
1656 goto err2;
1657
1658 entry->qp = qp;
1659 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1660 &qp->tx_free_q);
1661 }
1662
1663 ntb_db_clear(qp->ndev, qp_bit);
1664 ntb_db_clear_mask(qp->ndev, qp_bit);
1665
1666 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1667
1668 return qp;
1669
1670 err2:
1671 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1672 kfree(entry);
1673 err1:
1674 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1675 kfree(entry);
1676 if (qp->dma_chan)
1677 dma_release_channel(qp->dma_chan);
1678 nt->qp_bitmap_free |= qp_bit;
1679 err:
1680 return NULL;
1681 }
1682 EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
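/*
 * Example (added commentary, not part of the original file): a minimal
 * sketch of queue setup from a client probe callback.  "my_rx", "my_tx",
 * "my_event", "priv", "bufs" and "buf_len" are placeholders; error handling
 * is omitted.
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx,
 *		.tx_handler	= my_tx,
 *		.event_handler	= my_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, client_dev, &my_handlers);
 *	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++)
 *		ntb_transport_rx_enqueue(qp, bufs[i], bufs[i], buf_len);
 *	ntb_transport_link_up(qp);
 *
 * Once the event handler reports link up, transmits are posted with
 * ntb_transport_tx_enqueue(); the tx_handler runs after the payload has
 * been copied into the peer's memory window and the buffer may be reused.
 */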
1683
1684 /**
1685 * ntb_transport_free_queue - Frees NTB transport queue
1686 * @qp: NTB queue to be freed
1687 *
1688 * Frees NTB transport queue
1689 */
1690 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1691 {
1692 struct ntb_transport_ctx *nt = qp->transport;
1693 struct pci_dev *pdev;
1694 struct ntb_queue_entry *entry;
1695 u64 qp_bit;
1696
1697 if (!qp)
1698 return;
1699
1700 pdev = qp->ndev->pdev;
1701
1702 if (qp->dma_chan) {
1703 struct dma_chan *chan = qp->dma_chan;
1704 /* Putting the dma_chan to NULL will force any new traffic to be
1705 * processed by the CPU instead of the DMA engine
1706 */
1707 qp->dma_chan = NULL;
1708
1709 /* Try to be nice and wait for any queued DMA engine
1710 * transactions to process before smashing it with a rock
1711 */
1712 dma_sync_wait(chan, qp->last_cookie);
1713 dmaengine_terminate_all(chan);
1714 dma_release_channel(chan);
1715 }
1716
1717 qp_bit = BIT_ULL(qp->qp_num);
1718
1719 ntb_db_set_mask(qp->ndev, qp_bit);
1720 tasklet_disable(&qp->rxc_db_work);
1721
1722 cancel_delayed_work_sync(&qp->link_work);
1723
1724 qp->cb_data = NULL;
1725 qp->rx_handler = NULL;
1726 qp->tx_handler = NULL;
1727 qp->event_handler = NULL;
1728
1729 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1730 kfree(entry);
1731
1732 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
1733 dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
1734 kfree(entry);
1735 }
1736
1737 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
1738 dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
1739 kfree(entry);
1740 }
1741
1742 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1743 kfree(entry);
1744
1745 nt->qp_bitmap_free |= qp_bit;
1746
1747 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1748 }
1749 EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
1750
1751 /**
1752 * ntb_transport_rx_remove - Dequeues enqueued rx packet
1753 * @qp: NTB transport layer queue from which the buffer is dequeued
1754 * @len: pointer to variable to write enqueued buffers length
1755 *
1756 * Dequeues unused buffers from receive queue. Should only be used during
1757 * shutdown of qp.
1758 *
1759 * RETURNS: the per-buffer cb pointer passed to ntb_transport_rx_enqueue(), or NULL on error.
1760 */
1761 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1762 {
1763 struct ntb_queue_entry *entry;
1764 void *buf;
1765
1766 if (!qp || qp->client_ready)
1767 return NULL;
1768
1769 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
1770 if (!entry)
1771 return NULL;
1772
1773 buf = entry->cb_data;
1774 *len = entry->len;
1775
1776 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
1777
1778 return buf;
1779 }
1780 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
1781
1782 /**
1783 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1784 * @qp: NTB transport layer queue the entry is to be enqueued on
1785 * @cb: per buffer pointer for callback function to use
1786 * @data: pointer to data buffer that incoming packets will be copied into
1787 * @len: length of the data buffer
1788 *
1789 * Enqueue a new receive buffer onto the transport queue into which a NTB
1790 * payload can be received.
1791 *
1792 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1793 */
1794 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1795 unsigned int len)
1796 {
1797 struct ntb_queue_entry *entry;
1798
1799 if (!qp)
1800 return -EINVAL;
1801
1802 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
1803 if (!entry)
1804 return -ENOMEM;
1805
1806 entry->cb_data = cb;
1807 entry->buf = data;
1808 entry->len = len;
1809 entry->flags = 0;
1810
1811 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
1812
1813 tasklet_schedule(&qp->rxc_db_work);
1814
1815 return 0;
1816 }
1817 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
1818
1819 /**
1820 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1821 * @qp: NTB transport layer queue the entry is to be enqueued on
1822 * @cb: per buffer pointer for callback function to use
1823 * @data: pointer to data buffer that will be sent
1824 * @len: length of the data buffer
1825 *
1826 * Enqueue a new transmit buffer onto the transport queue from which a NTB
1827 * payload will be transmitted. This assumes that a lock is being held to
1828 * serialize access to the qp.
1829 *
1830 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1831 */
1832 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1833 unsigned int len)
1834 {
1835 struct ntb_queue_entry *entry;
1836 int rc;
1837
1838 if (!qp || !qp->link_is_up || !len)
1839 return -EINVAL;
1840
1841 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1842 if (!entry) {
1843 qp->tx_err_no_buf++;
1844 return -ENOMEM;
1845 }
1846
1847 entry->cb_data = cb;
1848 entry->buf = data;
1849 entry->len = len;
1850 entry->flags = 0;
1851
1852 rc = ntb_process_tx(qp, entry);
1853 if (rc)
1854 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1855 &qp->tx_free_q);
1856
1857 return rc;
1858 }
1859 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
1860
1861 /**
1862 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
1863 * @qp: NTB transport layer queue to be enabled
1864 *
1865 * Notify NTB transport layer of client readiness to use queue
1866 */
1867 void ntb_transport_link_up(struct ntb_transport_qp *qp)
1868 {
1869 if (!qp)
1870 return;
1871
1872 qp->client_ready = true;
1873
1874 if (qp->transport->link_is_up)
1875 schedule_delayed_work(&qp->link_work, 0);
1876 }
1877 EXPORT_SYMBOL_GPL(ntb_transport_link_up);
1878
1879 /**
1880 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1881 * @qp: NTB transport layer queue to be disabled
1882 *
1883 * Notify NTB transport layer of client's desire to no longer receive data on
1884 * transport queue specified. It is the client's responsibility to ensure all
1885 * entries on queue are purged or otherwise handled appropriately.
1886 */
1887 void ntb_transport_link_down(struct ntb_transport_qp *qp)
1888 {
1889 struct pci_dev *pdev;
1890 int val;
1891
1892 if (!qp)
1893 return;
1894
1895 pdev = qp->ndev->pdev;
1896 qp->client_ready = false;
1897
1898 val = ntb_spad_read(qp->ndev, QP_LINKS);
1899
1900 ntb_peer_spad_write(qp->ndev, QP_LINKS,
1901 val & ~BIT(qp->qp_num));
1902
1903 if (qp->link_is_up)
1904 ntb_send_link_down(qp);
1905 else
1906 cancel_delayed_work_sync(&qp->link_work);
1907 }
1908 EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1909
1910 /**
1911 * ntb_transport_link_query - Query transport link state
1912 * @qp: NTB transport layer queue to be queried
1913 *
1914 * Query connectivity to the remote system of the NTB transport queue
1915 *
1916 * RETURNS: true for link up or false for link down
1917 */
1918 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1919 {
1920 if (!qp)
1921 return false;
1922
1923 return qp->link_is_up;
1924 }
1925 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
1926
1927 /**
1928 * ntb_transport_qp_num - Query the qp number
1929 * @qp: NTB transport layer queue to be queried
1930 *
1931 * Query qp number of the NTB transport queue
1932 *
1933 * RETURNS: a zero based number specifying the qp number
1934 */
1935 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1936 {
1937 if (!qp)
1938 return 0;
1939
1940 return qp->qp_num;
1941 }
1942 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1943
1944 /**
1945 * ntb_transport_max_size - Query the max payload size of a qp
1946 * @qp: NTB transport layer queue to be queried
1947 *
1948 * Query the maximum payload size permissible on the given qp
1949 *
1950 * RETURNS: the max payload size of a qp
1951 */
1952 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1953 {
1954 unsigned int max;
1955
1956 if (!qp)
1957 return 0;
1958
1959 if (!qp->dma_chan)
1960 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1961
1962 /* If DMA engine usage is possible, try to find the max size for that */
1963 max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
1964 max -= max % (1 << qp->dma_chan->device->copy_align);
1965
1966 return max;
1967 }
1968 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
1969
1970 static void ntb_transport_doorbell_callback(void *data, int vector)
1971 {
1972 struct ntb_transport_ctx *nt = data;
1973 struct ntb_transport_qp *qp;
1974 u64 db_bits;
1975 unsigned int qp_num;
1976
1977 db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
1978 ntb_db_vector_mask(nt->ndev, vector));
1979
1980 while (db_bits) {
1981 qp_num = __ffs(db_bits);
1982 qp = &nt->qp_vec[qp_num];
1983
1984 tasklet_schedule(&qp->rxc_db_work);
1985
1986 db_bits &= ~BIT_ULL(qp_num);
1987 }
1988 }
1989
1990 static const struct ntb_ctx_ops ntb_transport_ops = {
1991 .link_event = ntb_transport_event_callback,
1992 .db_event = ntb_transport_doorbell_callback,
1993 };
1994
1995 static struct ntb_client ntb_transport_client = {
1996 .ops = {
1997 .probe = ntb_transport_probe,
1998 .remove = ntb_transport_free,
1999 },
2000 };
2001
2002 static int __init ntb_transport_init(void)
2003 {
2004 int rc;
2005
2006 pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
2007
2008 if (debugfs_initialized())
2009 nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2010
2011 rc = bus_register(&ntb_transport_bus);
2012 if (rc)
2013 goto err_bus;
2014
2015 rc = ntb_register_client(&ntb_transport_client);
2016 if (rc)
2017 goto err_client;
2018
2019 return 0;
2020
2021 err_client:
2022 bus_unregister(&ntb_transport_bus);
2023 err_bus:
2024 debugfs_remove_recursive(nt_debugfs_dir);
2025 return rc;
2026 }
2027 module_init(ntb_transport_init);
2028
2029 static void __exit ntb_transport_exit(void)
2030 {
2031 debugfs_remove_recursive(nt_debugfs_dir);
2032
2033 ntb_unregister_client(&ntb_transport_client);
2034 bus_unregister(&ntb_transport_bus);
2035 }
2036 module_exit(ntb_transport_exit);