dmaengine: remove DMA unmap from drivers
[deliverable/linux.git] / drivers / ntb / ntb_transport.c
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler) (void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
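
/*
 * Illustration of the QP_TO_MW() striping above (numbers purely for
 * example): with ntb_max_mw(ndev) == 2 and four queues, QPs 0 and 2
 * map to MW0 while QPs 1 and 3 map to MW1, i.e. queues are distributed
 * round-robin across the available memory windows.
 */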

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);
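
/*
 * Illustrative sketch only (not part of this driver): a minimal client
 * pairing with the registration API above.  All "example_*" names are
 * hypothetical and the probe/remove bodies are stubs; the struct
 * ntb_client fields follow the container_of() usage in ntb_client_probe()
 * and ntb_client_remove().
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	/* allocate per-device state, create transport queues, etc. */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	/* undo everything done in example_probe() */
}

static struct ntb_client example_client = {
	.driver.name = "example",
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	int rc;

	/* create "example0", "example1", ... devices on the ntb_bus */
	rc = ntb_register_client_dev("example");
	if (rc)
		return rc;

	/* then bind this driver to them via ntb_match_bus() */
	rc = ntb_register_client(&example_client);
	if (rc)
		ntb_unregister_client_dev("example");
	return rc;
}
module_init(example_init);
#endif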

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffers */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* set up the header offsets with 0s */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}
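
/*
 * Worked sizing example for the function above (numbers illustrative
 * only): an 8 MB memory window shared evenly by four QPs yields a 2 MB
 * slice per QP.  After reserving sizeof(struct ntb_rx_info) at the
 * tail, rx_max_frame = min(transport_mtu, slice / 2) = 0x401E bytes,
 * giving roughly 127 rx entries, each with its payload header at the
 * end of its frame.
 */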

static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		/* report the size before zeroing it out */
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		mw->size = 0;
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}
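
/*
 * ALIGN() example for the allocation above: a requested size of
 * 0x100001 bytes (1 MB + 1) rounds up to 0x101000 (1 MB + 4 KB), so the
 * coherent buffer always spans whole 4 KB pages.
 */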

static void ntb_qp_link_cleanup(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
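
/*
 * Scratchpad handshake summary for the function above: the spad indices
 * come from the VERSION..MW1_SZ_LOW enum earlier in this file.  Each
 * side writes the MW sizes, NUM_MWS, NUM_QPS, and finally VERSION, then
 * reads the peer's values in the reverse order; because VERSION is
 * written last and read first, a matching VERSION implies the size and
 * count fields behind it are already valid.
 */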

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
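
/*
 * QP_LINKS bit usage illustrated: each queue advertises readiness with
 * 1 << qp_num, so QP 3 ORs 0x8 into the peer's QP_LINKS scratchpad.
 * The link is only declared up once that same bit reads back set from
 * the remote side, i.e. both peers have marked the queue ready.
 */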

static int ntb_transport_init_queue(struct ntb_transport *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num, mw_max;
	u64 qp_offset;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffers */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (ntb_query_debugfs(nt->ndev)) {
		/* "qp" plus a qp_num of up to 63 needs at least 5 bytes */
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     ntb_query_debugfs(nt->ndev));

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	return 0;
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;
	unsigned long flags;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
		DMA_PREP_INTERRUPT;
	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len, flags);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	/* drop the reference taken by dma_set_unmap() ... */
	dmaengine_unmap_put(unmap);
err_get_unmap:
	/* ... and the caller's reference from dmaengine_get_unmap_data() */
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}
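
/*
 * Reference-count sketch of the dmaengine unmap pattern used in
 * ntb_async_rx() above and ntb_async_tx() below (assuming
 * dmaengine_get_unmap_data() hands back an object with one reference
 * held by the caller):
 *
 *	unmap = dmaengine_get_unmap_data(...);	caller's ref:	refs = 1
 *	dma_set_unmap(txd, unmap);		engine's ref:	refs = 2
 *	dmaengine_submit(txd);
 *	dmaengine_unmap_put(unmap);		drop ours:	refs = 1
 *
 * The final reference is dropped by the dmaengine core once the
 * descriptor completes, which is when the pages actually get unmapped.
 * That is why err_set_unmap falls through to a second
 * dmaengine_unmap_put(): both references must be unwound.
 */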

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		goto err;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	if (hdr->len > entry->len) {
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);

		goto err;
	}

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

out:
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
		     &qp->rx_pend_q);
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	goto out;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc, i;

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_doorbell(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;
	unsigned long flags;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t) buf & ~PAGE_MASK;
	dest_off = (size_t) dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
		DMA_PREP_INTERRUPT;
	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     flags);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	/* unwind both the dma_set_unmap() reference and our own */
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
		entry->buf);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device on which the transport resides
 * @handlers: receive, transmit, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue. The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan)
		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
	else
		dmaengine_get();

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err3;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err3:
	tasklet_disable(&qp->rx_work);
err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
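
/*
 * Illustrative use of the API above from a hypothetical client probe;
 * every "example_*" identifier is made up for the sketch.  The handler
 * signatures follow struct ntb_queue_handlers.
 */
#if 0
static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* "data" is the cb pointer given to ntb_transport_rx_enqueue() */
}

static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* the transmit buffer may be freed or reused from here on */
}

static void example_event(void *data, int status)
{
	/* status is NTB_LINK_UP or NTB_LINK_DOWN */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler = example_rx,
	.tx_handler = example_tx,
	.event_handler = example_event,
};

static int example_setup(void *priv, struct pci_dev *pdev)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(priv, pdev, &example_handlers);
	if (!qp)
		return -EIO;

	/* declare this client ready; see ntb_transport_link_up() below */
	ntb_transport_link_up(qp);
	return 0;
}
#endif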

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dmaengine_put();
	}

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
	tasklet_disable(&qp->rx_work);

	cancel_delayed_work_sync(&qp->link_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB transport layer queue from which the buffer is removed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: the buffer's cb pointer on success, or NULL on error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
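
/*
 * Posting receive buffers, sketched (buffer allocation is up to the
 * client; kmalloc and the names here are just for illustration):
 *
 *	for (i = 0; i < NUM_RX_BUFS; i++) {
 *		void *buf = kmalloc(MAX_FRAME, GFP_KERNEL);
 *
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf, MAX_FRAME);
 *		...
 *	}
 *
 * Passing the buffer itself as @cb means the rx_handler gets the same
 * pointer back in its "data" argument when the payload arrives.
 */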

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
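
/*
 * Transmit path, sketched from the caller's side (names hypothetical).
 * The payload must fit in ntb_transport_max_size(), and -EAGAIN signals
 * a full ring that may be retried once the tx_handler has run:
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EMSGSIZE;
 *
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, len);
 *	if (rc == -EAGAIN)
 *		stop_queue_and_retry_later();
 */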

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
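
/*
 * Arithmetic example for ntb_transport_max_size() (values illustrative,
 * assuming a 12-byte payload header and copy_align == 2, i.e. 4-byte
 * alignment): tx_max_frame = 16414 gives max = 16414 - 12 = 16402,
 * which rounds down to 16400 so every DMA copy stays aligned.
 */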