/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;

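/*
 * Per-open state for the debugfs dump files below: a heap buffer that
 * dump_qp()/dump_stag() fill at open time and debugfs_read() copies back
 * to userspace.
 */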
struct c4iw_debugfs_data {
        struct c4iw_dev *devp;
        char *buf;
        int bufsize;
        int pos;
};

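/* idr_for_each() callback used to size a dump buffer: counts every entry. */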
static int count_idrs(int id, void *p, void *data)
{
        int *countp = data;

        *countp = *countp + 1;
        return 0;
}

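/*
 * Common .read for the debugfs files below: the whole dump is built in
 * d->buf when the file is opened, so reads just copy out of that snapshot.
 */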
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
                            loff_t *ppos)
{
        struct c4iw_debugfs_data *d = file->private_data;

        return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

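/*
 * idr_for_each() callback: append one line describing a QP, including the
 * endpoint tid and TCP 4-tuple when the QP has a connection.  Entries not
 * keyed by the SQ qid are skipped so each QP is reported once; returning 1
 * stops the walk when the buffer is full.
 */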
static int dump_qp(int id, void *p, void *data)
{
        struct c4iw_qp *qp = p;
        struct c4iw_debugfs_data *qpd = data;
        int space;
        int cc;

        if (id != qp->wq.sq.qid)
                return 0;

        space = qpd->bufsize - qpd->pos - 1;
        if (space == 0)
                return 1;

        if (qp->ep)
                cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
                              "ep tid %u state %u %pI4:%u->%pI4:%u\n",
                              qp->wq.sq.qid, (int)qp->attr.state,
                              qp->ep->hwtid, (int)qp->ep->com.state,
                              &qp->ep->com.local_addr.sin_addr.s_addr,
                              ntohs(qp->ep->com.local_addr.sin_port),
                              &qp->ep->com.remote_addr.sin_addr.s_addr,
                              ntohs(qp->ep->com.remote_addr.sin_port));
        else
                cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
                              qp->wq.sq.qid, (int)qp->attr.state);
        if (cc < space)
                qpd->pos += cc;
        return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *qpd = file->private_data;

        if (!qpd) {
                printk(KERN_INFO "%s null qpd?\n", __func__);
                return 0;
        }
        kfree(qpd->buf);
        kfree(qpd);
        return 0;
}

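/*
 * open() builds the snapshot: count the QPs under the device lock to size
 * the buffer (128 bytes per entry), then walk the idr again with dump_qp().
 */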
static int qp_open(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *qpd;
        int ret = 0;
        int count = 1;

        qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
        if (!qpd) {
                ret = -ENOMEM;
                goto out;
        }
        qpd->devp = inode->i_private;
        qpd->pos = 0;

        spin_lock_irq(&qpd->devp->lock);
        idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
        spin_unlock_irq(&qpd->devp->lock);

        qpd->bufsize = count * 128;
        qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
        if (!qpd->buf) {
                ret = -ENOMEM;
                goto err1;
        }

        spin_lock_irq(&qpd->devp->lock);
        idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
        spin_unlock_irq(&qpd->devp->lock);

        qpd->buf[qpd->pos++] = 0;
        file->private_data = qpd;
        goto out;
err1:
        kfree(qpd);
out:
        return ret;
}

static const struct file_operations qp_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = qp_open,
        .release = qp_release,
        .read    = debugfs_read,
        .llseek  = default_llseek,
};

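/*
 * idr_for_each() callback: print one STAG.  The mmidr is keyed by the
 * stag shifted down by 8 bits, so shift the id back up to reconstruct it.
 */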
static int dump_stag(int id, void *p, void *data)
{
        struct c4iw_debugfs_data *stagd = data;
        int space;
        int cc;

        space = stagd->bufsize - stagd->pos - 1;
        if (space == 0)
                return 1;

        cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
        if (cc < space)
                stagd->pos += cc;
        return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *stagd = file->private_data;

        if (!stagd) {
                printk(KERN_INFO "%s null stagd?\n", __func__);
                return 0;
        }
        kfree(stagd->buf);
        kfree(stagd);
        return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *stagd;
        int ret = 0;
        int count = 1;

        stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
        if (!stagd) {
                ret = -ENOMEM;
                goto out;
        }
        stagd->devp = inode->i_private;
        stagd->pos = 0;

        spin_lock_irq(&stagd->devp->lock);
        idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
        spin_unlock_irq(&stagd->devp->lock);

        stagd->bufsize = count * sizeof("0x12345678\n");
        stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
        if (!stagd->buf) {
                ret = -ENOMEM;
                goto err1;
        }

        spin_lock_irq(&stagd->devp->lock);
        idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
        spin_unlock_irq(&stagd->devp->lock);

        stagd->buf[stagd->pos++] = 0;
        file->private_data = stagd;
        goto out;
err1:
        kfree(stagd);
out:
        return ret;
}

static const struct file_operations stag_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = stag_open,
        .release = stag_release,
        .read    = debugfs_read,
        .llseek  = default_llseek,
};

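/*
 * Create the per-device "qps" and "stags" dump files.  With debugfs
 * mounted in the usual place they appear under
 * /sys/kernel/debug/<DRV_NAME>/<pci-id>/.
 */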
static int setup_debugfs(struct c4iw_dev *devp)
{
        struct dentry *de;

        if (!devp->debugfs_root)
                return -1;

        de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
                                 (void *)devp, &qp_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;

        de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
                                 (void *)devp, &stag_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;
        return 0;
}

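/*
 * Release the qids cached on a user context: entries whose qid falls on a
 * doorbell-page block boundary (qid & qpmask == 0) are returned to the
 * global qid fifo, the remaining entries are simply freed.
 */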
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx)
{
        struct list_head *pos, *nxt;
        struct c4iw_qid_list *entry;

        mutex_lock(&uctx->lock);
        list_for_each_safe(pos, nxt, &uctx->qpids) {
                entry = list_entry(pos, struct c4iw_qid_list, entry);
                list_del_init(&entry->entry);
                if (!(entry->qid & rdev->qpmask))
                        c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
                                          &rdev->resource.qid_fifo_lock);
                kfree(entry);
        }

        list_for_each_safe(pos, nxt, &uctx->cqids) {
                entry = list_entry(pos, struct c4iw_qid_list, entry);
                list_del_init(&entry->entry);
                kfree(entry);
        }
        mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx)
{
        INIT_LIST_HEAD(&uctx->qpids);
        INIT_LIST_HEAD(&uctx->cqids);
        mutex_init(&uctx->lock);
}

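/*
 * Bring up the low-level RDMA device state: derive the doorbell shift/mask
 * values from the user doorbell and CQ densities, then set up the stag/PD
 * resource tables and the PBL, RQT and on-chip queue (OCQP) pools.
 */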
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
        int err;

        c4iw_init_dev_ucontext(rdev, &rdev->uctx);

        /*
         * qpshift is the number of bits to shift the qpid left in order
         * to get the correct address of the doorbell for that qp.
         */
        rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
        rdev->qpmask = rdev->lldi.udb_density - 1;
        rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
        rdev->cqmask = rdev->lldi.ucq_density - 1;
        PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
             "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
             "qp qid start %u size %u cq qid start %u size %u\n",
             __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
             rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
             rdev->lldi.vr->pbl.start,
             rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
             rdev->lldi.vr->rq.size,
             rdev->lldi.vr->qp.start,
             rdev->lldi.vr->qp.size,
             rdev->lldi.vr->cq.start,
             rdev->lldi.vr->cq.size);
        PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
             "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
             (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
             (void *)pci_resource_start(rdev->lldi.pdev, 2),
             rdev->lldi.db_reg,
             rdev->lldi.gts_reg,
             rdev->qpshift, rdev->qpmask,
             rdev->cqshift, rdev->cqmask);

        if (c4iw_num_stags(rdev) == 0) {
                err = -EINVAL;
                goto err1;
        }

        err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing resources\n", err);
                goto err1;
        }
        err = c4iw_pblpool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
                goto err2;
        }
        err = c4iw_rqtpool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
                goto err3;
        }
        err = c4iw_ocqp_pool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
                goto err4;
        }
        return 0;
err4:
        c4iw_rqtpool_destroy(rdev);
err3:
        c4iw_pblpool_destroy(rdev);
err2:
        c4iw_destroy_resource(&rdev->resource);
err1:
        return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
        c4iw_destroy_resource(&rdev->resource);
}

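/*
 * Tear down one adapter: unregister the ib_device if it was registered,
 * close the rdev, and release the idrs and the on-chip memory window
 * mapping before freeing the ib_device itself.
 */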
static void c4iw_remove(struct c4iw_dev *dev)
{
        PDBG("%s c4iw_dev %p\n", __func__, dev);
        cancel_delayed_work_sync(&dev->db_drop_task);
        list_del(&dev->entry);
        if (dev->registered)
                c4iw_unregister_device(dev);
        c4iw_rdev_close(&dev->rdev);
        idr_destroy(&dev->cqidr);
        idr_destroy(&dev->qpidr);
        idr_destroy(&dev->mmidr);
        iounmap(dev->rdev.oc_mw_kva);
        ib_dealloc_device(&dev->ibdev);
}

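/*
 * Allocate and initialize a c4iw_dev for a newly probed adapter.  The
 * on-chip queue (OCQP) memory window occupies the last ocq.size bytes of
 * BAR2, so map it write-combined before opening the rdev.
 */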
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
        struct c4iw_dev *devp;
        int ret;

        devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
        if (!devp) {
                printk(KERN_ERR MOD "Cannot allocate ib device\n");
                return NULL;
        }
        devp->rdev.lldi = *infop;

        devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
                (pci_resource_len(devp->rdev.lldi.pdev, 2) -
                 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
        devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
                                          devp->rdev.lldi.vr->ocq.size);

        printk(KERN_INFO MOD "ocq memory: "
               "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
               devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
               devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

        mutex_lock(&dev_mutex);

        ret = c4iw_rdev_open(&devp->rdev);
        if (ret) {
                mutex_unlock(&dev_mutex);
                printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
                ib_dealloc_device(&devp->ibdev);
                return NULL;
        }

        idr_init(&devp->cqidr);
        idr_init(&devp->qpidr);
        idr_init(&devp->mmidr);
        spin_lock_init(&devp->lock);
        list_add_tail(&devp->entry, &dev_list);
        mutex_unlock(&dev_mutex);

        if (c4iw_debugfs_root) {
                devp->debugfs_root = debugfs_create_dir(
                                        pci_name(devp->rdev.lldi.pdev),
                                        c4iw_debugfs_root);
                setup_debugfs(devp);
        }
        return devp;
}

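/*
 * cxgb4 ULD "add" callback, invoked once per adapter.  The returned
 * pointer is the handle cxgb4 passes back to the rx and state-change
 * callbacks below.
 */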
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
        struct c4iw_dev *dev;
        static int vers_printed;
        int i;

        if (!vers_printed++)
                printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
                       DRV_VERSION);

        dev = c4iw_alloc(infop);
        if (!dev)
                goto out;

        PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
             __func__, pci_name(dev->rdev.lldi.pdev),
             dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
             dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);

        for (i = 0; i < dev->rdev.lldi.nrxq; i++)
                PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
out:
        return dev;
}

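/*
 * Build an skb from a T4 packet gather list.  Small packets (<= 512 bytes)
 * are copied entirely into the linear area; larger ones copy only pull_len
 * bytes of headers and attach the remaining page fragments.
 */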
static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
                                       unsigned int skb_len,
                                       unsigned int pull_len)
{
        struct sk_buff *skb;
        struct skb_shared_info *ssi;

        if (gl->tot_len <= 512) {
                skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto out;
                __skb_put(skb, gl->tot_len);
                skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
        } else {
                skb = alloc_skb(skb_len, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto out;
                __skb_put(skb, pull_len);
                skb_copy_to_linear_data(skb, gl->va, pull_len);

                ssi = skb_shinfo(skb);
                ssi->frags[0].page = gl->frags[0].page;
                ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
                ssi->frags[0].size = gl->frags[0].size - pull_len;
                if (gl->nfrags > 1)
                        memcpy(&ssi->frags[1], &gl->frags[1],
                               (gl->nfrags - 1) * sizeof(skb_frag_t));
                ssi->nr_frags = gl->nfrags;

                skb->len = gl->tot_len;
                skb->data_len = skb->len - pull_len;
                skb->truesize += skb->data_len;

                /* Get a reference for the last page, we don't own it */
                get_page(gl->frags[gl->nfrags - 1].page);
        }
out:
        return skb;
}

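/*
 * ULD rx handler.  A NULL gather list means the CPL message is embedded in
 * the response descriptor itself, CXGB4_MSG_AN is an async notification
 * carrying a CQ id, and anything else is a full packet.  CPL messages are
 * dispatched by opcode through c4iw_handlers[].
 */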
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
                               const struct pkt_gl *gl)
{
        struct c4iw_dev *dev = handle;
        struct sk_buff *skb;
        const struct cpl_act_establish *rpl;
        unsigned int opcode;

        if (gl == NULL) {
                /* omit RSS and rsp_ctrl at end of descriptor */
                unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

                skb = alloc_skb(256, GFP_ATOMIC);
                if (!skb)
                        goto nomem;
                __skb_put(skb, len);
                skb_copy_to_linear_data(skb, &rsp[1], len);
        } else if (gl == CXGB4_MSG_AN) {
                const struct rsp_ctrl *rc = (void *)rsp;

                u32 qid = be32_to_cpu(rc->pldbuflen_qid);
                c4iw_ev_handler(dev, qid);
                return 0;
        } else {
                skb = t4_pktgl_to_skb(gl, 128, 128);
                if (unlikely(!skb))
                        goto nomem;
        }

        rpl = cplhdr(skb);
        opcode = rpl->ot.opcode;

        if (c4iw_handlers[opcode])
                c4iw_handlers[opcode](dev, skb);
        else
                printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
                       opcode);

        return 0;
nomem:
        return -1;
}

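/*
 * ULD state-change callback.  The ib_device is registered when the adapter
 * reports CXGB4_STATE_UP, unregistered on DOWN or a fatal error, and torn
 * down completely on DETACH.
 */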
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct c4iw_dev *dev = handle;

        PDBG("%s new_state %u\n", __func__, new_state);
        switch (new_state) {
        case CXGB4_STATE_UP:
                printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
                if (!dev->registered) {
                        int ret;

                        ret = c4iw_register_device(dev);
                        if (ret)
                                printk(KERN_ERR MOD
                                       "%s: RDMA registration failed: %d\n",
                                       pci_name(dev->rdev.lldi.pdev), ret);
                }
                break;
        case CXGB4_STATE_DOWN:
                printk(KERN_INFO MOD "%s: Down\n",
                       pci_name(dev->rdev.lldi.pdev));
                if (dev->registered)
                        c4iw_unregister_device(dev);
                break;
        case CXGB4_STATE_START_RECOVERY:
                printk(KERN_INFO MOD "%s: Fatal Error\n",
                       pci_name(dev->rdev.lldi.pdev));
                if (dev->registered)
                        c4iw_unregister_device(dev);
                break;
        case CXGB4_STATE_DETACH:
                printk(KERN_INFO MOD "%s: Detach\n",
                       pci_name(dev->rdev.lldi.pdev));
                mutex_lock(&dev_mutex);
                c4iw_remove(dev);
                mutex_unlock(&dev_mutex);
                break;
        }
        return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
        .name = DRV_NAME,
        .add = c4iw_uld_add,
        .rx_handler = c4iw_uld_rx_handler,
        .state_change = c4iw_uld_state_change,
};

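/*
 * Module init/exit: register with cxgb4 as the RDMA upper-layer driver.
 * Devices are brought up via the ULD callbacks above; failure to create
 * the debugfs root is not fatal.
 */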
static int __init c4iw_init_module(void)
{
        int err;

        err = c4iw_cm_init();
        if (err)
                return err;

        c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
        if (!c4iw_debugfs_root)
                printk(KERN_WARNING MOD
                       "could not create debugfs entry, continuing\n");

        cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

        return 0;
}

static void __exit c4iw_exit_module(void)
{
        struct c4iw_dev *dev, *tmp;

        mutex_lock(&dev_mutex);
        list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
                c4iw_remove(dev);
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
        c4iw_cm_term();
        debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);