2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/debugfs.h>
36 #include <rdma/ib_verbs.h>
/* Module metadata and global driver state for the Chelsio T4 (iw_cxgb4)
 * RDMA driver.
 * NOTE(review): this file appears truncated by extraction -- the embedded
 * original line numbers (40, 42, ...) have gaps, so source lines are
 * missing throughout this view. */
40 #define DRV_VERSION "0.1"
42 MODULE_AUTHOR("Steve Wise");
43 MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
44 MODULE_LICENSE("Dual BSD/GPL");
45 MODULE_VERSION(DRV_VERSION
);
/* List of all registered c4iw_dev instances; devices are added in
 * c4iw_alloc() and removed in c4iw_remove()/c4iw_exit_module(). */
47 static LIST_HEAD(dev_list
);
/* Serializes dev_list manipulation and rdev open/close. */
48 static DEFINE_MUTEX(dev_mutex
);
/* Root dentry of this driver's debugfs tree; created in
 * c4iw_init_module(), removed in c4iw_exit_module(). */
50 static struct dentry
*c4iw_debugfs_root
;
/* Per-open state for the debugfs "qps"/"stags" files: the device being
 * dumped plus a text buffer rendered at open time.
 * NOTE(review): struct is truncated in this view -- code below also uses
 * ->buf, ->bufsize and ->pos members that are not visible here. */
52 struct c4iw_debugfs_data
{
53 struct c4iw_dev
*devp
;
/* idr_for_each() callback: count entries by bumping the int that 'data'
 * points at.  'id' and 'p' are unused.
 * NOTE(review): the declaration of 'countp' (presumably int *countp =
 * data;) and the return statement are missing from this extracted view. */
59 static int count_idrs(int id
, void *p
, void *data
)
63 *countp
= *countp
+ 1;
/* Shared .read handler for the "qps" and "stags" debugfs files: copy the
 * text buffer pre-rendered at open time (d->buf, d->pos bytes used) out
 * to user space.
 * NOTE(review): extraction gaps -- the 'pos'/'len' locals, the ppos
 * parameter, the short-read/early-return paths and the final return are
 * not visible in this view. */
67 static ssize_t
debugfs_read(struct file
*file
, char __user
*buf
, size_t count
,
70 struct c4iw_debugfs_data
*d
= file
->private_data
;
72 loff_t avail
= d
->pos
;
/* Clamp the request to the rendered data still available past 'pos'. */
78 if (count
> avail
- pos
)
84 len
= min((int)count
, (int)d
->pos
- (int)pos
);
/* copy_to_user() returns non-zero on fault; the (missing) branch
 * presumably returns -EFAULT. */
85 if (copy_to_user(buf
, d
->buf
+ pos
, len
))
/* idr_for_each() callback for the debugfs "qps" file: append one QP's
 * state as a text line into qpd->buf.
 * NOTE(review): extraction gaps -- the 'space'/'cc' declarations, the
 * braces and the if/else structure selecting between the two snprintf()
 * calls, the qpd->pos advance and the return are not visible here. */
99 static int dump_qp(int id
, void *p
, void *data
)
101 struct c4iw_qp
*qp
= p
;
102 struct c4iw_debugfs_data
*qpd
= data
;
/* A QP is registered under more than one id; only dump it when called
 * with its SQ qid so each QP appears once. */
106 if (id
!= qp
->wq
.sq
.qid
)
/* Room left in the buffer, reserving 1 byte for the trailing NUL. */
109 space
= qpd
->bufsize
- qpd
->pos
- 1;
/* Presumably the connected-QP branch: include endpoint tid/state and
 * the IPv4 4-tuple (%pI4 prints an IPv4 address). */
114 cc
= snprintf(qpd
->buf
+ qpd
->pos
, space
, "qp id %u state %u "
115 "ep tid %u state %u %pI4:%u->%pI4:%u\n",
116 qp
->wq
.sq
.qid
, (int)qp
->attr
.state
,
117 qp
->ep
->hwtid
, (int)qp
->ep
->com
.state
,
118 &qp
->ep
->com
.local_addr
.sin_addr
.s_addr
,
119 ntohs(qp
->ep
->com
.local_addr
.sin_port
),
120 &qp
->ep
->com
.remote_addr
.sin_addr
.s_addr
,
121 ntohs(qp
->ep
->com
.remote_addr
.sin_port
));
/* Presumably the branch for a QP without a connected endpoint: id and
 * state only -- the selecting condition is missing from this view. */
123 cc
= snprintf(qpd
->buf
+ qpd
->pos
, space
, "qp id %u state %u\n",
124 qp
->wq
.sq
.qid
, (int)qp
->attr
.state
);
/* .release handler for the "qps" debugfs file: free the per-open dump
 * state allocated by qp_open().
 * NOTE(review): the NULL-check branch structure and the kfree() of
 * qpd->buf/qpd are missing from this extracted view; the printk below is
 * presumably the defensive "should not happen" path. */
130 static int qp_release(struct inode
*inode
, struct file
*file
)
132 struct c4iw_debugfs_data
*qpd
= file
->private_data
;
134 printk(KERN_INFO
"%s null qpd?\n", __func__
);
/* .open handler for the "qps" debugfs file: count the QPs under the
 * device lock, allocate a text buffer sized from that count, render every
 * QP into it via dump_qp(), and stash the result in file->private_data
 * for debugfs_read().
 * NOTE(review): extraction gaps -- the 'count' declaration, the kmalloc
 * failure checks, and the return statements are not visible here.  Also
 * note the count is sampled and then the lock is dropped before the dump;
 * presumably dump_qp() bounds-checks via 'space' to tolerate growth. */
142 static int qp_open(struct inode
*inode
, struct file
*file
)
144 struct c4iw_debugfs_data
*qpd
;
148 qpd
= kmalloc(sizeof *qpd
, GFP_KERNEL
);
/* The owning device was stashed in i_private by setup_debugfs(). */
153 qpd
->devp
= inode
->i_private
;
/* First pass: count QPs so we can size the buffer. */
156 spin_lock_irq(&qpd
->devp
->lock
);
157 idr_for_each(&qpd
->devp
->qpidr
, count_idrs
, &count
);
158 spin_unlock_irq(&qpd
->devp
->lock
);
/* 128 bytes of rendered text budgeted per QP. */
160 qpd
->bufsize
= count
* 128;
161 qpd
->buf
= kmalloc(qpd
->bufsize
, GFP_KERNEL
);
/* Second pass: render each QP into the buffer. */
167 spin_lock_irq(&qpd
->devp
->lock
);
168 idr_for_each(&qpd
->devp
->qpidr
, dump_qp
, qpd
);
169 spin_unlock_irq(&qpd
->devp
->lock
);
/* NUL-terminate the rendered text (pos counts it as content). */
171 qpd
->buf
[qpd
->pos
++] = 0;
172 file
->private_data
= qpd
;
/* File operations for the debugfs "qps" file.
 * NOTE(review): original line 182 is missing from this view -- presumably
 * the .open = qp_open initializer. */
180 static const struct file_operations qp_debugfs_fops
= {
181 .owner
= THIS_MODULE
,
183 .release
= qp_release
,
184 .read
= debugfs_read
,
185 .llseek
= default_llseek
,
/* idr_for_each() callback for the debugfs "stags" file: append one STag
 * value ("0xNNNNNNNN\n") to stagd->buf.  The idr stores the stag index,
 * so the printable stag is id << 8.
 * NOTE(review): extraction gaps -- the 'space'/'cc' declarations, the
 * stagd->pos advance and the return are not visible here. */
188 static int dump_stag(int id
, void *p
, void *data
)
190 struct c4iw_debugfs_data
*stagd
= data
;
/* Room left in the buffer, reserving 1 byte for the trailing NUL. */
194 space
= stagd
->bufsize
- stagd
->pos
- 1;
198 cc
= snprintf(stagd
->buf
+ stagd
->pos
, space
, "0x%x\n", id
<<8);
/* .release handler for the "stags" debugfs file: free the per-open dump
 * state allocated by stag_open().
 * NOTE(review): the NULL-check branch structure and the kfree() calls are
 * missing from this extracted view; the printk below is presumably the
 * defensive "should not happen" path. */
204 static int stag_release(struct inode
*inode
, struct file
*file
)
206 struct c4iw_debugfs_data
*stagd
= file
->private_data
;
208 printk(KERN_INFO
"%s null stagd?\n", __func__
);
/* .open handler for the "stags" debugfs file: mirror of qp_open() but
 * walking the memory-region idr (mmidr) and budgeting one "0x12345678\n"
 * line per stag.
 * NOTE(review): extraction gaps -- the 'count' declaration, kmalloc
 * failure checks and return statements are not visible here. */
216 static int stag_open(struct inode
*inode
, struct file
*file
)
218 struct c4iw_debugfs_data
*stagd
;
222 stagd
= kmalloc(sizeof *stagd
, GFP_KERNEL
);
/* The owning device was stashed in i_private by setup_debugfs(). */
227 stagd
->devp
= inode
->i_private
;
/* First pass: count stags so we can size the buffer. */
230 spin_lock_irq(&stagd
->devp
->lock
);
231 idr_for_each(&stagd
->devp
->mmidr
, count_idrs
, &count
);
232 spin_unlock_irq(&stagd
->devp
->lock
);
/* sizeof("0x12345678\n") = 12 bytes budgeted per stag line. */
234 stagd
->bufsize
= count
* sizeof("0x12345678\n");
235 stagd
->buf
= kmalloc(stagd
->bufsize
, GFP_KERNEL
);
/* Second pass: render each stag into the buffer. */
241 spin_lock_irq(&stagd
->devp
->lock
);
242 idr_for_each(&stagd
->devp
->mmidr
, dump_stag
, stagd
);
243 spin_unlock_irq(&stagd
->devp
->lock
);
/* NUL-terminate the rendered text (pos counts it as content). */
245 stagd
->buf
[stagd
->pos
++] = 0;
246 file
->private_data
= stagd
;
/* File operations for the debugfs "stags" file.
 * NOTE(review): original line 256 is missing from this view -- presumably
 * the .open = stag_open initializer. */
254 static const struct file_operations stag_debugfs_fops
= {
255 .owner
= THIS_MODULE
,
257 .release
= stag_release
,
258 .read
= debugfs_read
,
259 .llseek
= default_llseek
,
/* Create the per-device debugfs files ("qps" and "stags") under the
 * device's debugfs directory, passing the device via i_private.  The
 * i_size is forced to 4096 so tools see a non-zero file size.
 * NOTE(review): S_IWUSR (write-only) on files whose fops only implement
 * read looks wrong -- S_IRUSR seems intended; verify against upstream.
 * Extraction gaps: the early-return when debugfs_root is unset and the
 * final return are not visible here. */
262 static int setup_debugfs(struct c4iw_dev
*devp
)
266 if (!devp
->debugfs_root
)
269 de
= debugfs_create_file("qps", S_IWUSR
, devp
->debugfs_root
,
270 (void *)devp
, &qp_debugfs_fops
);
271 if (de
&& de
->d_inode
)
272 de
->d_inode
->i_size
= 4096;
274 de
= debugfs_create_file("stags", S_IWUSR
, devp
->debugfs_root
,
275 (void *)devp
, &stag_debugfs_fops
);
276 if (de
&& de
->d_inode
)
277 de
->d_inode
->i_size
= 4096;
/* Tear down a device ucontext: drain its qid lists under uctx->lock,
 * returning non-user-mapped qids to the rdev's qid fifo.
 * NOTE(review): extraction gaps -- the kfree(entry) calls and closing
 * braces of both loops are not visible here.  Also note BOTH loops walk
 * uctx->qpids; the second presumably should walk uctx->cqids (the list
 * initialized alongside qpids in c4iw_init_dev_ucontext()) -- verify
 * against upstream before relying on this. */
281 void c4iw_release_dev_ucontext(struct c4iw_rdev
*rdev
,
282 struct c4iw_dev_ucontext
*uctx
)
284 struct list_head
*pos
, *nxt
;
285 struct c4iw_qid_list
*entry
;
287 mutex_lock(&uctx
->lock
);
/* Release QP qids; only qids not covered by the user-doorbell mask go
 * back into the shared fifo. */
288 list_for_each_safe(pos
, nxt
, &uctx
->qpids
) {
289 entry
= list_entry(pos
, struct c4iw_qid_list
, entry
);
290 list_del_init(&entry
->entry
);
291 if (!(entry
->qid
& rdev
->qpmask
))
292 c4iw_put_resource(&rdev
->resource
.qid_fifo
, entry
->qid
,
293 &rdev
->resource
.qid_fifo_lock
);
/* Second drain pass -- see NOTE(review) above about qpids vs cqids. */
297 list_for_each_safe(pos
, nxt
, &uctx
->qpids
) {
298 entry
= list_entry(pos
, struct c4iw_qid_list
, entry
);
299 list_del_init(&entry
->entry
);
302 mutex_unlock(&uctx
->lock
);
/* Initialize a device ucontext: empty qid lists for QPs and CQs plus the
 * mutex guarding them. */
305 void c4iw_init_dev_ucontext(struct c4iw_rdev
*rdev
,
306 struct c4iw_dev_ucontext
*uctx
)
308 INIT_LIST_HEAD(&uctx
->qpids
);
309 INIT_LIST_HEAD(&uctx
->cqids
);
310 mutex_init(&uctx
->lock
);
/* Open the raw device: initialize the kernel ucontext, derive the
 * doorbell shift/mask values from the LLD's doorbell/CQ densities, then
 * create the resource allocators (stags/qids/PDs, PBL pool, RQT pool,
 * on-chip-queue pool) with goto-style unwinding on failure.
 * NOTE(review): extraction gaps -- the 'err' declaration, the sanity
 * check body when no stags are provisioned, the 'if (err)' tests and
 * goto/return statements, and the error labels themselves are not
 * visible in this view. */
313 /* Caller takes care of locking if needed */
314 static int c4iw_rdev_open(struct c4iw_rdev
*rdev
)
318 c4iw_init_dev_ucontext(rdev
, &rdev
->uctx
);
/* Doorbell addressing parameters, derived from how many user doorbell
 * regions share a page (udb/ucq density). */
321 * qpshift is the number of bits to shift the qpid left in order
322 * to get the correct address of the doorbell for that qp.
324 rdev
->qpshift
= PAGE_SHIFT
- ilog2(rdev
->lldi
.udb_density
);
325 rdev
->qpmask
= rdev
->lldi
.udb_density
- 1;
326 rdev
->cqshift
= PAGE_SHIFT
- ilog2(rdev
->lldi
.ucq_density
);
327 rdev
->cqmask
= rdev
->lldi
.ucq_density
- 1;
/* Debug dump of the hardware resource ranges handed to us by the LLD. */
328 PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
329 "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
330 "qp qid start %u size %u cq qid start %u size %u\n",
331 __func__
, pci_name(rdev
->lldi
.pdev
), rdev
->lldi
.vr
->stag
.start
,
332 rdev
->lldi
.vr
->stag
.size
, c4iw_num_stags(rdev
),
333 rdev
->lldi
.vr
->pbl
.start
,
334 rdev
->lldi
.vr
->pbl
.size
, rdev
->lldi
.vr
->rq
.start
,
335 rdev
->lldi
.vr
->rq
.size
,
336 rdev
->lldi
.vr
->qp
.start
,
337 rdev
->lldi
.vr
->qp
.size
,
338 rdev
->lldi
.vr
->cq
.start
,
339 rdev
->lldi
.vr
->cq
.size
);
340 PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
341 "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
342 (unsigned)pci_resource_len(rdev
->lldi
.pdev
, 2),
343 (void *)pci_resource_start(rdev
->lldi
.pdev
, 2),
346 rdev
->qpshift
, rdev
->qpmask
,
347 rdev
->cqshift
, rdev
->cqmask
);
/* No stags provisioned means RDMA memory registration is impossible;
 * the (missing) body presumably bails out with an error. */
349 if (c4iw_num_stags(rdev
) == 0) {
/* Create the resource allocators in order; each failure presumably
 * jumps to the matching destroy label below. */
354 err
= c4iw_init_resource(rdev
, c4iw_num_stags(rdev
), T4_MAX_NUM_PD
);
356 printk(KERN_ERR MOD
"error %d initializing resources\n", err
);
359 err
= c4iw_pblpool_create(rdev
);
361 printk(KERN_ERR MOD
"error %d initializing pbl pool\n", err
);
364 err
= c4iw_rqtpool_create(rdev
);
366 printk(KERN_ERR MOD
"error %d initializing rqt pool\n", err
);
369 err
= c4iw_ocqp_pool_create(rdev
);
371 printk(KERN_ERR MOD
"error %d initializing ocqp pool\n", err
);
/* Error unwind path (labels missing from this view): destroy in the
 * reverse order of creation. */
376 c4iw_rqtpool_destroy(rdev
);
378 c4iw_pblpool_destroy(rdev
);
380 c4iw_destroy_resource(&rdev
->resource
);
/* Close the raw device: destroy the pools/resources created by
 * c4iw_rdev_open(), in reverse order of creation.
 * NOTE(review): the ocqp pool destroy (counterpart of
 * c4iw_ocqp_pool_create) is not visible in this extracted view. */
385 static void c4iw_rdev_close(struct c4iw_rdev
*rdev
)
387 c4iw_pblpool_destroy(rdev
);
388 c4iw_rqtpool_destroy(rdev
);
389 c4iw_destroy_resource(&rdev
->resource
);
/* Tear down one c4iw_dev: cancel deferred work, unlink it from dev_list,
 * unregister from the RDMA core, close the rdev, destroy the id tables,
 * unmap the on-chip-queue window and free the ib_device.
 * NOTE(review): list_del() touches dev_list, which is elsewhere guarded
 * by dev_mutex -- presumably the caller holds it; verify. */
392 static void c4iw_remove(struct c4iw_dev
*dev
)
394 PDBG("%s c4iw_dev %p\n", __func__
, dev
);
395 cancel_delayed_work_sync(&dev
->db_drop_task
);
396 list_del(&dev
->entry
);
398 c4iw_unregister_device(dev
);
399 c4iw_rdev_close(&dev
->rdev
);
400 idr_destroy(&dev
->cqidr
);
401 idr_destroy(&dev
->qpidr
);
402 idr_destroy(&dev
->mmidr
);
403 iounmap(dev
->rdev
.oc_mw_kva
);
404 ib_dealloc_device(&dev
->ibdev
);
/* Allocate and initialize a c4iw_dev for the adapter described by the
 * LLD info: map the on-chip-queue memory window at the top of BAR2, open
 * the rdev, set up the id tables and add the device to dev_list.
 * NOTE(review): extraction gaps -- the 'ret' declaration, the NULL check
 * after ib_alloc_device, the success/failure returns and the closing of
 * the debugfs_create_dir() call (setup_debugfs() presumably follows) are
 * not visible in this view. */
407 static struct c4iw_dev
*c4iw_alloc(const struct cxgb4_lld_info
*infop
)
409 struct c4iw_dev
*devp
;
412 devp
= (struct c4iw_dev
*)ib_alloc_device(sizeof(*devp
));
414 printk(KERN_ERR MOD
"Cannot allocate ib device\n");
/* Snapshot the lower-level driver's info into our rdev. */
417 devp
->rdev
.lldi
= *infop
;
/* The on-chip-queue window is the last (power-of-two rounded) ocq.size
 * bytes of BAR2. */
419 devp
->rdev
.oc_mw_pa
= pci_resource_start(devp
->rdev
.lldi
.pdev
, 2) +
420 (pci_resource_len(devp
->rdev
.lldi
.pdev
, 2) -
421 roundup_pow_of_two(devp
->rdev
.lldi
.vr
->ocq
.size
));
/* Write-combining mapping of the OCQ window (unmapped in c4iw_remove). */
422 devp
->rdev
.oc_mw_kva
= ioremap_wc(devp
->rdev
.oc_mw_pa
,
423 devp
->rdev
.lldi
.vr
->ocq
.size
);
425 printk(KERN_INFO MOD
"ocq memory: "
426 "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
427 devp
->rdev
.lldi
.vr
->ocq
.start
, devp
->rdev
.lldi
.vr
->ocq
.size
,
428 devp
->rdev
.oc_mw_pa
, devp
->rdev
.oc_mw_kva
);
/* Open the rdev under dev_mutex; on failure, undo the allocation. */
430 mutex_lock(&dev_mutex
);
432 ret
= c4iw_rdev_open(&devp
->rdev
);
434 mutex_unlock(&dev_mutex
);
435 printk(KERN_ERR MOD
"Unable to open CXIO rdev err %d\n", ret
);
436 ib_dealloc_device(&devp
->ibdev
);
/* Id tables for CQs, QPs and memory regions, plus their lock. */
440 idr_init(&devp
->cqidr
);
441 idr_init(&devp
->qpidr
);
442 idr_init(&devp
->mmidr
);
443 spin_lock_init(&devp
->lock
);
444 list_add_tail(&devp
->entry
, &dev_list
);
445 mutex_unlock(&dev_mutex
);
/* Per-device debugfs directory, named after the PCI device. */
447 if (c4iw_debugfs_root
) {
448 devp
->debugfs_root
= debugfs_create_dir(
449 pci_name(devp
->rdev
.lldi
.pdev
),
/* cxgb4 ULD "add" callback: print the driver banner once, allocate a
 * c4iw_dev for the adapter and log its queue configuration.  The return
 * value becomes the opaque handle passed to the other ULD callbacks.
 * NOTE(review): extraction gaps -- the 'i' declaration, the vers_printed
 * guard around the banner, the error check after c4iw_alloc() and the
 * return statement are not visible in this view. */
456 static void *c4iw_uld_add(const struct cxgb4_lld_info
*infop
)
458 struct c4iw_dev
*dev
;
/* Static so the version banner is printed only on the first add. */
459 static int vers_printed
;
463 printk(KERN_INFO MOD
"Chelsio T4 RDMA Driver - version %s\n",
466 dev
= c4iw_alloc(infop
);
470 PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
471 __func__
, pci_name(dev
->rdev
.lldi
.pdev
),
472 dev
->rdev
.lldi
.nchan
, dev
->rdev
.lldi
.nrxq
,
473 dev
->rdev
.lldi
.ntxq
, dev
->rdev
.lldi
.nports
);
/* Debug-dump every RX queue id handed to us by the LLD. */
475 for (i
= 0; i
< dev
->rdev
.lldi
.nrxq
; i
++)
476 PDBG("rxqid[%u] %u\n", i
, dev
->rdev
.lldi
.rxq_ids
[i
]);
/* Build an sk_buff from an ingress packet gather list.  Small packets
 * (<= 512 bytes total) are copied entirely into a linear skb; larger
 * ones get 'pull_len' header bytes copied into the linear area and the
 * remaining payload attached as page fragments borrowed from the gather
 * list.
 * NOTE(review): extraction gaps -- the 'skb' declaration, the NULL
 * checks after both alloc_skb() calls, the if/else braces and the final
 * return are not visible in this view. */
481 static struct sk_buff
*t4_pktgl_to_skb(const struct pkt_gl
*gl
,
482 unsigned int skb_len
,
483 unsigned int pull_len
)
486 struct skb_shared_info
*ssi
;
/* Small packet: one linear copy, no fragments. */
488 if (gl
->tot_len
<= 512) {
489 skb
= alloc_skb(gl
->tot_len
, GFP_ATOMIC
);
492 __skb_put(skb
, gl
->tot_len
);
493 skb_copy_to_linear_data(skb
, gl
->va
, gl
->tot_len
);
/* Large packet: copy only the header into the linear area... */
495 skb
= alloc_skb(skb_len
, GFP_ATOMIC
);
498 __skb_put(skb
, pull_len
);
499 skb_copy_to_linear_data(skb
, gl
->va
, pull_len
);
/* ...and point the skb's fragments at the gather-list pages, with the
 * first fragment advanced past the pulled header bytes. */
501 ssi
= skb_shinfo(skb
);
502 ssi
->frags
[0].page
= gl
->frags
[0].page
;
503 ssi
->frags
[0].page_offset
= gl
->frags
[0].page_offset
+ pull_len
;
504 ssi
->frags
[0].size
= gl
->frags
[0].size
- pull_len
;
506 memcpy(&ssi
->frags
[1], &gl
->frags
[1],
507 (gl
->nfrags
- 1) * sizeof(skb_frag_t
));
508 ssi
->nr_frags
= gl
->nfrags
;
/* Fix up the skb accounting for the attached fragment bytes. */
510 skb
->len
= gl
->tot_len
;
511 skb
->data_len
= skb
->len
- pull_len
;
512 skb
->truesize
+= skb
->data_len
;
514 /* Get a reference for the last page, we don't own it */
515 get_page(gl
->frags
[gl
->nfrags
- 1].page
);
/* cxgb4 ULD RX callback.  Three ingress cases: (1) small message carried
 * entirely in the response descriptor -- copy it into a fresh skb;
 * (2) gl == CXGB4_MSG_AN, an async notification -- extract the qid and
 * invoke the event handler; (3) a real gather list -- convert it to an
 * skb via t4_pktgl_to_skb().  The resulting skb's CPL opcode selects a
 * handler from the c4iw_handlers[] dispatch table.
 * NOTE(review): extraction gaps -- the 'skb'/'opcode' declarations, the
 * branch/brace structure, the alloc_skb failure path, the __skb_put
 * before the descriptor copy, and the return statements are not visible
 * in this view. */
521 static int c4iw_uld_rx_handler(void *handle
, const __be64
*rsp
,
522 const struct pkt_gl
*gl
)
524 struct c4iw_dev
*dev
= handle
;
526 const struct cpl_act_establish
*rpl
;
/* Case 1: message embedded in the 64-byte response descriptor. */
530 /* omit RSS and rsp_ctrl at end of descriptor */
531 unsigned int len
= 64 - sizeof(struct rsp_ctrl
) - 8;
533 skb
= alloc_skb(256, GFP_ATOMIC
);
/* rsp[1] skips the leading RSS header word. */
537 skb_copy_to_linear_data(skb
, &rsp
[1], len
);
/* Case 2: async notification -- no payload, just a qid to service. */
538 } else if (gl
== CXGB4_MSG_AN
) {
539 const struct rsp_ctrl
*rc
= (void *)rsp
;
541 u32 qid
= be32_to_cpu(rc
->pldbuflen_qid
);
542 c4iw_ev_handler(dev
, qid
);
/* Case 3: real packet -- 128-byte linear part, 128 bytes pulled. */
545 skb
= t4_pktgl_to_skb(gl
, 128, 128);
/* Dispatch on the CPL opcode at the front of the message. */
551 opcode
= rpl
->ot
.opcode
;
553 if (c4iw_handlers
[opcode
])
554 c4iw_handlers
[opcode
](dev
, skb
);
556 printk(KERN_INFO
"%s no handler opcode 0x%x...\n", __func__
,
/* cxgb4 ULD state-change callback: react to adapter lifecycle events.
 * UP registers the RDMA device with the IB core (once); DOWN and
 * START_RECOVERY unregister it; DETACH removes the device entirely under
 * dev_mutex.
 * NOTE(review): extraction gaps -- the 'ret' declaration, the switch
 * statement itself with its case CXGB4_STATE_UP, the 'registered' flag
 * updates, break statements and the return are not visible in this view. */
564 static int c4iw_uld_state_change(void *handle
, enum cxgb4_state new_state
)
566 struct c4iw_dev
*dev
= handle
;
568 PDBG("%s new_state %u\n", __func__
, new_state
);
/* Presumably the CXGB4_STATE_UP case: register with the IB core on the
 * first transition to Up. */
571 printk(KERN_INFO MOD
"%s: Up\n", pci_name(dev
->rdev
.lldi
.pdev
));
572 if (!dev
->registered
) {
574 ret
= c4iw_register_device(dev
);
577 "%s: RDMA registration failed: %d\n",
578 pci_name(dev
->rdev
.lldi
.pdev
), ret
);
581 case CXGB4_STATE_DOWN
:
582 printk(KERN_INFO MOD
"%s: Down\n",
583 pci_name(dev
->rdev
.lldi
.pdev
));
585 c4iw_unregister_device(dev
);
/* Fatal adapter error: drop the RDMA registration. */
587 case CXGB4_STATE_START_RECOVERY
:
588 printk(KERN_INFO MOD
"%s: Fatal Error\n",
589 pci_name(dev
->rdev
.lldi
.pdev
));
591 c4iw_unregister_device(dev
);
/* Adapter going away: full device removal (presumably c4iw_remove(),
 * missing from this view) under dev_mutex. */
593 case CXGB4_STATE_DETACH
:
594 printk(KERN_INFO MOD
"%s: Detach\n",
595 pci_name(dev
->rdev
.lldi
.pdev
));
596 mutex_lock(&dev_mutex
);
598 mutex_unlock(&dev_mutex
);
/* ULD registration descriptor handed to cxgb4_register_uld().
 * NOTE(review): original lines 605-606 are missing from this view --
 * presumably the .name and .add = c4iw_uld_add initializers. */
604 static struct cxgb4_uld_info c4iw_uld_info
= {
607 .rx_handler
= c4iw_uld_rx_handler
,
608 .state_change
= c4iw_uld_state_change
,
/* Module init: bring up the connection-manager layer, create the debugfs
 * root directory (failure is non-fatal) and register as the RDMA ULD
 * with the cxgb4 lower-level driver.
 * NOTE(review): extraction gaps -- the 'err' declaration, the error
 * check/return after c4iw_cm_init() and the final return are not visible
 * in this view. */
611 static int __init
c4iw_init_module(void)
615 err
= c4iw_cm_init();
619 c4iw_debugfs_root
= debugfs_create_dir(DRV_NAME
, NULL
);
/* debugfs is best-effort: warn and keep going without it. */
620 if (!c4iw_debugfs_root
)
621 printk(KERN_WARNING MOD
622 "could not create debugfs entry, continuing\n");
624 cxgb4_register_uld(CXGB4_ULD_RDMA
, &c4iw_uld_info
);
/* Module exit: remove every remaining device under dev_mutex, then
 * unregister the ULD and tear down the debugfs tree.
 * NOTE(review): extraction gaps -- the loop body (presumably
 * c4iw_remove(dev)) and the connection-manager teardown (counterpart of
 * c4iw_cm_init()) are not visible in this view. */
629 static void __exit
c4iw_exit_module(void)
631 struct c4iw_dev
*dev
, *tmp
;
633 mutex_lock(&dev_mutex
);
634 list_for_each_entry_safe(dev
, tmp
, &dev_list
, entry
) {
637 mutex_unlock(&dev_mutex
);
638 cxgb4_unregister_uld(CXGB4_ULD_RDMA
);
640 debugfs_remove_recursive(c4iw_debugfs_root
);
643 module_init(c4iw_init_module
);
644 module_exit(c4iw_exit_module
);