/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0
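
/*
 * Doorbell flow-control tuning, used by resume_queues() below:
 * DB_FC_RESUME_SIZE is the number of QPs resumed per chunk,
 * DB_FC_RESUME_DELAY is the delay, in jiffies, between chunks, and
 * DB_FC_DRAIN_THRESH is the left shift applied to the chip's doorbell
 * fifo interrupt threshold when deciding whether the fifo has drained
 * enough to resume another chunk.
 */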
static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};
/* registered cxgb4 netlink callbacks */
static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}
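
/*
 * The qps/stags/eps debugfs files below are point-in-time snapshots:
 * the open() handler walks the relevant idr under the device lock and
 * formats everything into a private buffer, and read() simply copies
 * out of that buffer.
 */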
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(mapped_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(mapped_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in6 *mapped_lsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in6 *mapped_rsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(mapped_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(mapped_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}
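
/*
 * Walk the qp idr twice: once to count entries so the buffer can be
 * sized, then again to format each QP into it. The lock is dropped
 * between the passes, but count starts at 1 and dump_qp() stops once
 * the buffer fills, so a QP added in between is truncated rather than
 * overflowing the buffer.
 */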
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}
static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id << 8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}
static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}
static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
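	/*
	 * Each stag prints as "0x%x\n" of an at-most-8-hex-digit value,
	 * so sizeof("0x12345678\n") (11 chars plus the literal's NUL) is
	 * a worst-case per-entry size, with the slack covering the
	 * terminating NUL written after the dump.
	 */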
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}
static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
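
/*
 * Doorbell state machine: stop_queues() moves the device to STOPPED on
 * a DB_FULL event, recover_queues() passes through RECOVERY after a
 * DB_DROP, and resume_queues() walks back through FLOW_CONTROL to
 * NORMAL once the fifo has drained.
 */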
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);

	return 0;
}
static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}
static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}
static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(mapped_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(mapped_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;
	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}
static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}
static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
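	/*
	 * Worked example, assuming udb_density counts user doorbells per
	 * page: with 4 KB pages (PAGE_SHIFT = 12) and udb_density = 16,
	 * qpshift = 12 - 4 = 8, so qpid << 8 spaces doorbells 256 bytes
	 * apart (16 per page), and qpmask = 15 identifies the qpids that
	 * share one page. cqshift/cqmask work the same way for CQs.
	 */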
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg, rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);
	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		pr_err(MOD "error allocating status page\n");
		err = -ENOMEM;
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}
static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	iwpm_exit(RDMA_NL_C4IW);
	ctx->dev = NULL;
}
static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}
static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
	     __func__, devp->rdev.lldi.sge_ingpadboundary,
	     devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
	devp->rdev.hw_queue.t4_max_eq_size =
		65520 - devp->rdev.hw_queue.t4_eq_status_entries;
	devp->rdev.hw_queue.t4_max_iq_size = 65520 - 1;
	devp->rdev.hw_queue.t4_max_rq_size =
		8192 - devp->rdev.hw_queue.t4_eq_status_entries;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size - 1;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 1;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;
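	/*
	 * A worked example of the sizing above, assuming a 64 B ingress
	 * padding boundary: one status entry is reserved per egress
	 * queue, so t4_max_eq_size = 65520 - 1, and the usable SQ depth
	 * is one less again since a ring that size can never be
	 * completely full.
	 */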
	/*
	 * For T5 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (is_t5(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	ret = iwpm_init(RDMA_NL_C4IW);
	if (ret) {
		pr_err("port mapper initialization failed with %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}
static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}
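
/*
 * A connect request can arrive as a plain CPL_RX_PKT rather than an
 * offloaded CPL (for example when the hardware connection table is
 * full). recv_rx_pkt() turns such a packet into a synthesized
 * cpl_pass_accept_req via copy_gl_to_skb_pkt() and runs it through the
 * normal handler table.
 */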
static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, " \
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
			*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}
static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}
static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}
static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}
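
/*
 * Re-arm the pending doorbells for a small batch of QPs from the
 * flow-control list. resume_queues() calls this repeatedly, dropping
 * the device lock and sleeping between chunks so the doorbell fifo can
 * drain.
 */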
static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};
static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}
static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}
static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}
*ctx
)
1253 struct qp_list qp_list
;
1256 /* slow everybody down */
1257 set_current_state(TASK_UNINTERRUPTIBLE
);
1258 schedule_timeout(usecs_to_jiffies(1000));
1260 /* flush the SGE contexts */
1261 ret
= cxgb4_flush_eq_cache(ctx
->dev
->rdev
.lldi
.ports
[0]);
1263 printk(KERN_ERR MOD
"%s: Fatal error - DB overflow recovery failed\n",
1264 pci_name(ctx
->lldi
.pdev
));
1268 /* Count active queues so we can build a list of queues to recover */
1269 spin_lock_irq(&ctx
->dev
->lock
);
1270 WARN_ON(ctx
->dev
->db_state
!= STOPPED
);
1271 ctx
->dev
->db_state
= RECOVERY
;
1272 idr_for_each(&ctx
->dev
->qpidr
, count_qps
, &count
);
1274 qp_list
.qps
= kzalloc(count
* sizeof *qp_list
.qps
, GFP_ATOMIC
);
1276 printk(KERN_ERR MOD
"%s: Fatal error - DB overflow recovery failed\n",
1277 pci_name(ctx
->lldi
.pdev
));
1278 spin_unlock_irq(&ctx
->dev
->lock
);
1283 /* add and ref each qp so it doesn't get freed */
1284 idr_for_each(&ctx
->dev
->qpidr
, add_and_ref_qp
, &qp_list
);
1286 spin_unlock_irq(&ctx
->dev
->lock
);
1288 /* now traverse the list in a safe context to recover the db state*/
1289 recover_lost_dbs(ctx
, &qp_list
);
1291 /* we're almost done! deref the qps and clean up */
1292 deref_qps(&qp_list
);
1295 spin_lock_irq(&ctx
->dev
->lock
);
1296 WARN_ON(ctx
->dev
->db_state
!= RECOVERY
);
1297 ctx
->dev
->db_state
= STOPPED
;
1298 spin_unlock_irq(&ctx
->dev
->lock
);
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
			    c4iw_nl_cb_table))
		pr_err("%s[%u]: Failed to add netlink callback\n"
		       , __func__, __LINE__);

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}
static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	ibnl_remove_client(RDMA_NL_C4IW);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}
module_init(c4iw_init_module);
module_exit(c4iw_exit_module);