/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  pci_unmap_addr(&wq->rq, mapping));
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  pci_unmap_addr(&wq->sq, mapping));
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->sq.memsize, &(wq->sq.dma_addr),
					  GFP_KERNEL);
	if (!wq->sq.queue)
		goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
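
	/*
	 * The firmware resource work request built below carries two
	 * resource entries (V_FW_RI_RES_WR_NRES(2)): one WRITE of the SQ
	 * context and one WRITE of the RQ context, so both hardware egress
	 * queues are created in a single round trip to the uP.  The wr_wait
	 * cookie embedded in the WR is what wait_event_timeout() below
	 * blocks on until the firmware reply arrives.
	 */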
	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(3) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(3) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rdev->lldi.pdev));
		rdev->flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  pci_unmap_addr(&wq->rq, mapping));
err6:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  pci_unmap_addr(&wq->sq, mapping));
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}
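
/*
 * The build_rdma_*() helpers below translate an ib_send_wr into the union
 * t4_wr image that is written directly into an SQ slot.  Small payloads
 * flagged IB_SEND_INLINE are copied into an fw_ri_immd immediate-data
 * chunk; otherwise the SGEs are encoded as an fw_ri_isgl gather list.
 * Every builder returns the WR length through *len16 in 16-byte units,
 * which is the unit the hardware expects in the WR header.
 */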
static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	int i;
	u32 plen;
	int size;
	u8 *datap;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			datap = (u8 *)wqe->send.u.immd_src[0].data;
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) >
				    T4_MAX_SEND_INLINE) {
					return -EMSGSIZE;
				}
				plen += wr->sg_list[i].length;
				memcpy(datap,
				    (void *)(unsigned long)wr->sg_list[i].addr,
				    wr->sg_list[i].length);
				datap += wr->sg_list[i].length;
			}
			wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
			wqe->send.u.immd_src[0].r1 = 0;
			wqe->send.u.immd_src[0].r2 = 0;
			wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) < plen)
					return -EMSGSIZE;
				plen += wr->sg_list[i].length;
				wqe->send.u.isgl_src[0].sge[i].stag =
					cpu_to_be32(wr->sg_list[i].lkey);
				wqe->send.u.isgl_src[0].sge[i].len =
					cpu_to_be32(wr->sg_list[i].length);
				wqe->send.u.isgl_src[0].sge[i].to =
					cpu_to_be64(wr->sg_list[i].addr);
			}
			wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
			wqe->send.u.isgl_src[0].r1 = 0;
			wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
			wqe->send.u.isgl_src[0].r2 = 0;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	int i;
	u32 plen;
	int size;
	u8 *datap;

	if (wr->num_sge > T4_MAX_WRITE_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			datap = (u8 *)wqe->write.u.immd_src[0].data;
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) >
				    T4_MAX_WRITE_INLINE) {
					return -EMSGSIZE;
				}
				plen += wr->sg_list[i].length;
				memcpy(datap,
				    (void *)(unsigned long)wr->sg_list[i].addr,
				    wr->sg_list[i].length);
				datap += wr->sg_list[i].length;
			}
			wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
			wqe->write.u.immd_src[0].r1 = 0;
			wqe->write.u.immd_src[0].r2 = 0;
			wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) < plen)
					return -EMSGSIZE;
				plen += wr->sg_list[i].length;
				wqe->write.u.isgl_src[0].sge[i].stag =
					cpu_to_be32(wr->sg_list[i].lkey);
				wqe->write.u.isgl_src[0].sge[i].len =
					cpu_to_be32(wr->sg_list[i].length);
				wqe->write.u.isgl_src[0].sge[i].to =
					cpu_to_be64(wr->sg_list[i].addr);
			}
			wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
			wqe->write.u.isgl_src[0].r1 = 0;
			wqe->write.u.isgl_src[0].nsge =
				cpu_to_be16(wr->num_sge);
			wqe->write.u.isgl_src[0].r2 = 0;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int i;
	int plen = 0;

	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += wr->sg_list[i].length;
		wqe->recv.isgl.sge[i].stag =
			cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.isgl.sge[i].len =
			cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.isgl.sge[i].to =
			cpu_to_be64(wr->sg_list[i].addr);
	}
	for (; i < T4_MAX_RECV_SGE; i++) {
		wqe->recv.isgl.sge[i].stag = 0;
		wqe->recv.isgl.sge[i].len = 0;
		wqe->recv.isgl.sge[i].to = 0;
	}
	wqe->recv.isgl.op = FW_RI_DATA_ISGL;
	wqe->recv.isgl.r1 = 0;
	wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
	wqe->recv.isgl.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}
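
/*
 * For fast-register WRs the physical buffer list either fits in the WR as
 * immediate data (pbllen <= T4_MAX_FR_IMMD) or, when it is too large, is
 * referenced indirectly through an fw_ri_dsgl pointing at the DMA-mapped
 * page list of the c4iw_fr_page_list.
 */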
static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	if (pbllen > T4_MAX_FR_IMMD) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
			*p = cpu_to_be64(
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
				      16);
	}
	return 0;
}
static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}
void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}
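
/*
 * Post path: under the qhp lock, each ib_send_wr is built in place in the
 * next SQ slot, its wr_id and opcode are recorded in the software SQ entry
 * consulted later at poll time, and the doorbell is rung once for the whole
 * chain after the loop.
 */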
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq);
		idx++;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;

		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq);
		wr = wr->next;
		num_wrs--;
		idx++;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}
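
/*
 * build_term_codes() maps the CQE status of the offending operation onto
 * the TERMINATE message layer/etype and error code fields defined by the
 * iWARP (RDMAP/DDP/MPA) error reporting rules, so the peer sees a
 * meaningful reason for the connection teardown.
 */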
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
int c4iw_post_zb_read(struct c4iw_qp *qhp)
{
	union t4_wr *wqe;
	struct sk_buff *skb;
	u8 len16;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
	memset(wqe, 0, sizeof wqe->read);
	wqe->read.r2 = cpu_to_be64(0);
	wqe->read.stag_sink = cpu_to_be32(1);
	wqe->read.to_sink_hi = cpu_to_be32(0);
	wqe->read.to_sink_lo = cpu_to_be32(1);
	wqe->read.stag_src = cpu_to_be32(1);
	wqe->read.plen = cpu_to_be32(0);
	wqe->read.to_src_hi = cpu_to_be32(0);
	wqe->read.to_src_lo = cpu_to_be32(1);
	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);

	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
}
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}
/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp, unsigned long *flag)
{
	int count;
	int flushed;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* drop the reference taken above */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}
static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
{
	struct c4iw_cq *rchp, *schp;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		if (schp != rchp)
			t4_set_cq_in_error(&schp->cq);
		return;
	}
	__flush_qp(qhp, rchp, schp, flag);
}
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (u64)&wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rhp->rdev.lldi.pdev));
		rhp->rdev.flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else {
		ret = wr_wait.ret;
		if (ret)
			printk(KERN_WARNING MOD
			       "%s: Abnormal close qpid %d ret %u\n",
			       pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
			       ret);
	}
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}
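
/*
 * The RTR message built here is a zero-length RDMA WRITE or RDMA READ
 * request (depending on the negotiated p2p_type) that the firmware sends
 * as the first message after MPA negotiation to satisfy the peer-to-peer
 * connection setup model.
 */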
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (u64)&wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rhp->rdev.lldi.pdev));
		rhp->rdev.flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}
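
/*
 * c4iw_modify_qp() drives the iWARP QP state machine: IDLE->RTS posts the
 * FW_RI_TYPE_INIT WR via rdma_init(), RTS->CLOSING posts FW_RI_TYPE_FINI
 * via rdma_fini(), RTS->TERMINATE queues a TERMINATE message, and any
 * transition into ERROR flushes the queues and disassociates the ep.
 */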
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = C4IW_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			qhp->attr.state = C4IW_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = C4IW_QP_STATE_CLOSING;
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&ep->com);
			}
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_fini(rhp, qhp);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret) {
				c4iw_get_ep(&ep->com);
				disconnect = abort = 1;
				goto err;
			}
			break;
		case C4IW_QP_STATE_TERMINATE:
			qhp->attr.state = C4IW_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			c4iw_get_ep(&ep->com);
			terminate = 1;
			disconnect = 1;
			break;
		case C4IW_QP_STATE_ERROR:
			qhp->attr.state = C4IW_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
			}
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp, &flag);
			qhp->attr.state = C4IW_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = C4IW_QP_STATE_IDLE;
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = C4IW_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
	if (ret)
		goto err3;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err4;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err5;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err6;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err7;
		}

		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = virt_to_phys(qhp->wq.sq.queue);
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm4);
err7:
	kfree(mm3);
err6:
	kfree(mm2);
err5:
	kfree(mm1);
err4:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}