/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_d.h"
#include "i40iw_user.h"
#include "i40iw_register.h"
static u32 nop_signature = 0x55550000;
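
/*
 * 0x55550000 is an arbitrary, recognizable pattern OR'ed into otherwise
 * unused low header bits of each padding NOP (see i40iw_nop_1() below) and
 * incremented per NOP, which makes padding WQEs easy to spot when inspecting
 * a raw ring. That purpose is inferred from usage; it is not documented here.
 */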
/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
	u64 header, *wqe;
	u64 *wqe_0 = NULL;
	u32 wqe_idx, peek_head;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return I40IW_ERR_PARAM;

	wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;
	peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
	wqe_0 = qp->sq_base[peek_head].elem;
	if (peek_head)
		wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
	else
		wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

	wmb();	/* Memory barrier to ensure data is written before valid bit is set */

	set_64bit_val(wqe, 24, header);
	return 0;
}
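
/*
 * Note on the wqe_0[3] writes above: qword 3 of the WQE after the NOP holds
 * that WQE's valid bit. It is pre-written with the inverted polarity (or,
 * when peek_head is 0 and the ring is about to wrap, the current polarity,
 * which becomes the invalid sense after the wrap) so the hardware cannot run
 * past the NOP into stale descriptor memory. This reading is inferred from
 * the code itself.
 */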
/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	mb(); /* valid bit is written and loads completed before reading shadow */

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
	sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != hw_sq_tail) {
		if (sw_sq_head > qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) &&
			    (hw_sq_tail < sw_sq_head)) {
				writel(qp->qp_id, qp->wqe_alloc_reg);
			}
		} else if (sw_sq_head != qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) ||
			    (hw_sq_tail < sw_sq_head)) {
				writel(qp->qp_id, qp->wqe_alloc_reg);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
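
/*
 * Illustrative reading of the doorbell test above (inferred from the code,
 * not from hardware documentation): initial_ring.head records how far work
 * had been posted at the previous call. The doorbell is rung only when the
 * hardware tail still lies within the span posted since then, i.e. the
 * engine may have idled before seeing the new WQEs. For example, with
 * initial head 4 and new head 7, a hardware tail of 5 rings the doorbell,
 * while a tail of 7 means the hardware has already caught up. The else-if
 * branch handles the wrapped case.
 */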
/**
 * i40iw_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
	qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}
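
/*
 * The descriptor index is wqe_idx >> 2, while the inline push paths below
 * select a slot with (wqe_idx & 0x3) * 0x20. Together these suggest the push
 * area is organized as groups of four 32-byte WQE quanta: the shifted index
 * names the group and the low two bits pick the slot. This is an inference
 * from the arithmetic, not a documented layout.
 */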
/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 * @total_size: size of WR in bytes
 * @wr_id: work request id
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
				u32 *wqe_idx,
				u8 wqe_size,
				u32 total_size,
				u64 wr_id)
{
	u64 *wqe = NULL;
	u64 wqe_ptr;
	u32 peek_head = 0;
	u64 *wqe_0 = NULL;
	u16 offset;
	enum i40iw_status_code ret_code = 0;
	u8 nop_wqe_cnt = 0, i;

	*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);

	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
	wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
	offset = (u16)(wqe_ptr) & 0x7F;
	if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
		nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
		for (i = 0; i < nop_wqe_cnt; i++) {
			i40iw_nop_1(qp);
			I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
			if (ret_code)
				return NULL;
		}

		*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
		if (!*wqe_idx)
			qp->swqe_polarity = !qp->swqe_polarity;
	}
	for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
		I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
		if (ret_code)
			return NULL;
	}

	wqe = qp->sq_base[*wqe_idx].elem;

	peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe_0 = qp->sq_base[peek_head].elem;
	if (peek_head & 0x3)
		wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);

	qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
	return wqe;
}
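
/*
 * Worked example of the NOP padding above: I40IW_QP_WQE_MAX_SIZE is 128 and
 * I40IW_QP_WQE_MIN_SIZE is 32, so a WQE must not straddle a 128-byte
 * boundary. If the current slot sits at offset 0x60 of its 128-byte block
 * and a 64-byte WQE is requested, 0x60 + 64 > 128, so (128 - 0x60) / 32 = 1
 * NOP is written and the WQE starts at the next aligned block.
 */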
/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
	if (sge) {
		set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
		set_64bit_val(wqe, (offset + 8),
			      (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
			       LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
	}
}
/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
	u64 *wqe = NULL;
	enum i40iw_status_code ret_code;

	if (I40IW_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;
	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is no of qwords in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;

	return wqe;
}
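
/*
 * Example of the index scaling above: i40iw_qp_uk_init() sets
 * rq_wqe_size_multiplier = 4 << rqshift qwords. With rqshift = 1 each RQ WQE
 * is 8 qwords (64 bytes), so ring index n maps to element n * 2 of the
 * 32-byte rq_base quanta array.
 */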
/**
 * i40iw_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
					       struct i40iw_post_sq_info *info,
					       bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
		return I40IW_ERR_QP_INVALID_MSG_SIZE;

	read_fence |= info->read_fence;

	ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 16,
		      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
	if (!op_info->rem_addr.stag)
		return I40IW_ERR_BAD_STAG;

	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
		 LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);

	for (i = 1; i < op_info->num_lo_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
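
/*
 * Resulting SQ WQE layout, derived from the offsets used above: fragment 0
 * fills qwords 0-1, qword 2 carries the remote tagged offset, and qword 3 is
 * the header whose valid bit is written last, behind the wmb(). Additional
 * fragments start at byte 32 and take 16 bytes each, matching the
 * fragment-count-to-size table in i40iw_fragcnt_to_wqesize_sq().
 */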
/**
 * i40iw_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
					      struct i40iw_post_sq_info *info,
					      bool inv_stag,
					      bool post_sq)
{
	u64 *wqe;
	struct i40iw_rdma_read *op_info;
	u64 header;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;
	u8 wqe_size;
	bool local_fence = false;

	op_info = &info->op.rdma_read;
	ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
	if (ret_code)
		return ret_code;
	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	local_fence |= info->local_fence;

	set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: stag_to_inv value
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
					 struct i40iw_post_sq_info *info,
					 u32 stag_to_inv,
					 bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_post_send *op_info;
	u32 i, wqe_idx, total_size = 0, byte_off;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;
	ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16, 0);
	header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
		 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
		 LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
		       I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, op_info->sg_list);

	for (i = 1; i < op_info->num_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
						      struct i40iw_post_sq_info *info,
						      bool post_sq)
{
	u64 *wqe;
	u8 *dest, *src;
	struct i40iw_inline_rdma_write *op_info;
	u64 *push;
	u64 header;
	u32 i, wqe_idx;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.inline_rdma_write;
	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_IMM_DATA_SIZE;

	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
		 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
		 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
		 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	dest = (u8 *)wqe;
	src = (u8 *)(op_info->data);

	if (op_info->len <= 16) {
		for (i = 0; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	} else {
		for (i = 0; i < 16; i++, src++, dest++)
			*dest = *src;
		dest = (u8 *)wqe + 32;
		for (; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (qp->push_db) {
		push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
		memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
		i40iw_qp_ring_push_db(qp, wqe_idx);
	} else {
		if (post_sq)
			i40iw_qp_post_wr(qp);
	}

	return 0;
}
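
/*
 * The split copy above follows from the WQE layout: only the first 16 bytes
 * of inline data fit in qwords 0-1, because qword 2 holds the remote tagged
 * offset and qword 3 the header; data beyond 16 bytes resumes at byte 32.
 * That also explains the push-mode memcpy length of
 * "len > 16 ? len + 16 : 32". i40iw_inline_send() below uses the same scheme.
 */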
/**
 * i40iw_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
						struct i40iw_post_sq_info *info,
						u32 stag_to_inv,
						bool post_sq)
{
	u64 *wqe;
	u8 *dest, *src;
	struct i40iw_post_inline_send *op_info;
	u64 header;
	u32 wqe_idx, i;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;
	u64 *push;

	op_info = &info->op.inline_send;
	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_IMM_DATA_SIZE;

	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
		 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
		 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
		 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
		 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	dest = (u8 *)wqe;
	src = (u8 *)(op_info->data);

	if (op_info->len <= 16) {
		for (i = 0; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	} else {
		for (i = 0; i < 16; i++, src++, dest++)
			*dest = *src;
		dest = (u8 *)wqe + 32;
		for (; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (qp->push_db) {
		push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
		memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
		i40iw_qp_ring_push_db(qp, wqe_idx);
	} else {
		if (post_sq)
			i40iw_qp_post_wr(qp);
	}

	return 0;
}
/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
							  struct i40iw_post_sq_info *info,
							  bool post_sq)
{
	u64 *wqe;
	struct i40iw_inv_local_stag *op_info;
	u64 header;
	u32 wqe_idx;
	bool local_fence = false;

	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8,
		      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
	set_64bit_val(wqe, 16, 0);
	header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_mw_bind - Memory Window bind operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
					    struct i40iw_post_sq_info *info,
					    bool post_sq)
{
	u64 *wqe;
	struct i40iw_bind_window *op_info;
	u64 header;
	u32 wqe_idx;
	bool local_fence = false;

	op_info = &info->op.bind_window;

	local_fence |= info->local_fence;
	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
		      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
	set_64bit_val(wqe, 16, op_info->bind_length);
	header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
		 LS_64(((op_info->enable_reads << 2) |
			(op_info->enable_writes << 3)),
		       I40IWQPSQ_STAGRIGHTS) |
		 LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
		       I40IWQPSQ_VABASEDTO) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
						 struct i40iw_post_rq_info *info)
{
	u64 header;
	u64 *wqe;
	u32 total_size = 0, wqe_idx, i, byte_off;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	for (i = 0; i < info->num_sges; i++)
		total_size += info->sg_list[i].len;
	wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	set_64bit_val(wqe, 16, 0);

	header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
		       I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, info->sg_list);

	for (i = 1; i < info->num_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	return 0;
}
/**
 * i40iw_cq_request_notification - cq notification request (doorbell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
					  enum i40iw_completion_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
	arm_seq_num++;

	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
	arm_next_se |= 1;
	if (cq_notify == IW_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
		   LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	wmb(); /* make sure shadow area is written before arming doorbell */

	writel(cq->cq_id, cq->cqe_alloc_reg);
}
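
/*
 * Reading of the arm sequence above (inferred from the code): the arm fields
 * live in the CQ shadow area and are read-modify-written so the sequence
 * number advances on every request and the next-solicited-event bit stays
 * set; arm_next is additionally set for IW_CQ_COMPL_EVENT. The writel() of
 * the CQ id then prompts the hardware to re-read the shadow area and arm
 * itself accordingly.
 */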
/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @cq: hw cq
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
						    u8 count)
{
	I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
	set_64bit_val(cq->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
	return 0;
}
/**
 * i40iw_cq_poll_completion - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 * @post_cq: update cq tail
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
						       struct i40iw_cq_poll_info *info,
						       bool post_cq)
{
	u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
	u64 *cqe, *sw_wqe;
	struct i40iw_qp_uk *qp;
	struct i40iw_ring *pring = NULL;
	u32 wqe_idx, q_type, array_idx = 0;
	enum i40iw_status_code ret_code = 0;
	enum i40iw_status_code ret_code2 = 0;
	bool move_cq_head = true;
	u8 polarity;
	u8 addl_wqes = 0;

	if (cq->avoid_mem_cflct)
		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
	else
		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

	if (polarity != cq->polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
	if (info->error) {
		info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
		info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
		info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
	} else {
		info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);

	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
	info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

	qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
	info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

	if (q_type == I40IW_CQE_QTYPE_RQ) {
		array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
		if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->op_type = I40IW_OP_TYPE_REC;
		if (qword3 & I40IWCQ_STAG_MASK) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
		} else {
			info->stag_invalid_set = false;
		}
		info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
		I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		pring = &qp->rq_ring;
	} else {
		if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

			info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
			sw_wqe = qp->sq_base[wqe_idx].elem;
			get_64bit_val(sw_wqe, 24, &wqe_qword);

			addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
			I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
		} else {
			/* flushed: walk the SQ from its tail, skipping NOPs */
			do {
				u8 op_type;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24, &wqe_qword);
				op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
				info->op_type = op_type;
				addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
				I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
				if (op_type != I40IWQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
		}
		pring = &qp->sq_ring;
	}

	if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED)
		if (pring && (I40IW_RING_MORE_WORK(*pring)))
			move_cq_head = false;

	if (move_cq_head) {
		I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);

		if (ret_code2 && !ret_code)
			ret_code = ret_code2;

		if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
			cq->polarity ^= 1;

		if (post_cq) {
			I40IW_RING_MOVE_TAIL(cq->cq_ring);
			set_64bit_val(cq->shadow_area, 0,
				      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
		}
	} else {
		if (info->is_srq)
			return ret_code;
		qword3 &= ~I40IW_CQ_WQEIDX_MASK;
		qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}
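
/*
 * Note on the !move_cq_head path above: a flushed completion whose ring
 * still has outstanding work does not consume the CQE. Instead the CQE's
 * WQE index is rewritten to the ring tail, so the next poll reports the
 * flush again for the following WQE. This description is inferred from the
 * code rather than from hardware documentation.
 */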
/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @wqdepth: depth of wq required.
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
 * Shift of 2 otherwise (wqe size of 128 bytes).
 */
enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
{
	u32 size;

	*shift = 0;
	if (sge > 1 || inline_data > 16)
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;

	/* check if wqdepth is a power of 2 and in range */
	if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
		return I40IW_ERR_INVALID_SIZE;

	size = wqdepth << *shift;	/* multiple of 32 bytes count */
	if (size > I40IWQP_SW_MAX_WQSIZE)
		return I40IW_ERR_INVALID_SIZE;
	return 0;
}
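
/*
 * Worked example: sge = 3, inline_data = 0 yields *shift = 1 (64-byte WQEs),
 * so a requested depth of 512 becomes 512 << 1 = 1024 32-byte quanta, which
 * must stay within I40IWQP_SW_MAX_WQSIZE. sge = 4 or inline_data > 48 would
 * yield *shift = 2 (128-byte WQEs).
 */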
static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
	i40iw_qp_post_wr,
	i40iw_qp_ring_push_db,
	i40iw_rdma_write,
	i40iw_rdma_read,
	i40iw_send,
	i40iw_inline_rdma_write,
	i40iw_inline_send,
	i40iw_stag_local_invalidate,
	i40iw_mw_bind,
	i40iw_post_receive,
	i40iw_nop
};

static struct i40iw_cq_ops iw_cq_ops = {
	i40iw_cq_request_notification,
	i40iw_cq_poll_completion,
	i40iw_cq_post_entries,
	i40iw_clean_cq
};

static struct i40iw_device_uk_ops iw_device_uk_ops = {
	i40iw_cq_uk_init,
	i40iw_qp_uk_init,
};
/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * initializes the vars used in both user and kernel mode.
 * size of the wqe depends on number of max. fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array of wqes)
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
					struct i40iw_qp_uk_init_info *info)
{
	enum i40iw_status_code ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
	if (ret_code)
		return ret_code;

	ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
	if (ret_code)
		return ret_code;

	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;
	qp->rq_wrid_array = info->rq_wrid_array;

	qp->wqe_alloc_reg = info->wqe_alloc_reg;
	qp->qp_id = info->qp_id;

	qp->sq_size = info->sq_size;
	qp->push_db = info->push_db;
	qp->push_wqe = info->push_wqe;

	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;

	I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
	I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
	I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	I40IW_RING_MOVE_TAIL(qp->sq_ring);
	I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
	qp->swqe_polarity = 1;
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;

	if (!qp->use_srq) {
		qp->rq_size = info->rq_size;
		qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
		qp->rq_wqe_size = rqshift;
		I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
		qp->rq_wqe_size_multiplier = 4 << rqshift;
	}
	qp->ops = iw_qp_uk_ops;

	return ret_code;
}
/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
					struct i40iw_cq_uk_init_info *info)
{
	if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
	    (info->cq_size > I40IW_MAX_CQ_SIZE))
		return I40IW_ERR_INVALID_SIZE;
	cq->cq_base = (struct i40iw_cqe *)info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_reg = info->cqe_alloc_reg;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;

	cq->polarity = 1;
	I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->ops = iw_cq_ops;

	return 0;
}
/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
	dev->ops_uk = iw_device_uk_ops;
}
/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 * @cq: cq to clean
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
	u64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
		else
			cqe = (u64 *)&cq->cq_base[cq_head];
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == queue)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}
/**
 * i40iw_nop - send a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
				 u64 wr_id,
				 bool signaled,
				 bool post_sq)
{
	u64 header, *wqe;
	u32 wqe_idx;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
		*wqe_size = 96;
		break;
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	default:
		return I40IW_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	default:
		return I40IW_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}
/**
 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
 * @data_size: data size for inline
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
							 u8 *wqe_size)
{
	if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_IMM_DATA_SIZE;

	if (data_size <= 16)
		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
	else if (data_size <= 48)
		*wqe_size = 64;
	else if (data_size <= 80)
		*wqe_size = 96;
	else
		*wqe_size = 128;

	return 0;
}
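
/*
 * Summary of the size tiers used throughout this file: 32-byte quanta
 * combine into WQEs of 32, 64, 96, or 128 bytes, selected either from the
 * fragment count (i40iw_fragcnt_to_wqesize_sq/_rq above) or from the inline
 * data length here; 128 bytes (I40IW_QP_WQE_MAX_SIZE) is the hardware
 * maximum.
 */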