RDMA/i40iw: Fix for using one sge for RDMA READ
drivers/infiniband/hw/i40iw/i40iw_uk.c
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_d.h"
#include "i40iw_user.h"
#include "i40iw_register.h"

static u32 nop_signature = 0x55550000;

/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
	u64 header, *wqe;
	u64 *wqe_0 = NULL;
	u32 wqe_idx, peek_head;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return I40IW_ERR_PARAM;

	wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

	peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
	wqe_0 = qp->sq_base[peek_head].elem;
	if (peek_head)
		wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
	else
		wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

	wmb();	/* Memory barrier to ensure data is written before valid bit is set */

	set_64bit_val(wqe, 24, header);
	return 0;
}

/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	mb(); /* valid bit is written and loads completed before reading shadow */

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
	sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != hw_sq_tail) {
		if (sw_sq_head > qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) &&
			    (hw_sq_tail < sw_sq_head)) {
				writel(qp->qp_id, qp->wqe_alloc_reg);
			}
		} else if (sw_sq_head != qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) ||
			    (hw_sq_tail < sw_sq_head)) {
				writel(qp->qp_id, qp->wqe_alloc_reg);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

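/*
 * Editorial worked example (not part of the original source): the checks
 * above ring the doorbell only when the hardware tail sits inside the span
 * of newly posted WQEs.  For instance, with a ring of size 16, a previous
 * head (initial_ring.head) of 14 and a new head (sw_sq_head) of 2, the
 * span has wrapped, so the doorbell is rung when hw_sq_tail >= 14 or
 * hw_sq_tail < 2; otherwise the hardware is already processing past the
 * old head and will pick up the new WQEs on its own.
 */
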
/**
 * i40iw_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
	qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}

/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 * @total_size: work request length
 * @wr_id: work request id
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
				u32 *wqe_idx,
				u8 wqe_size,
				u32 total_size,
				u64 wr_id)
{
	u64 *wqe = NULL;
	u64 wqe_ptr;
	u32 peek_head = 0;
	u16 offset;
	enum i40iw_status_code ret_code = 0;
	u8 nop_wqe_cnt = 0, i;
	u64 *wqe_0 = NULL;

	*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);

	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
	wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
	offset = (u16)(wqe_ptr) & 0x7F;
	if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
		nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
		for (i = 0; i < nop_wqe_cnt; i++) {
			i40iw_nop_1(qp);
			I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
			if (ret_code)
				return NULL;
		}

		*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
		if (!*wqe_idx)
			qp->swqe_polarity = !qp->swqe_polarity;
	}

	if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
		i40iw_nop_1(qp);
		I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
		if (ret_code)
			return NULL;
		*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
		if (!*wqe_idx)
			qp->swqe_polarity = !qp->swqe_polarity;
	}

	for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
		I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
		if (ret_code)
			return NULL;
	}

	wqe = qp->sq_base[*wqe_idx].elem;

	peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe_0 = qp->sq_base[peek_head].elem;

	if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
		if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
			wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
	}

	qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
	return wqe;
}

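/*
 * Editorial worked example (not part of the original source): WQEs are
 * built from 32-byte quanta inside a 128-byte aligned window.  If a
 * 96-byte WQE is requested at an offset of 64 into the window
 * (offset + wqe_size > I40IW_QP_WQE_MAX_SIZE), (128 - 64) / 32 = 2 nop
 * WQEs are posted first so the real WQE starts at the next 128-byte
 * boundary.  Similarly, a 64-byte WQE landing at quantum index 4n+1
 * would straddle a 64-byte boundary, so one nop moves it to index 4n+2.
 */
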
/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
	if (sge) {
		set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
		set_64bit_val(wqe, (offset + 8),
			      (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
			       LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
	}
}

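/*
 * Editorial note (not part of the original source): each fragment
 * occupies two quadwords - the to-address (tag_off) at 'offset' and the
 * length/stag pair at 'offset + 8'.  Callers in this file place
 * fragment 0 at byte 0 of the WQE and fragment i (i >= 1) at
 * byte 32 + (i - 1) * 16, leaving bytes 16-31 free for the remote
 * address and header quadwords.
 */
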
/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
	u64 *wqe = NULL;
	enum i40iw_status_code ret_code;

	if (I40IW_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;
	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of qwords in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;

	return wqe;
}

/**
 * i40iw_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
					       struct i40iw_post_sq_info *info,
					       bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
		return I40IW_ERR_QP_INVALID_MSG_SIZE;

	read_fence |= info->read_fence;

	ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 16,
		      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
	if (!op_info->rem_addr.stag)
		return I40IW_ERR_BAD_STAG;

	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
		 LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);

	for (i = 1; i < op_info->num_lo_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

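/*
 * Usage sketch (hypothetical, for illustration only; my_wr_id, buf_va,
 * buf_len, lkey, remote_va and rkey are placeholders, the field names
 * are those dereferenced above):
 *
 *	struct i40iw_post_sq_info info = {};
 *	struct i40iw_sge sge = { .tag_off = buf_va, .len = buf_len,
 *				 .stag = lkey };
 *
 *	info.wr_id = my_wr_id;
 *	info.signaled = true;
 *	info.op.rdma_write.lo_sg_list = &sge;
 *	info.op.rdma_write.num_lo_sges = 1;
 *	info.op.rdma_write.rem_addr.tag_off = remote_va;
 *	info.op.rdma_write.rem_addr.stag = rkey;
 *	ret = i40iw_rdma_write(qp, &info, true);
 */
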
/**
 * i40iw_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
					      struct i40iw_post_sq_info *info,
					      bool inv_stag,
					      bool post_sq)
{
	u64 *wqe;
	struct i40iw_rdma_read *op_info;
	u64 header;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;
	u8 wqe_size;
	bool local_fence = false;

	op_info = &info->op.rdma_read;
	ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
	if (ret_code)
		return ret_code;
	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	local_fence |= info->local_fence;

	set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

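/*
 * Editorial note: per the commit title ("Fix for using one sge for RDMA
 * READ"), an RDMA READ carries exactly one local scatter/gather element
 * (op_info->lo_addr), so the WQE size above is computed from a fragment
 * count of 1 rather than from a caller-supplied count.
 */
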
/**
 * i40iw_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: stag_to_inv value
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
					 struct i40iw_post_sq_info *info,
					 u32 stag_to_inv,
					 bool post_sq)
{
	u64 *wqe;
	struct i40iw_post_send *op_info;
	u64 header;
	u32 i, wqe_idx, total_size = 0, byte_off;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;
	ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16, 0);
	header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
		 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
		 LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
		       I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, op_info->sg_list);

	for (i = 1; i < op_info->num_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
						      struct i40iw_post_sq_info *info,
						      bool post_sq)
{
	u64 *wqe;
	u8 *dest, *src;
	struct i40iw_inline_rdma_write *op_info;
	u64 *push;
	u64 header = 0;
	u32 i, wqe_idx;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.inline_rdma_write;
	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_IMM_DATA_SIZE;

	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
		 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
		 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
		 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	dest = (u8 *)wqe;
	src = (u8 *)(op_info->data);

	if (op_info->len <= 16) {
		for (i = 0; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	} else {
		for (i = 0; i < 16; i++, src++, dest++)
			*dest = *src;
		dest = (u8 *)wqe + 32;
		for (; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (qp->push_db) {
		push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
		memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
		i40iw_qp_ring_push_db(qp, wqe_idx);
	} else {
		if (post_sq)
			i40iw_qp_post_wr(qp);
	}

	return 0;
}

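/*
 * Editorial note (not part of the original source): inline payloads are
 * split around the WQE's fixed quadwords.  The first 16 bytes of data
 * occupy bytes 0-15 of the WQE; anything beyond that continues at byte
 * 32, after the remote-address (byte 16) and header (byte 24) quadwords.
 * When a push page is mapped (qp->push_db is set), the completed WQE is
 * also copied to the push page at slot (wqe_idx & 3) * 32 before
 * i40iw_qp_ring_push_db() is called, instead of ringing the normal
 * doorbell.
 */
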
/**
 * i40iw_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
						struct i40iw_post_sq_info *info,
						u32 stag_to_inv,
						bool post_sq)
{
	u64 *wqe;
	u8 *dest, *src;
	struct i40iw_post_inline_send *op_info;
	u64 header;
	u32 wqe_idx, i;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;
	u64 *push;

	op_info = &info->op.inline_send;
	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_IMM_DATA_SIZE;

	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
		 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
		 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
		 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
		 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	dest = (u8 *)wqe;
	src = (u8 *)(op_info->data);

	if (op_info->len <= 16) {
		for (i = 0; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	} else {
		for (i = 0; i < 16; i++, src++, dest++)
			*dest = *src;
		dest = (u8 *)wqe + 32;
		for (; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (qp->push_db) {
		push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
		memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
		i40iw_qp_ring_push_db(qp, wqe_idx);
	} else {
		if (post_sq)
			i40iw_qp_post_wr(qp);
	}

	return 0;
}

/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
							  struct i40iw_post_sq_info *info,
							  bool post_sq)
{
	u64 *wqe;
	struct i40iw_inv_local_stag *op_info;
	u64 header;
	u32 wqe_idx;
	bool local_fence = false;

	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8,
		      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
	set_64bit_val(wqe, 16, 0);
	header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

/**
 * i40iw_mw_bind - Memory Window bind operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
					    struct i40iw_post_sq_info *info,
					    bool post_sq)
{
	u64 *wqe;
	struct i40iw_bind_window *op_info;
	u64 header;
	u32 wqe_idx;
	bool local_fence = false;

	op_info = &info->op.bind_window;

	local_fence |= info->local_fence;
	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
		      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
	set_64bit_val(wqe, 16, op_info->bind_length);
	header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
		 LS_64(((op_info->enable_reads << 2) |
			(op_info->enable_writes << 3)),
		       I40IWQPSQ_STAGRIGHTS) |
		 LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
		       I40IWQPSQ_VABASEDTO) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

/**
 * i40iw_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
						 struct i40iw_post_rq_info *info)
{
	u64 *wqe;
	u64 header;
	u32 total_size = 0, wqe_idx, i, byte_off;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	for (i = 0; i < info->num_sges; i++)
		total_size += info->sg_list[i].len;
	wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	set_64bit_val(wqe, 16, 0);

	header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
		       I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, info->sg_list);

	for (i = 1; i < info->num_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	return 0;
}

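/*
 * Usage sketch (hypothetical, for illustration only; buf_va, buf_len,
 * lkey and my_wr_id are placeholders, the field names are those
 * dereferenced above):
 *
 *	struct i40iw_sge sge = { .tag_off = buf_va, .len = buf_len,
 *				 .stag = lkey };
 *	struct i40iw_post_rq_info info = { .wr_id = my_wr_id,
 *					   .sg_list = &sge,
 *					   .num_sges = 1 };
 *
 *	ret = i40iw_post_receive(qp, &info);
 */
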
/**
 * i40iw_cq_request_notification - cq notification request (doorbell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
					  enum i40iw_completion_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
	arm_seq_num++;

	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
	arm_next_se |= 1;
	if (cq_notify == IW_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
		   LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	wmb(); /* make sure shadow area is updated before arming */

	writel(cq->cq_id, cq->cqe_alloc_reg);
}

/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @cq: hw cq
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
						    u8 count)
{
	I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
	set_64bit_val(cq->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
	return 0;
}

/**
 * i40iw_cq_poll_completion - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 * @post_cq: update cq tail
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
						       struct i40iw_cq_poll_info *info,
						       bool post_cq)
{
	u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
	u64 *cqe, *sw_wqe;
	struct i40iw_qp_uk *qp;
	struct i40iw_ring *pring = NULL;
	u32 wqe_idx, q_type, array_idx = 0;
	enum i40iw_status_code ret_code = 0;
	enum i40iw_status_code ret_code2 = 0;
	bool move_cq_head = true;
	u8 polarity;
	u8 addl_wqes = 0;

	if (cq->avoid_mem_cflct)
		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
	else
		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

	if (polarity != cq->polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
	if (info->error) {
		info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
		info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
		info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
	} else {
		info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);

	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
	info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

	qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
	info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

	if (q_type == I40IW_CQE_QTYPE_RQ) {
		array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
		if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->op_type = I40IW_OP_TYPE_REC;
		if (qword3 & I40IWCQ_STAG_MASK) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
		} else {
			info->stag_invalid_set = false;
		}
		info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
		I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		pring = &qp->rq_ring;
	} else {
		if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

			info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
			sw_wqe = qp->sq_base[wqe_idx].elem;
			get_64bit_val(sw_wqe, 24, &wqe_qword);

			addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
			I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
		} else {
			do {
				u8 op_type;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24, &wqe_qword);
				op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
				info->op_type = op_type;
				addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
				I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
				if (op_type != I40IWQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

	if (!ret_code &&
	    (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
		if (pring && (I40IW_RING_MORE_WORK(*pring)))
			move_cq_head = false;

	if (move_cq_head) {
		I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);

		if (ret_code2 && !ret_code)
			ret_code = ret_code2;

		if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
			cq->polarity ^= 1;

		if (post_cq) {
			I40IW_RING_MOVE_TAIL(cq->cq_ring);
			set_64bit_val(cq->shadow_area, 0,
				      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
		}
	} else {
		if (info->is_srq)
			return ret_code;
		qword3 &= ~I40IW_CQ_WQEIDX_MASK;
		qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}

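/*
 * Usage sketch (hypothetical, for illustration only): a caller typically
 * arms the CQ and then drains completions until the queue reports empty:
 *
 *	struct i40iw_cq_poll_info info;
 *
 *	i40iw_cq_request_notification(cq, IW_CQ_COMPL_EVENT);
 *	while (!i40iw_cq_poll_completion(cq, &info, true))
 *		handle_completion(&info);
 *
 * A non-zero return (e.g. I40IW_ERR_QUEUE_EMPTY) ends the loop;
 * handle_completion() is a placeholder consumer.
 */
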
/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @wqdepth: depth of wq required.
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
 * Shift of 2 otherwise (wqe size of 128 bytes).
 */
enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
{
	u32 size;

	*shift = 0;
	if (sge > 1 || inline_data > 16)
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;

	/* wqdepth must be a power of 2 and at least the SW minimum */
	if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
		return I40IW_ERR_INVALID_SIZE;

	size = wqdepth << *shift;	/* multiple of 32 bytes count */
	if (size > I40IWQP_SW_MAX_WQSIZE)
		return I40IW_ERR_INVALID_SIZE;
	return 0;
}

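/*
 * Editorial worked example: for wqdepth = 128, sge = 3 and
 * inline_data = 32, the shift is 1 (64-byte WQEs), so the ring consumes
 * 128 << 1 = 256 32-byte quanta.  A depth that is not a power of two,
 * below I40IWQP_SW_MIN_WQSIZE, or whose shifted size exceeds
 * I40IWQP_SW_MAX_WQSIZE is rejected with I40IW_ERR_INVALID_SIZE.
 */
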
static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
	i40iw_qp_post_wr,
	i40iw_qp_ring_push_db,
	i40iw_rdma_write,
	i40iw_rdma_read,
	i40iw_send,
	i40iw_inline_rdma_write,
	i40iw_inline_send,
	i40iw_stag_local_invalidate,
	i40iw_mw_bind,
	i40iw_post_receive,
	i40iw_nop
};

static struct i40iw_cq_ops iw_cq_ops = {
	i40iw_cq_request_notification,
	i40iw_cq_poll_completion,
	i40iw_cq_post_entries,
	i40iw_clean_cq
};

static struct i40iw_device_uk_ops iw_device_uk_ops = {
	i40iw_cq_uk_init,
	i40iw_qp_uk_init,
};

/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * initializes the vars used in both user and kernel mode.
 * size of the wqe depends on the maximum number of fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array of wqes)
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
					struct i40iw_qp_uk_init_info *info)
{
	enum i40iw_status_code ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
	if (ret_code)
		return ret_code;

	ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
	if (ret_code)
		return ret_code;

	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;
	qp->rq_wrid_array = info->rq_wrid_array;

	qp->wqe_alloc_reg = info->wqe_alloc_reg;
	qp->qp_id = info->qp_id;

	qp->sq_size = info->sq_size;
	qp->push_db = info->push_db;
	qp->push_wqe = info->push_wqe;

	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;

	I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
	I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
	I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	I40IW_RING_MOVE_TAIL(qp->sq_ring);
	I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
	qp->swqe_polarity = 1;
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;

	if (!qp->use_srq) {
		qp->rq_size = info->rq_size;
		qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
		qp->rq_wqe_size = rqshift;
		I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
		qp->rq_wqe_size_multiplier = 4 << rqshift;
	}
	qp->ops = iw_qp_uk_ops;

	return ret_code;
}

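/*
 * Usage sketch (hypothetical, for illustration only): a caller fills a
 * struct i40iw_qp_uk_init_info with the ring memory, shadow area, work
 * request tracking arrays, sizes and fragment limits, then:
 *
 *	ret = i40iw_qp_uk_init(qp, &init_info);
 *	if (ret)
 *		goto err;	// placeholder error path
 *
 * sq_size and rq_size must satisfy i40iw_get_wqe_shift() (a power of
 * two within the SW min/max), since i40iw_qp_uk_init() calls it for
 * both queues before touching the qp.
 */
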
/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
					struct i40iw_cq_uk_init_info *info)
{
	if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
	    (info->cq_size > I40IW_MAX_CQ_SIZE))
		return I40IW_ERR_INVALID_SIZE;
	cq->cq_base = (struct i40iw_cqe *)info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_reg = info->cqe_alloc_reg;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;

	I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
	cq->ops = iw_cq_ops;

	return 0;
}

/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
	dev->ops_uk = iw_device_uk_ops;
}

/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 * @cq: cq to clean
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
	u64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
		else
			cqe = (u64 *)&cq->cq_base[cq_head];
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == queue)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * i40iw_nop - send a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
				 u64 wr_id,
				 bool signaled,
				 bool post_sq)
{
	u64 header, *wqe;
	u32 wqe_idx;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
		*wqe_size = 96;
		break;
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	default:
		return I40IW_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}

/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	default:
		return I40IW_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}

1182/**
1183 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
1184 * @data_size: data size for inline
1185 * @wqe_size: size of sq wqe returned
1186 */
1187enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
1188 u8 *wqe_size)
1189{
1190 if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
1191 return I40IW_ERR_INVALID_IMM_DATA_SIZE;
1192
1193 if (data_size <= 16)
1194 *wqe_size = I40IW_QP_WQE_MIN_SIZE;
1195 else if (data_size <= 48)
1196 *wqe_size = 64;
1197 else if (data_size <= 80)
1198 *wqe_size = 96;
1199 else
1200 *wqe_size = 128;
1201
1202 return 0;
1203}