RDMA/i40iw: Correct STag mask to min of 14 bits
drivers/infiniband/hw/i40iw/i40iw_hw.c
1 /*******************************************************************************
2 *
3 * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 *******************************************************************************/
34
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/ip.h>
40 #include <linux/tcp.h>
41 #include <linux/if_vlan.h>
42
43 #include "i40iw.h"
44
45 /**
46 * i40iw_initialize_hw_resources - initialize hw resource during open
47 * @iwdev: iwarp device
48 */
49 u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
50 {
51 unsigned long num_pds;
52 u32 resources_size;
53 u32 max_mr;
54 u32 max_qp;
55 u32 max_cq;
56 u32 arp_table_size;
57 u32 mrdrvbits;
58 void *resource_ptr;
59
60 max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
61 max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
62 max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
63 arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
64 iwdev->max_cqe = 0xFFFFF;
65 num_pds = max_qp * 4;
66 resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
67 resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
68 resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
69 resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
70 resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
71 resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
72 resources_size += sizeof(struct i40iw_qp **) * max_qp;
73 iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);
74
75 if (!iwdev->mem_resources)
76 return -ENOMEM;
77
78 iwdev->max_qp = max_qp;
79 iwdev->max_mr = max_mr;
80 iwdev->max_cq = max_cq;
81 iwdev->max_pd = num_pds;
82 iwdev->arp_table_size = arp_table_size;
83 iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
84 resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);
85
86 iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
87 IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;
88
89 iwdev->allocated_qps = resource_ptr;
90 iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
91 iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
92 iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
93 iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
94 iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
95 set_bit(0, iwdev->allocated_mrs);
96 set_bit(0, iwdev->allocated_qps);
97 set_bit(0, iwdev->allocated_cqs);
98 set_bit(0, iwdev->allocated_pds);
99 set_bit(0, iwdev->allocated_arps);
100
101 /* Following for ILQ/IEQ */
102 set_bit(1, iwdev->allocated_qps);
103 set_bit(1, iwdev->allocated_cqs);
104 set_bit(1, iwdev->allocated_pds);
105 set_bit(2, iwdev->allocated_cqs);
106 set_bit(2, iwdev->allocated_pds);
107
108 spin_lock_init(&iwdev->resource_lock);
109 spin_lock_init(&iwdev->qptable_lock);
110 /* stag index mask has a minimum of 14 bits */
111 mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);
112 iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
113 return 0;
114 }
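/*
 * Worked example (illustrative, not part of the original source): with
 * max_mr = 0x10000 (2^16), get_count_order() returns 16, so
 * mrdrvbits = 24 - 16 = 8 and mr_stagmask = ~(0xFF << 24) = 0x00FFFFFF.
 * For a smaller MR table such as max_mr = 0x1000 (2^12), the 14-bit floor
 * applies: mrdrvbits = 24 - 14 = 10 and
 * mr_stagmask = ~(0x3FF << 22) = 0x003FFFFF.
 */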
115
116 /**
117 * i40iw_cqp_ce_handler - handle cqp completions
118 * @iwdev: iwarp device
119 * @cq: cq for cqp completions
120 * @arm: flag to arm after completions
121 */
122 static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
123 {
124 struct i40iw_cqp_request *cqp_request;
125 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
126 u32 cqe_count = 0;
127 struct i40iw_ccq_cqe_info info;
128 int ret;
129
130 do {
131 memset(&info, 0, sizeof(info));
132 ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
133 if (ret)
134 break;
135 cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
136 if (info.error)
137 i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
138 info.op_code, info.maj_err_code, info.min_err_code);
139 if (cqp_request) {
140 cqp_request->compl_info.maj_err_code = info.maj_err_code;
141 cqp_request->compl_info.min_err_code = info.min_err_code;
142 cqp_request->compl_info.op_ret_val = info.op_ret_val;
143 cqp_request->compl_info.error = info.error;
144
145 if (cqp_request->waiting) {
146 cqp_request->request_done = true;
147 wake_up(&cqp_request->waitq);
148 i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
149 } else {
150 if (cqp_request->callback_fcn)
151 cqp_request->callback_fcn(cqp_request, 1);
152 i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
153 }
154 }
155
156 cqe_count++;
157 } while (1);
158
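/* re-arm only if at least one CQP completion was processed */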
159 if (arm && cqe_count) {
160 i40iw_process_bh(dev);
161 dev->ccq_ops->ccq_arm(cq);
162 }
163 }
164
165 /**
166 * i40iw_iwarp_ce_handler - handle iwarp completions
167 * @iwdev: iwarp device
168 * @iwcq: iwarp cq receiving event
169 */
170 static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
171 struct i40iw_sc_cq *iwcq)
172 {
173 struct i40iw_cq *i40iwcq = iwcq->back_cq;
174
175 if (i40iwcq->ibcq.comp_handler)
176 i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
177 i40iwcq->ibcq.cq_context);
178 }
179
180 /**
181 * i40iw_puda_ce_handler - handle puda completion events
182 * @iwdev: iwarp device
183 * @cq: puda completion q for event
184 */
185 static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
186 struct i40iw_sc_cq *cq)
187 {
188 struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
189 enum i40iw_status_code status;
190 u32 compl_error;
191
192 do {
193 status = i40iw_puda_poll_completion(dev, cq, &compl_error);
194 if (status == I40IW_ERR_QUEUE_EMPTY)
195 break;
196 if (status) {
197 i40iw_pr_err("puda status = %d\n", status);
198 break;
199 }
200 if (compl_error) {
201 i40iw_pr_err("puda compl_err =0x%x\n", compl_error);
202 break;
203 }
204 } while (1);
205
206 dev->ccq_ops->ccq_arm(cq);
207 }
208
209 /**
210 * i40iw_process_ceq - handle ceq for completions
211 * @iwdev: iwarp device
212 * @ceq: ceq having cq for completion
213 */
214 void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
215 {
216 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
217 struct i40iw_sc_ceq *sc_ceq;
218 struct i40iw_sc_cq *cq;
219 bool arm = true;
220
221 sc_ceq = &ceq->sc_ceq;
222 do {
223 cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
224 if (!cq)
225 break;
226
227 if (cq->cq_type == I40IW_CQ_TYPE_CQP)
228 i40iw_cqp_ce_handler(iwdev, cq, arm);
229 else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
230 i40iw_iwarp_ce_handler(iwdev, cq);
231 else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
232 (cq->cq_type == I40IW_CQ_TYPE_IEQ))
233 i40iw_puda_ce_handler(iwdev, cq);
234 } while (1);
235 }
236
237 /**
238 * i40iw_next_iw_state - modify qp state
239 * @iwqp: iwarp qp to modify
240 * @state: next state for qp
241 * @del_hash: flag to remove the hash table entry
242 * @term: flags controlling terminate/FIN send
243 * @termlen: length of the terminate message
244 */
245 void i40iw_next_iw_state(struct i40iw_qp *iwqp,
246 u8 state,
247 u8 del_hash,
248 u8 term,
249 u8 termlen)
250 {
251 struct i40iw_modify_qp_info info;
252
253 memset(&info, 0, sizeof(info));
254 info.next_iwarp_state = state;
255 info.remove_hash_idx = del_hash;
256 info.cq_num_valid = true;
257 info.arp_cache_idx_valid = true;
258 info.dont_send_term = true;
259 info.dont_send_fin = true;
260 info.termlen = termlen;
261
262 if (term & I40IWQP_TERM_SEND_TERM_ONLY)
263 info.dont_send_term = false;
264 if (term & I40IWQP_TERM_SEND_FIN_ONLY)
265 info.dont_send_fin = false;
266 if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
267 info.reset_tcp_conn = true;
268 i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
269 }
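/*
 * Example (drawn from the AE handling below): transition a QP to CLOSING
 * without sending a terminate or FIN, or move it to ERROR and remove its
 * hash entry:
 *     i40iw_next_iw_state(iwqp, I40IW_QP_STATE_CLOSING, 0, 0, 0);
 *     i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
 */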
270
271 /**
272 * i40iw_process_aeq - handle aeq events
273 * @iwdev: iwarp device
274 */
275 void i40iw_process_aeq(struct i40iw_device *iwdev)
276 {
277 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
278 struct i40iw_aeq *aeq = &iwdev->aeq;
279 struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
280 struct i40iw_aeqe_info aeinfo;
281 struct i40iw_aeqe_info *info = &aeinfo;
282 int ret;
283 struct i40iw_qp *iwqp = NULL;
284 struct i40iw_sc_cq *cq = NULL;
285 struct i40iw_cq *iwcq = NULL;
286 struct i40iw_sc_qp *qp = NULL;
287 struct i40iw_qp_host_ctx_info *ctx_info = NULL;
288 unsigned long flags;
289
290 u32 aeqcnt = 0;
291
292 if (!sc_aeq->size)
293 return;
294
295 do {
296 memset(info, 0, sizeof(*info));
297 ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
298 if (ret)
299 break;
300
301 aeqcnt++;
302 i40iw_debug(dev, I40IW_DEBUG_AEQ,
303 "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
304 __func__, info->ae_id, info->qp, info->qp_cq_id);
305 if (info->qp) {
306 spin_lock_irqsave(&iwdev->qptable_lock, flags);
307 iwqp = iwdev->qp_table[info->qp_cq_id];
308 if (!iwqp) {
309 spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
310 i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
311 continue;
312 }
313 i40iw_add_ref(&iwqp->ibqp);
314 spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
315 qp = &iwqp->sc_qp;
316 spin_lock_irqsave(&iwqp->lock, flags);
317 iwqp->hw_tcp_state = info->tcp_state;
318 iwqp->hw_iwarp_state = info->iwarp_state;
319 iwqp->last_aeq = info->ae_id;
320 spin_unlock_irqrestore(&iwqp->lock, flags);
321 ctx_info = &iwqp->ctx_info;
322 ctx_info->err_rq_idx_valid = true;
323 } else {
324 if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
325 continue;
326 }
327
328 switch (info->ae_id) {
329 case I40IW_AE_LLP_FIN_RECEIVED:
330 if (qp->term_flags)
331 continue;
332 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
333 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
334 if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
335 (iwqp->ibqp_state == IB_QPS_RTS)) {
336 i40iw_next_iw_state(iwqp,
337 I40IW_QP_STATE_CLOSING, 0, 0, 0);
338 i40iw_cm_disconn(iwqp);
339 }
340 iwqp->cm_id->add_ref(iwqp->cm_id);
341 i40iw_schedule_cm_timer(iwqp->cm_node,
342 (struct i40iw_puda_buf *)iwqp,
343 I40IW_TIMER_TYPE_CLOSE, 1, 0);
344 }
345 break;
346 case I40IW_AE_LLP_CLOSE_COMPLETE:
347 if (qp->term_flags)
348 i40iw_terminate_done(qp, 0);
349 else
350 i40iw_cm_disconn(iwqp);
351 break;
352 case I40IW_AE_RESET_SENT:
353 i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
354 i40iw_cm_disconn(iwqp);
355 break;
356 case I40IW_AE_LLP_CONNECTION_RESET:
357 if (atomic_read(&iwqp->close_timer_started))
358 continue;
359 i40iw_cm_disconn(iwqp);
360 break;
361 case I40IW_AE_TERMINATE_SENT:
362 i40iw_terminate_send_fin(qp);
363 break;
364 case I40IW_AE_LLP_TERMINATE_RECEIVED:
365 i40iw_terminate_received(qp, info);
366 break;
367 case I40IW_AE_CQ_OPERATION_ERROR:
368 i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
369 info->ae_id);
370 cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
371 iwcq = (struct i40iw_cq *)cq->back_cq;
372
373 if (iwcq->ibcq.event_handler) {
374 struct ib_event ibevent;
375
376 ibevent.device = iwcq->ibcq.device;
377 ibevent.event = IB_EVENT_CQ_ERR;
378 ibevent.element.cq = &iwcq->ibcq;
379 iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
380 }
381 break;
382 case I40IW_AE_PRIV_OPERATION_DENIED:
383 case I40IW_AE_STAG_ZERO_INVALID:
384 case I40IW_AE_IB_RREQ_AND_Q1_FULL:
385 case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
386 case I40IW_AE_DDP_UBE_INVALID_MO:
387 case I40IW_AE_DDP_UBE_INVALID_QN:
388 case I40IW_AE_DDP_NO_L_BIT:
389 case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
390 case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
391 case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
392 case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
393 case I40IW_AE_INVALID_ARP_ENTRY:
394 case I40IW_AE_INVALID_TCP_OPTION_RCVD:
395 case I40IW_AE_STALE_ARP_ENTRY:
396 case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
397 case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
398 case I40IW_AE_LLP_SYN_RECEIVED:
399 case I40IW_AE_LLP_TOO_MANY_RETRIES:
400 case I40IW_AE_LLP_DOUBT_REACHABILITY:
401 case I40IW_AE_LCE_QP_CATASTROPHIC:
402 case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
403 case I40IW_AE_LCE_CQ_CATASTROPHIC:
404 case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
405 case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
406 case I40IW_AE_QP_SUSPEND_COMPLETE:
407 ctx_info->err_rq_idx_valid = false;
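/* fall through */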
408 default:
409 if (!info->sq && ctx_info->err_rq_idx_valid) {
410 ctx_info->err_rq_idx = info->wqe_idx;
411 ctx_info->tcp_info_valid = false;
412 ctx_info->iwarp_info_valid = false;
413 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
414 iwqp->host_ctx.va,
415 ctx_info);
416 }
417 i40iw_terminate_connection(qp, info);
418 break;
419 }
420 if (info->qp)
421 i40iw_rem_ref(&iwqp->ibqp);
422 } while (1);
423
424 if (aeqcnt)
425 dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
426 }
427
428 /**
429 * i40iw_manage_apbvt - add or delete tcp port
430 * @iwdev: iwarp device
431 * @accel_local_port: port for apbvt
432 * @add_port: add or delete port
433 */
434 int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
435 {
436 struct i40iw_apbvt_info *info;
437 enum i40iw_status_code status;
438 struct i40iw_cqp_request *cqp_request;
439 struct cqp_commands_info *cqp_info;
440
441 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
442 if (!cqp_request)
443 return -ENOMEM;
444
445 cqp_info = &cqp_request->info;
446 info = &cqp_info->in.u.manage_apbvt_entry.info;
447
448 memset(info, 0, sizeof(*info));
449 info->add = add_port;
450 info->port = cpu_to_le16(accel_local_port);
451
452 cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
453 cqp_info->post_sq = 1;
454 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
455 cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
456 status = i40iw_handle_cqp_op(iwdev, cqp_request);
457 if (status)
458 i40iw_pr_err("CQP-OP Manage APBVT entry fail");
459 return status;
460 }
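/*
 * Example usage (illustrative only, not from the original source): the
 * connection manager would add an APBVT entry for a local TCP port when a
 * listener or connection is set up and delete it on teardown, e.g.
 *     status = i40iw_manage_apbvt(iwdev, loc_port, true);
 *     ...
 *     status = i40iw_manage_apbvt(iwdev, loc_port, false);
 * where loc_port is the accelerated local port (hypothetical variable).
 */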
461
462 /**
463 * i40iw_manage_arp_cache - manage hw arp cache
464 * @iwdev: iwarp device
465 * @mac_addr: mac address ptr
466 * @ip_addr: ip addr for arp cache
467 * @action: add, delete or modify
468 */
469 void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
470 unsigned char *mac_addr,
471 u32 *ip_addr,
472 bool ipv4,
473 u32 action)
474 {
475 struct i40iw_add_arp_cache_entry_info *info;
476 struct i40iw_cqp_request *cqp_request;
477 struct cqp_commands_info *cqp_info;
478 int arp_index;
479
480 arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
481 if (arp_index == -1)
482 return;
483 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
484 if (!cqp_request)
485 return;
486
487 cqp_info = &cqp_request->info;
488 if (action == I40IW_ARP_ADD) {
489 cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
490 info = &cqp_info->in.u.add_arp_cache_entry.info;
491 memset(info, 0, sizeof(*info));
492 info->arp_index = cpu_to_le16((u16)arp_index);
493 info->permanent = true;
494 ether_addr_copy(info->mac_addr, mac_addr);
495 cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
496 cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
497 } else {
498 cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
499 cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
500 cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
501 cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
502 }
503
504 cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
505 cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
506 cqp_info->post_sq = 1;
507 if (i40iw_handle_cqp_op(iwdev, cqp_request))
508 i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
509 }
510
511 /**
512 * i40iw_send_syn_cqp_callback - do syn/ack after qhash
513 * @cqp_request: qhash cqp completion
514 * @send_ack: flag send ack
515 */
516 static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
517 {
518 i40iw_send_syn(cqp_request->param, send_ack);
519 }
520
521 /**
522 * i40iw_manage_qhash - add or modify qhash
523 * @iwdev: iwarp device
524 * @cminfo: cm info for qhash
525 * @etype: type (syn or quad)
526 * @mtype: type of qhash
527 * @cmnode: cmnode associated with connection
528 * @wait: wait for completion
530 */
531 enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
532 struct i40iw_cm_info *cminfo,
533 enum i40iw_quad_entry_type etype,
534 enum i40iw_quad_hash_manage_type mtype,
535 void *cmnode,
536 bool wait)
537 {
538 struct i40iw_qhash_table_info *info;
539 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
540 enum i40iw_status_code status;
541 struct i40iw_cqp *iwcqp = &iwdev->cqp;
542 struct i40iw_cqp_request *cqp_request;
543 struct cqp_commands_info *cqp_info;
544
545 cqp_request = i40iw_get_cqp_request(iwcqp, wait);
546 if (!cqp_request)
547 return I40IW_ERR_NO_MEMORY;
548 cqp_info = &cqp_request->info;
549 info = &cqp_info->in.u.manage_qhash_table_entry.info;
550 memset(info, 0, sizeof(*info));
551
552 info->manage = mtype;
553 info->entry_type = etype;
554 if (cminfo->vlan_id != 0xFFFF) {
555 info->vlan_valid = true;
556 info->vlan_id = cpu_to_le16(cminfo->vlan_id);
557 } else {
558 info->vlan_valid = false;
559 }
560
561 info->ipv4_valid = cminfo->ipv4;
562 ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
563 info->qp_num = cpu_to_le32(dev->ilq->qp_id);
564 info->dest_port = cpu_to_le16(cminfo->loc_port);
565 info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
566 info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
567 info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
568 info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
569 if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
570 info->src_port = cpu_to_le16(cminfo->rem_port);
571 info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
572 info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
573 info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
574 info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
575 }
576 if (cmnode) {
577 cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
578 cqp_request->param = (void *)cmnode;
579 }
580
581 if (info->ipv4_valid)
582 i40iw_debug(dev, I40IW_DEBUG_CM,
583 "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
584 __func__, (!mtype) ? "DELETE" : "ADD",
585 info->dest_ip,
586 info->dest_port, info->mac_addr, cminfo->vlan_id);
587 else
588 i40iw_debug(dev, I40IW_DEBUG_CM,
589 "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
590 __func__, (!mtype) ? "DELETE" : "ADD",
591 info->dest_ip,
592 info->dest_port, info->mac_addr, cminfo->vlan_id);
593 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
594 cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
595 cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
596 cqp_info->post_sq = 1;
597 status = i40iw_handle_cqp_op(iwdev, cqp_request);
598 if (status)
599 i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
600 return status;
601 }
602
603 /**
604 * i40iw_hw_flush_wqes - flush qp's wqe
605 * @iwdev: iwarp device
606 * @qp: hardware control qp
607 * @info: info for flush
608 * @wait: flag wait for completion
609 */
610 enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
611 struct i40iw_sc_qp *qp,
612 struct i40iw_qp_flush_info *info,
613 bool wait)
614 {
615 enum i40iw_status_code status;
616 struct i40iw_qp_flush_info *hw_info;
617 struct i40iw_cqp_request *cqp_request;
618 struct cqp_commands_info *cqp_info;
619
620 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
621 if (!cqp_request)
622 return I40IW_ERR_NO_MEMORY;
623
624 cqp_info = &cqp_request->info;
625 hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
626 memcpy(hw_info, info, sizeof(*hw_info));
627
628 cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
629 cqp_info->post_sq = 1;
630 cqp_info->in.u.qp_flush_wqes.qp = qp;
631 cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
632 status = i40iw_handle_cqp_op(iwdev, cqp_request);
633 if (status)
634 i40iw_pr_err("CQP-OP Flush WQE's fail");
635 return status;
636 }
637
638 /**
639 * i40iw_hw_manage_vf_pble_bp - manage vf pbles
640 * @iwdev: iwarp device
641 * @info: info for managing pble
642 * @wait: flag wait for completion
643 */
644 enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
645 struct i40iw_manage_vf_pble_info *info,
646 bool wait)
647 {
648 enum i40iw_status_code status;
649 struct i40iw_manage_vf_pble_info *hw_info;
650 struct i40iw_cqp_request *cqp_request;
651 struct cqp_commands_info *cqp_info;
652
653 if ((iwdev->init_state < CCQ_CREATED) && wait)
654 wait = false;
655
656 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
657 if (!cqp_request)
658 return I40IW_ERR_NO_MEMORY;
659
660 cqp_info = &cqp_request->info;
661 hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
662 memcpy(hw_info, info, sizeof(*hw_info));
663
664 cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
665 cqp_info->post_sq = 1;
666 cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
667 cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
668 status = i40iw_handle_cqp_op(iwdev, cqp_request);
669 if (status)
670 i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
671 return status;
672 }
673
674 /**
675 * i40iw_get_ib_wc - convert iwarp flush code to IB work completion status
676 * @opcode: iwarp flush code
677 */
678 static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
679 {
680 switch (opcode) {
681 case FLUSH_PROT_ERR:
682 return IB_WC_LOC_PROT_ERR;
683 case FLUSH_REM_ACCESS_ERR:
684 return IB_WC_REM_ACCESS_ERR;
685 case FLUSH_LOC_QP_OP_ERR:
686 return IB_WC_LOC_QP_OP_ERR;
687 case FLUSH_REM_OP_ERR:
688 return IB_WC_REM_OP_ERR;
689 case FLUSH_LOC_LEN_ERR:
690 return IB_WC_LOC_LEN_ERR;
691 case FLUSH_GENERAL_ERR:
692 return IB_WC_GENERAL_ERR;
693 case FLUSH_FATAL_ERR:
694 default:
695 return IB_WC_FATAL_ERR;
696 }
697 }
698
699 /**
700 * i40iw_set_flush_info - set flush info
701 * @pinfo: set flush info
702 * @min: minor err
703 * @maj: major err
704 * @opcode: flush error code
705 */
706 static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
707 u16 *min,
708 u16 *maj,
709 enum i40iw_flush_opcode opcode)
710 {
711 *min = (u16)i40iw_get_ib_wc(opcode);
712 *maj = CQE_MAJOR_DRV;
713 pinfo->userflushcode = true;
714 }
715
716 /**
717 * i40iw_flush_wqes - flush wqe for qp
718 * @iwdev: iwarp device
719 * @iwqp: qp to flush wqes
720 */
721 void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
722 {
723 struct i40iw_qp_flush_info info;
724 struct i40iw_qp_flush_info *pinfo = &info;
725
726 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
727
728 memset(pinfo, 0, sizeof(*pinfo));
729 info.sq = true;
730 info.rq = true;
731 if (qp->term_flags) {
732 i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
733 &pinfo->sq_major_code, qp->flush_code);
734 i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
735 &pinfo->rq_major_code, qp->flush_code);
736 }
737 (void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
738 }