IB/qib: Add new qib driver for QLogic PCIe InfiniBand adapters
drivers/infiniband/hw/qib/qib_uc.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x

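/*
 * For example, OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST, the
 * 8-bit IB opcode that lands in the top byte of BTH word 0 (see the
 * "qp->s_state << 24" in qib_make_uc_req() below).
 */
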
/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct qib_qp *qp)
{
        struct qib_other_headers *ohdr;
        struct qib_swqe *wqe;
        unsigned long flags;
        u32 hwords;
        u32 bth0;
        u32 len;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        int ret = 0;

        spin_lock_irqsave(&qp->s_lock, flags);

        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
                if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
                if (qp->s_last == qp->s_head)
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&qp->s_dma_busy)) {
                        qp->s_flags |= QIB_S_WAIT_DMA;
                        goto bail;
                }
                wqe = get_swqe_ptr(qp, qp->s_last);
                qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
                goto done;
        }
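        /*
         * Note on the flush path above: QIB_S_WAIT_DMA defers the
         * flush until the send DMA engine has drained this QP's
         * descriptors; the DMA-completion path is expected to clear
         * the flag and reschedule the send engine so the flush can
         * finish then.
         */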

        ohdr = &qp->s_hdr.u.oth;
        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
                ohdr = &qp->s_hdr.u.l.oth;
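        /*
         * With a GRH the header layout is LRH+GRH+BTH instead of
         * LRH+BTH, so point at the matching union member;
         * qib_make_ruc_header() adds the GRH's ten 32-bit words to
         * s_hdrwords when it builds the header.
         */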

        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;
        bth0 = 0;

        /* Get the next send request. */
        wqe = get_swqe_ptr(qp, qp->s_cur);
        qp->s_wqe = NULL;
        switch (qp->s_state) {
        default:
                if (!(ib_qib_state_ops[qp->state] &
                    QIB_PROCESS_NEXT_SEND_OK))
                        goto bail;
                /* Check if send work queue is empty. */
                if (qp->s_cur == qp->s_head)
                        goto bail;
                /* Start a new request. */
                wqe->psn = qp->s_next_psn;
                qp->s_psn = qp->s_next_psn;
                qp->s_sge.sge = wqe->sg_list[0];
                qp->s_sge.sg_list = wqe->sg_list + 1;
                qp->s_sge.num_sge = wqe->wr.num_sge;
                qp->s_sge.total_len = wqe->length;
                len = wqe->length;
                qp->s_len = len;
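                /*
                 * From here on, qp->s_sge walks the WQE's
                 * scatter/gather list and qp->s_len tracks the bytes
                 * still to send, so a message larger than the path MTU
                 * is emitted as FIRST/MIDDLE/LAST packets over
                 * successive calls to this function.
                 */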
                switch (wqe->wr.opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        if (len > pmtu) {
                                qp->s_state = OP(SEND_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_SEND)
                                qp->s_state = OP(SEND_ONLY);
                        else {
                                qp->s_state =
                                        OP(SEND_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the BTH */
                                ohdr->u.imm_data = wqe->wr.ex.imm_data;
                                hwords += 1;
                        }
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= IB_BTH_SOLICITED;
                        qp->s_wqe = wqe;
                        if (++qp->s_cur >= qp->s_size)
                                qp->s_cur = 0;
                        break;

                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->wr.wr.rdma.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        hwords += sizeof(struct ib_reth) / 4;
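                        /*
                         * The RETH names the remote buffer for the
                         * whole message (struct ib_reth is 16 bytes:
                         * vaddr, rkey, length), so only the FIRST or
                         * ONLY packet of an RDMA write carries it.
                         */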
                        if (len > pmtu) {
                                qp->s_state = OP(RDMA_WRITE_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                                qp->s_state = OP(RDMA_WRITE_ONLY);
                        else {
                                qp->s_state =
                                        OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the RETH */
                                ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
                                hwords += 1;
                                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                        bth0 |= IB_BTH_SOLICITED;
                        }
                        qp->s_wqe = wqe;
                        if (++qp->s_cur >= qp->s_size)
                                qp->s_cur = 0;
                        break;

                default:
                        goto bail;
                }
                break;

        case OP(SEND_FIRST):
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_SEND)
                        qp->s_state = OP(SEND_LAST);
                else {
                        qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.ex.imm_data;
                        hwords += 1;
                }
                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                        bth0 |= IB_BTH_SOLICITED;
                qp->s_wqe = wqe;
                if (++qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                break;

        case OP(RDMA_WRITE_FIRST):
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                        qp->s_state = OP(RDMA_WRITE_LAST);
                else {
                        qp->s_state =
                                OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.ex.imm_data;
                        hwords += 1;
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= IB_BTH_SOLICITED;
                }
                qp->s_wqe = wqe;
                if (++qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                break;
        }
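        /*
         * Common tail: charge this packet's payload against s_len and
         * have qib_make_ruc_header() build the LRH/BTH (and GRH if
         * needed).  IB PSNs are 24 bits wide, hence the QIB_PSN_MASK
         * wrap on the increment.
         */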
        qp->s_len -= len;
        qp->s_hdrwords = hwords;
        qp->s_cur_sge = &qp->s_sge;
        qp->s_cur_size = len;
        qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
                            qp->s_next_psn++ & QIB_PSN_MASK);
done:
        ret = 1;
        goto unlock;

bail:
        qp->s_flags &= ~QIB_S_BUSY;
unlock:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}
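
/*
 * Usage note (a sketch of the caller contract, not verified against
 * every call site): the send engine in qib_ruc.c, qib_do_send(), is
 * believed to invoke qib_make_uc_req() in a loop, handing each
 * constructed header and payload to the hardware until this function
 * returns 0.
 */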

/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
        struct qib_other_headers *ohdr;
        unsigned long flags;
        u32 opcode;
        u32 hdrsize;
        u32 psn;
        u32 pad;
        struct ib_wc wc;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        struct ib_reth *reth;
        int ret;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12;       /* LRH + BTH */
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
        }

        opcode = be32_to_cpu(ohdr->bth[0]);
        spin_lock_irqsave(&qp->s_lock, flags);
        if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
                goto sunlock;
        spin_unlock_irqrestore(&qp->s_lock, flags);
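        /*
         * qib_ruc_check_hdr() is believed to validate the packet's
         * addressing (LIDs, and the GRH when present) against the QP's
         * primary or alternate path, including alternate-path
         * migration, which would explain why it runs under the s_lock.
         */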

        psn = be32_to_cpu(ohdr->bth[2]);
        opcode >>= 24;
        memset(&wc, 0, sizeof(wc));

        /* Prevent simultaneous processing after APM on different CPUs */
        spin_lock(&qp->r_lock);

        /* Compare the PSN versus the expected PSN. */
        if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
                /*
                 * Handle a sequence error.
                 * Silently drop any current message.
                 */
                qp->r_psn = psn;
inv:
                if (qp->r_state == OP(SEND_FIRST) ||
                    qp->r_state == OP(SEND_MIDDLE)) {
                        set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
                        qp->r_sge.num_sge = 0;
                } else
                        while (qp->r_sge.num_sge) {
                                atomic_dec(&qp->r_sge.sge.mr->refcount);
                                if (--qp->r_sge.num_sge)
                                        qp->r_sge.sge = *qp->r_sge.sg_list++;
                        }
                qp->r_state = OP(SEND_LAST);
                switch (opcode) {
                case OP(SEND_FIRST):
                case OP(SEND_ONLY):
                case OP(SEND_ONLY_WITH_IMMEDIATE):
                        goto send_first;

                case OP(RDMA_WRITE_FIRST):
                case OP(RDMA_WRITE_ONLY):
                case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
                        goto rdma_first;

                default:
                        goto drop;
                }
        }
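        /*
         * UC has no acknowledgements or retransmission, so a PSN gap
         * simply abandons the message in progress; the block above
         * resynchronizes on the next FIRST or ONLY packet and drops
         * everything else.
         */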

        /* Check for opcode sequence errors. */
        switch (qp->r_state) {
        case OP(SEND_FIRST):
        case OP(SEND_MIDDLE):
                if (opcode == OP(SEND_MIDDLE) ||
                    opcode == OP(SEND_LAST) ||
                    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_MIDDLE):
                if (opcode == OP(RDMA_WRITE_MIDDLE) ||
                    opcode == OP(RDMA_WRITE_LAST) ||
                    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        default:
                if (opcode == OP(SEND_FIRST) ||
                    opcode == OP(SEND_ONLY) ||
                    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
                    opcode == OP(RDMA_WRITE_FIRST) ||
                    opcode == OP(RDMA_WRITE_ONLY) ||
                    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        break;
                goto inv;
        }

        if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
                qp->r_flags |= QIB_R_COMM_EST;
                if (qp->ibqp.event_handler) {
                        struct ib_event ev;

                        ev.device = qp->ibqp.device;
                        ev.element.qp = &qp->ibqp;
                        ev.event = IB_EVENT_COMM_EST;
                        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
                }
        }
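        /*
         * IB_EVENT_COMM_EST reports that a packet arrived while the QP
         * was still in RTR, i.e. the remote side is already sending;
         * connection managers typically use this to finish moving the
         * QP to RTS.
         */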

        /* OK, process the packet. */
        switch (opcode) {
        case OP(SEND_FIRST):
        case OP(SEND_ONLY):
        case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
                if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
                        qp->r_sge = qp->s_rdma_read_sge;
                else {
                        ret = qib_get_rwqe(qp, 0);
                        if (ret < 0)
                                goto op_err;
                        if (!ret)
                                goto drop;
                        /*
                         * qp->s_rdma_read_sge will be the owner
                         * of the mr references.
                         */
                        qp->s_rdma_read_sge = qp->r_sge;
                }
                qp->r_rcv_len = 0;
                if (opcode == OP(SEND_ONLY))
                        goto send_last;
                else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
                        goto send_last_imm;
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                /*
                 * Check for invalid length: a MIDDLE packet must carry
                 * exactly one PMTU of payload and stay within the
                 * posted rwqe's length.
                 */
                if (unlikely(tlen != (hdrsize + pmtu + 4)))
                        goto rewind;
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len))
                        goto rewind;
                qib_copy_sge(&qp->r_sge, data, pmtu, 0);
                break;

        case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
                wc.ex.imm_data = ohdr->u.imm_data;
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
                /* FALLTHROUGH */
        case OP(SEND_LAST):
send_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4)))
                        goto rewind;
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                wc.byte_len = tlen + qp->r_rcv_len;
                if (unlikely(wc.byte_len > qp->r_len))
                        goto rewind;
                wc.opcode = IB_WC_RECV;
last_imm:
                qib_copy_sge(&qp->r_sge, data, tlen, 0);
                while (qp->s_rdma_read_sge.num_sge) {
                        atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
                        if (--qp->s_rdma_read_sge.num_sge)
                                qp->s_rdma_read_sge.sge =
                                        *qp->s_rdma_read_sge.sg_list++;
                }
                wc.wr_id = qp->r_wr_id;
                wc.status = IB_WC_SUCCESS;
                wc.qp = &qp->ibqp;
                wc.src_qp = qp->remote_qpn;
                wc.slid = qp->remote_ah_attr.dlid;
                wc.sl = qp->remote_ah_attr.sl;
                /* Signal completion event if the solicited bit is set. */
                qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                             (ohdr->bth[0] &
                              cpu_to_be32(IB_BTH_SOLICITED)) != 0);
                break;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_ONLY):
        case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_WRITE)))
                        goto drop;
                reth = &ohdr->u.rc.reth;
                hdrsize += sizeof(*reth);
                qp->r_len = be32_to_cpu(reth->length);
                qp->r_rcv_len = 0;
                qp->r_sge.sg_list = NULL;
                if (qp->r_len != 0) {
                        u32 rkey = be32_to_cpu(reth->rkey);
                        u64 vaddr = be64_to_cpu(reth->vaddr);
                        int ok;

                        /* Check rkey */
                        ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
                                         vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
                        if (unlikely(!ok))
                                goto drop;
                        qp->r_sge.num_sge = 1;
                } else {
                        qp->r_sge.num_sge = 0;
                        qp->r_sge.sge.mr = NULL;
                        qp->r_sge.sge.vaddr = NULL;
                        qp->r_sge.sge.length = 0;
                        qp->r_sge.sge.sge_length = 0;
                }
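                /*
                 * A zero-length RDMA WRITE is legal, so the rkey check
                 * is skipped and an empty SGE is set up; the copy
                 * routines then have no payload to place.
                 */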
                if (opcode == OP(RDMA_WRITE_ONLY))
                        goto rdma_last;
                else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        goto rdma_last_imm;
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                /*
                 * Check for invalid length: a MIDDLE packet must carry
                 * exactly one PMTU of payload and stay within the
                 * RETH-declared length.
                 */
                if (unlikely(tlen != (hdrsize + pmtu + 4)))
                        goto drop;
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len))
                        goto drop;
                qib_copy_sge(&qp->r_sge, data, pmtu, 1);
                break;

        case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
rdma_last_imm:
                wc.ex.imm_data = ohdr->u.imm_data;
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;

                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4)))
                        goto drop;
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
                        goto drop;
                if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
                        while (qp->s_rdma_read_sge.num_sge) {
                                atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
                                if (--qp->s_rdma_read_sge.num_sge)
                                        qp->s_rdma_read_sge.sge =
                                                *qp->s_rdma_read_sge.sg_list++;
                        }
                else {
                        ret = qib_get_rwqe(qp, 1);
                        if (ret < 0)
                                goto op_err;
                        if (!ret)
                                goto drop;
                }
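                /*
                 * Per the IB spec, an RDMA WRITE with immediate
                 * consumes an RWQE at the responder and produces a
                 * receive completion carrying the immediate data: the
                 * if/else above either reuses a rewound RWQE (dropping
                 * its stale MR references) or fetches a fresh one.
                 */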
                wc.byte_len = qp->r_len;
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
                goto last_imm;

        case OP(RDMA_WRITE_LAST):
rdma_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4)))
                        goto drop;
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
                        goto drop;
                qib_copy_sge(&qp->r_sge, data, tlen, 1);
                while (qp->r_sge.num_sge) {
                        atomic_dec(&qp->r_sge.sge.mr->refcount);
                        if (--qp->r_sge.num_sge)
                                qp->r_sge.sge = *qp->r_sge.sg_list++;
                }
                break;

        default:
                /* Drop packet for unknown opcodes. */
                goto drop;
        }
        qp->r_psn++;
        qp->r_state = opcode;
        spin_unlock(&qp->r_lock);
        return;

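/*
 * rewind: a SEND's length didn't match the posted receive buffer, so
 * the packet is dropped, but the already-consumed RWQE is kept (via
 * QIB_R_REWIND_SGE) for reuse by the next message rather than being
 * silently lost.
 */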
rewind:
        set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
        qp->r_sge.num_sge = 0;
drop:
        ibp->n_pkt_drops++;
        spin_unlock(&qp->r_lock);
        return;

op_err:
        qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
        spin_unlock(&qp->r_lock);
        return;

sunlock:
        spin_unlock_irqrestore(&qp->s_lock, flags);
}