/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>

#include "qib.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
                              struct qpn_map *map, unsigned off)
{
        return (map - qpt->map) * BITS_PER_PAGE + off;
}
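
/*
 * Each qpn_map covers BITS_PER_PAGE QPNs, one bit per QPN.  For
 * example, with 4 KiB pages a map covers 32768 QPNs, so map[1] with
 * bit offset 5 corresponds to QPN 1 * 32768 + 5 = 32773, which is
 * exactly what mk_qpn() above computes.
 */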

static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
                                        struct qpn_map *map, unsigned off,
                                        unsigned n)
{
        if (qpt->mask) {
                off++;
                if (((off & qpt->mask) >> 1) >= n)
                        off = (off | qpt->mask) + 2;
        } else
                off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
        return off;
}
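
/*
 * A note on qpt->mask (inferred from how it is used here and in
 * alloc_qpn() below): when the chip requires QPNs to encode a receive
 * context, (qpn & mask) >> 1 selects the context, so offsets whose
 * context bits would reach n (the number of kernel receive queues)
 * are skipped by rounding up past the masked range.
 */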

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
        0,                      /* 0 */
        1,                      /* 1 */
        2,                      /* 2 */
        3,                      /* 3 */
        4,                      /* 4 */
        6,                      /* 5 */
        8,                      /* 6 */
        12,                     /* 7 */
        16,                     /* 8 */
        24,                     /* 9 */
        32,                     /* A */
        48,                     /* B */
        64,                     /* C */
        96,                     /* D */
        128,                    /* E */
        192,                    /* F */
        256,                    /* 10 */
        384,                    /* 11 */
        512,                    /* 12 */
        768,                    /* 13 */
        1024,                   /* 14 */
        1536,                   /* 15 */
        2048,                   /* 16 */
        3072,                   /* 17 */
        4096,                   /* 18 */
        6144,                   /* 19 */
        8192,                   /* 1A */
        12288,                  /* 1B */
        16384,                  /* 1C */
        24576,                  /* 1D */
        32768                   /* 1E */
};
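
/*
 * The entries above alternate between x1.5 and x1.33 steps, so 31
 * codes span 0..32768 credits; qib_compute_aeth() below binary
 * searches this table for the largest code whose value does not
 * exceed the actual number of free receive work queue entries.
 */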

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}
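
/*
 * Note for callers: get_map_page() can fail under memory pressure, in
 * which case map->page is left NULL; alloc_qpn() below re-checks
 * map->page after calling it and fails with -ENOMEM.
 */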

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
                     enum ib_qp_type type, u8 port)
{
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
        u32 ret;

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

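                /*
                 * QP0/QP1 bookkeeping: ret is 0 for SMI and 1 for GSI,
                 * so the flag bit is 1 << (qp_num + 2 * (port - 1)):
                 * bits 0/1 cover port 1, bits 2/3 port 2, and so on.
                 * Only one QP0 and one QP1 may exist per port.
                 */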
                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + 2;
        if (qpn >= QPN_MAX)
                qpn = 2;
        if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
                qpn = (qpn | qpt->mask) + 2;
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset = find_next_offset(qpt, map, offset,
                                                  dd->n_krcv_queues);
                        qpn = mk_qpn(qpt, map, offset);
                        /*
                         * This test differs from alloc_pidmap().
                         * If find_next_offset() does find a zero
                         * bit, we don't need to check for QPN
                         * wrapping around past our starting QPN.
                         * We just need to be sure we don't loop
                         * forever.
                         */
                } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before
                 * increasing the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &qpt->map[0];
                        offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
        struct qpn_map *map;

        map = qpt->map + qpn / BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
        return jhash_1word(qpn, dev->qp_rnd) &
                (dev->qp_table_size - 1);
}
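
/*
 * qp_rnd is a per-device hash seed (assumed to be set up at device
 * initialization); masking with qp_table_size - 1 works because the
 * table size is a power of two.
 */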

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned long flags;
        unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

        spin_lock_irqsave(&dev->qpt_lock, flags);
        atomic_inc(&qp->refcount);

        if (qp->ibqp.qp_num == 0)
                rcu_assign_pointer(ibp->qp0, qp);
        else if (qp->ibqp.qp_num == 1)
                rcu_assign_pointer(ibp->qp1, qp);
        else {
                qp->next = dev->qp_table[n];
                rcu_assign_pointer(dev->qp_table[n], qp);
        }

        spin_unlock_irqrestore(&dev->qpt_lock, flags);
        synchronize_rcu();
}
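
/*
 * Locking sketch for the QP table: writers (insert_qp() and
 * remove_qp()) serialize on dev->qpt_lock and publish pointers with
 * rcu_assign_pointer(); readers (qib_lookup_qpn()) traverse a bucket
 * under rcu_read_lock() only.
 */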

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
        unsigned long flags;

        spin_lock_irqsave(&dev->qpt_lock, flags);

        if (rcu_dereference_protected(ibp->qp0,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
                atomic_dec(&qp->refcount);
                rcu_assign_pointer(ibp->qp0, NULL);
        } else if (rcu_dereference_protected(ibp->qp1,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
                atomic_dec(&qp->refcount);
                rcu_assign_pointer(ibp->qp1, NULL);
        } else {
                struct qib_qp *q;
                struct qib_qp __rcu **qpp;

                qpp = &dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                                lockdep_is_held(&dev->qpt_lock))) != NULL;
                                qpp = &q->next)
                        if (q == qp) {
                                atomic_dec(&qp->refcount);
                                *qpp = qp->next;
                                rcu_assign_pointer(qp->next, NULL);
                                break;
                        }
        }

        spin_unlock_irqrestore(&dev->qpt_lock, flags);
        synchronize_rcu();
}
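
/*
 * The synchronize_rcu() in remove_qp() guarantees that every reader
 * that might have found the QP through the table has left its RCU
 * read-side critical section before the caller goes on to wait for
 * qp->refcount to drop to zero.
 */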

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
        struct qib_ibdev *dev = &dd->verbs_dev;
        unsigned long flags;
        struct qib_qp *qp;
        unsigned n, qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct qib_ibport *ibp = &dd->pport[n].ibport_data;

                if (!qib_mcast_tree_empty(ibp))
                        qp_inuse++;
                rcu_read_lock();
                if (rcu_dereference(ibp->qp0))
                        qp_inuse++;
                if (rcu_dereference(ibp->qp1))
                        qp_inuse++;
                rcu_read_unlock();
        }

        spin_lock_irqsave(&dev->qpt_lock, flags);
        for (n = 0; n < dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(dev->qp_table[n],
                        lockdep_is_held(&dev->qpt_lock));
                rcu_assign_pointer(dev->qp_table[n], NULL);

                for (; qp; qp = rcu_dereference_protected(qp->next,
                                lockdep_is_held(&dev->qpt_lock)))
                        qp_inuse++;
        }
        spin_unlock_irqrestore(&dev->qpt_lock, flags);
        synchronize_rcu();

        return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
        struct qib_qp *qp = NULL;

        if (unlikely(qpn <= 1)) {
                rcu_read_lock();
                if (qpn == 0)
                        qp = rcu_dereference(ibp->qp0);
                else
                        qp = rcu_dereference(ibp->qp1);
        } else {
                struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
                unsigned n = qpn_hash(dev, qpn);

                rcu_read_lock();
                for (qp = rcu_dereference(dev->qp_table[n]); qp;
                        qp = rcu_dereference(qp->next))
                        if (qp->ibqp.qp_num == qpn)
                                break;
        }
        if (qp)
                if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
                        qp = NULL;

        rcu_read_unlock();
        return qp;
}
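
/*
 * atomic_inc_not_zero() is what makes the lookup safe: if the QP's
 * last reference is already being dropped, the refcount is zero and
 * the lookup must fail rather than hand back a QP that is about to
 * be freed.
 */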

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        atomic_set(&qp->s_dma_busy, 0);
        qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_draining = 0;
        qp->s_next_psn = 0;
        qp->s_last_psn = 0;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_acked = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
        qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
        unsigned n;

        if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
                qib_put_ss(&qp->s_rdma_read_sge);

        qib_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
                        unsigned i;

                        for (i = 0; i < wqe->wr.num_sge; i++) {
                                struct qib_sge *sge = &wqe->sg_list[i];

                                qib_put_mr(sge->mr);
                        }
                        if (qp->ibqp.qp_type == IB_QPT_UD ||
                            qp->ibqp.qp_type == IB_QPT_SMI ||
                            qp->ibqp.qp_type == IB_QPT_GSI)
                                atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                }
                if (qp->s_rdma_mr) {
                        qib_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        if (qp->ibqp.qp_type != IB_QPT_RC)
                return;

        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
                struct qib_ack_entry *e = &qp->s_ack_queue[n];

                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
                    e->rdma_sge.mr) {
                        qib_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
        int ret = 0;

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto bail;

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
                qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

        spin_lock(&dev->pending_lock);
        if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
                qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
                list_del_init(&qp->iowait);
        }
        spin_unlock(&dev->pending_lock);

        if (!(qp->s_flags & QIB_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
                        qib_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
                if (qp->s_tx) {
                        qib_put_txreq(qp->s_tx);
                        qp->s_tx = NULL;
                }
        }

        /* Schedule the sending tasklet to drain the send work queue. */
        if (qp->s_last != qp->s_head)
                qib_schedule_send(qp);

        clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.wq) {
                struct qib_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler)
                ret = 1;

bail:
        return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
{
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct qib_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
        int mig = 0;
        int ret;
        u32 pmtu = 0; /* for gcc warning only */

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
                        goto inval;
                if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
                        goto inval;
                if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > QIB_QPN_MASK)
                        goto inval;

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)
                        goto inval;

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)
                        goto inval;

        /*
         * Don't allow invalid path_mtu values.  OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu).  We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                struct qib_devdata *dd = dd_from_dev(dev);
                int mtu, pidx = qp->port_num - 1;

                mtu = ib_mtu_enum_to_int(attr->path_mtu);
                if (mtu == -1)
                        goto inval;
                if (mtu > dd->pport[pidx].ibmtu) {
                        switch (dd->pport[pidx].ibmtu) {
                        case 4096:
                                pmtu = IB_MTU_4096;
                                break;
                        case 2048:
                                pmtu = IB_MTU_2048;
                                break;
                        case 1024:
                                pmtu = IB_MTU_1024;
                                break;
                        case 512:
                                pmtu = IB_MTU_512;
                                break;
                        case 256:
                                pmtu = IB_MTU_256;
                                break;
                        default:
                                pmtu = IB_MTU_2048;
                        }
                } else
                        pmtu = attr->path_mtu;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                goto inval;
                        if (new_state != IB_QPS_RTS)
                                goto inval;
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                                goto inval;
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                                goto inval;
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                mig = 1;
                } else
                        goto inval;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->pending_lock);
                        if (!list_empty(&qp->iowait))
                                list_del_init(&qp->iowait);
                        spin_unlock(&dev->pending_lock);
                        qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                        spin_unlock(&qp->s_lock);
                        spin_unlock_irq(&qp->r_lock);
                        /* Stop the sending work queue and retry timer */
                        cancel_work_sync(&qp->s_work);
                        del_timer_sync(&qp->s_timer);
                        wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                        if (qp->s_tx) {
                                qib_put_txreq(qp->s_tx);
                                qp->s_tx = NULL;
                        }
                        remove_qp(dev, qp);
                        wait_event(qp->wait, !atomic_read(&qp->refcount));
                        spin_lock_irq(&qp->r_lock);
                        spin_lock(&qp->s_lock);
                        clear_mr_refs(qp, 1);
                        qib_reset_qp(qp, ibqp->qp_type);
                }
                break;

        case IB_QPS_RTR:
                /* Allow event to retrigger if QP set to RTR more than once */
                qp->r_flags &= ~QIB_R_COMM_EST;
                qp->state = new_state;
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                qp->alt_ah_attr = attr->alt_ah_attr;
                qp->s_alt_pkey_index = attr->alt_pkey_index;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                if (mig) {
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                qp->path_mtu = pmtu;
                qp->pmtu = ib_mtu_enum_to_int(pmtu);
        }

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

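        /*
         * IBTA defines the local ACK timeout as 4.096 usec * 2^timeout;
         * the expression below (4096 ns * 2^timeout, divided by 1000 to
         * get usec) precomputes that value in jiffies for the retry
         * timer.
         */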
        if (attr_mask & IB_QP_TIMEOUT) {
                qp->timeout = attr->timeout;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
        }

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                insert_qp(dev, qp);

        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        if (mig) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);
        ret = -EINVAL;

bail:
        return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct qib_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
        attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = qp->alt_ah_attr.port_num;
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;
        return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
        u32 aeth = qp->r_msn & QIB_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct qib_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads is not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
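                /*
                 * For example, with 100 credits available the search
                 * settles on x = 0xD (96), the largest code whose
                 * table value does not exceed the credit count.
                 */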
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << QIB_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct qib_qp *qp;
        int err;
        struct qib_swqe *swq = NULL;
        struct qib_ibdev *dev;
        struct qib_devdata *dd;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret;

        if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
            init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
                    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
        }

        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = sizeof(struct qib_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct qib_swqe);
                swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
                if (swq == NULL) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
                }
                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct qib_srq *srq = to_isrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_swq;
                }
                RCU_INIT_POINTER(qp->next, NULL);
                qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
                if (!qp->s_hdr) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_qp;
                }
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
                if (init_attr->srq)
                        sz = 0;
                else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct qib_rwqe);
                        qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
                                                   qp->r_rq.size * sz);
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qp;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                init_waitqueue_head(&qp->wait_dma);
                init_timer(&qp->s_timer);
                qp->s_timer.data = (unsigned long)qp;
                INIT_WORK(&qp->s_work, qib_do_send);
                INIT_LIST_HEAD(&qp->iowait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = QIB_S_SIGNAL_REQ_WR;
                dev = to_idev(ibpd->device);
                dd = dd_from_dev(dev);
                err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
                                init_attr->port_num);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        vfree(qp->r_rq.wq);
                        goto bail_qp;
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                qib_reset_qp(qp, init_attr->qp_type);
                break;

        default:
                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);
                goto bail;
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See qib_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                } else {
                        u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

                        qp->ip = qib_create_mmap_info(dev, s,
                                                      ibpd->uobject->context,
                                                      qp->r_rq.wq);
                        if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_ip;
                        }

                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
        }

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_qib_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = &qp->ibqp;
        goto bail;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, qib_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
        kfree(qp->s_hdr);
        kfree(qp);
bail_swq:
        vfree(swq);
bail:
        return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
        struct qib_qp *qp = to_iqp(ibqp);
        struct qib_ibdev *dev = to_idev(ibqp->device);

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->pending_lock);
                if (!list_empty(&qp->iowait))
                        list_del_init(&qp->iowait);
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                spin_unlock_irq(&qp->s_lock);
                cancel_work_sync(&qp->s_work);
                del_timer_sync(&qp->s_timer);
                wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                if (qp->s_tx) {
                        qib_put_txreq(qp->s_tx);
                        qp->s_tx = NULL;
                }
                remove_qp(dev, qp);
                wait_event(qp->wait, !atomic_read(&qp->refcount));
                clear_mr_refs(qp, 1);
        } else
                spin_unlock_irq(&qp->s_lock);

        /* all users cleaned up; mark the QPN available */
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, qib_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        kfree(qp->s_hdr);
        kfree(qp);
        return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the device data structure
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
        spin_lock_init(&qpt->lock);
        qpt->last = 1;          /* start with QPN 2 */
        qpt->nmaps = 1;
        qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                if (qpt->map[i].page)
                        free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - handle a credit update from a received AETH
 * @qp: the qp whose credit state to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == QIB_AETH_CREDIT_INVAL) {
                if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                        qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
                        }
                }
        } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
                if (qib_cmp24(credit, qp->s_lsn) > 0) {
                        qp->s_lsn = credit;
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
                        }
                }
        }
}