/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
        MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
        MTHCA_ACK_REQ_FREQ       = 10,
        MTHCA_FLIGHT_LIMIT       = 9,
        MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
        MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
        MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
        MTHCA_QP_STATE_RST  = 0,
        MTHCA_QP_STATE_INIT = 1,
        MTHCA_QP_STATE_RTR  = 2,
        MTHCA_QP_STATE_RTS  = 3,
        MTHCA_QP_STATE_SQE  = 4,
        MTHCA_QP_STATE_SQD  = 5,
        MTHCA_QP_STATE_ERR  = 6,
        MTHCA_QP_STATE_DRAINING = 7
};

enum {
        MTHCA_QP_ST_RC  = 0x0,
        MTHCA_QP_ST_UC  = 0x1,
        MTHCA_QP_ST_RD  = 0x2,
        MTHCA_QP_ST_UD  = 0x3,
        MTHCA_QP_ST_MLX = 0x7
};

enum {
        MTHCA_QP_PM_MIGRATED = 0x3,
        MTHCA_QP_PM_ARMED    = 0x0,
        MTHCA_QP_PM_REARM    = 0x1
};

enum {
        /* qp_context flags */
        MTHCA_QP_BIT_DE  = 1 <<  8,
        /* params1 */
        MTHCA_QP_BIT_SRE = 1 << 15,
        MTHCA_QP_BIT_SWE = 1 << 14,
        MTHCA_QP_BIT_SAE = 1 << 13,
        MTHCA_QP_BIT_SIC = 1 <<  4,
        MTHCA_QP_BIT_SSC = 1 <<  3,
        /* params2 */
        MTHCA_QP_BIT_RRE = 1 << 15,
        MTHCA_QP_BIT_RWE = 1 << 14,
        MTHCA_QP_BIT_RAE = 1 << 13,
        MTHCA_QP_BIT_RIC = 1 <<  4,
        MTHCA_QP_BIT_RSC = 1 <<  3
};

struct mthca_qp_path {
        __be32 port_pkey;
        u8     rnr_retry;
        u8     g_mylmc;
        __be16 rlid;
        u8     ackto;
        u8     mgid_index;
        u8     static_rate;
        u8     hop_limit;
        __be32 sl_tclass_flowlabel;
        u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
        __be32 flags;
        __be32 tavor_sched_queue;       /* Reserved on Arbel */
        u8     mtu_msgmax;
        u8     rq_size_stride;          /* Reserved on Tavor */
        u8     sq_size_stride;          /* Reserved on Tavor */
        u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
        __be32 usr_page;
        __be32 local_qpn;
        __be32 remote_qpn;
        u32    reserved1[2];
        struct mthca_qp_path pri_path;
        struct mthca_qp_path alt_path;
        __be32 rdd;
        __be32 pd;
        __be32 wqe_base;
        __be32 wqe_lkey;
        __be32 params1;
        __be32 reserved2;
        __be32 next_send_psn;
        __be32 cqn_snd;
        __be32 snd_wqe_base_l;          /* Next send WQE on Tavor */
        __be32 snd_db_index;            /* (debugging only entries) */
        __be32 last_acked_psn;
        __be32 ssn;
        __be32 params2;
        __be32 rnr_nextrecvpsn;
        __be32 ra_buff_indx;
        __be32 cqn_rcv;
        __be32 rcv_wqe_base_l;          /* Next recv WQE on Tavor */
        __be32 rcv_db_index;            /* (debugging only entries) */
        __be32 qkey;
        __be32 srqn;
        __be32 rmsn;
        __be16 rq_wqe_counter;          /* reserved on Tavor */
        __be16 sq_wqe_counter;          /* reserved on Tavor */
        u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
        __be32 opt_param_mask;
        u32    reserved1;
        struct mthca_qp_context context;
        u32    reserved2[62];
} __attribute__((packed));

enum {
        MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
        MTHCA_QP_OPTPAR_RRE               = 1 << 1,
        MTHCA_QP_OPTPAR_RAE               = 1 << 2,
        MTHCA_QP_OPTPAR_RWE               = 1 << 3,
        MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
        MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
        MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
        MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
        MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
        MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
        MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
        MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
        MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
        MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
        MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
        MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
        MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
        [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 1;
}
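/*
 * Return a pointer to receive WQE n.  The WQE buffer is either a
 * single physically contiguous ("direct") allocation or a list of
 * pages, so the byte offset n << wqe_shift may have to be split into
 * a page index and an offset within that page.  get_send_wqe() below
 * does the same, with the send queue starting at send_wqe_offset.
 */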
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
        else
                return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + qp->send_wqe_offset +
                        (n << qp->sq.wqe_shift);
        else
                return qp->queue.page_list[(qp->send_wqe_offset +
                                            (n << qp->sq.wqe_shift)) >>
                                           PAGE_SHIFT].buf +
                        ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
                         (PAGE_SIZE - 1));
}

static void mthca_wq_init(struct mthca_wq *wq)
{
        spin_lock_init(&wq->lock);
        wq->next_ind  = 0;
        wq->last_comp = wq->max - 1;
        wq->head      = 0;
        wq->tail      = 0;
}
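/*
 * Dispatch an asynchronous event (path migration, WQ access error,
 * etc.) for a QP to the consumer's event handler.  The QP's reference
 * count is held across the callback so the QP can't be destroyed
 * while the handler runs.
 */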
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type)
{
        struct mthca_qp *qp;
        struct ib_event event;

        spin_lock(&dev->qp_table.lock);
        qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
        if (qp)
                atomic_inc(&qp->refcount);
        spin_unlock(&dev->qp_table.lock);

        if (!qp) {
                mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        event.device     = &dev->ib_dev;
        event.event      = event_type;
        event.element.qp = &qp->ibqp;
        if (qp->ibqp.event_handler)
                qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
        case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
        case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
        case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
        case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
        case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
        case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
        default:           return -1;
        }
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
        switch (transport) {
        case RC:  return MTHCA_QP_ST_RC;
        case UC:  return MTHCA_QP_ST_UC;
        case UD:  return MTHCA_QP_ST_UD;
        case RD:  return MTHCA_QP_ST_RD;
        case MLX: return MTHCA_QP_ST_MLX;
        default:  return -1;
        }
}
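/*
 * For each legal (current state, new state) pair, the firmware
 * transition command plus the attributes that are required
 * (req_param) or optional (opt_param) for each transport type,
 * following the QP state machine tables of the InfiniBand spec.
 */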
static const struct {
        int trans;
        u32 req_param[NUM_TRANS];
        u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_INIT]  = {
                        .trans = MTHCA_TRANS_RST2INIT,
                        .req_param = {
                                [UD]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_ACCESS_FLAGS),
                                [RC]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_ACCESS_FLAGS),
                                [MLX] = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                        },
                        /* bug-for-bug compatibility with VAPI: */
                        .opt_param = {
                                [MLX] = IB_QP_PORT
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_INIT]  = {
                        .trans = MTHCA_TRANS_INIT2INIT,
                        .opt_param = {
                                [UD]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_ACCESS_FLAGS),
                                [RC]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_PORT       |
                                         IB_QP_ACCESS_FLAGS),
                                [MLX] = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .trans = MTHCA_TRANS_INIT2RTR,
                        .req_param = {
                                [UC]  = (IB_QP_AV       |
                                         IB_QP_PATH_MTU |
                                         IB_QP_DEST_QPN |
                                         IB_QP_RQ_PSN),
                                [RC]  = (IB_QP_AV                 |
                                         IB_QP_PATH_MTU           |
                                         IB_QP_DEST_QPN           |
                                         IB_QP_RQ_PSN             |
                                         IB_QP_MAX_DEST_RD_ATOMIC |
                                         IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                [UD]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_ALT_PATH     |
                                         IB_QP_ACCESS_FLAGS |
                                         IB_QP_PKEY_INDEX),
                                [RC]  = (IB_QP_ALT_PATH     |
                                         IB_QP_ACCESS_FLAGS |
                                         IB_QP_PKEY_INDEX),
                                [MLX] = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_RTS]   = {
                        .trans = MTHCA_TRANS_RTR2RTS,
                        .req_param = {
                                [UD]  = IB_QP_SQ_PSN,
                                [UC]  = IB_QP_SQ_PSN,
                                [RC]  = (IB_QP_TIMEOUT   |
                                         IB_QP_RETRY_CNT |
                                         IB_QP_RNR_RETRY |
                                         IB_QP_SQ_PSN    |
                                         IB_QP_MAX_QP_RD_ATOMIC),
                                [MLX] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                [UD]  = (IB_QP_CUR_STATE |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_CUR_STATE    |
                                         IB_QP_ALT_PATH     |
                                         IB_QP_ACCESS_FLAGS |
                                         IB_QP_PKEY_INDEX   |
                                         IB_QP_PATH_MIG_STATE),
                                [RC]  = (IB_QP_CUR_STATE     |
                                         IB_QP_ALT_PATH      |
                                         IB_QP_ACCESS_FLAGS  |
                                         IB_QP_PKEY_INDEX    |
                                         IB_QP_MIN_RNR_TIMER |
                                         IB_QP_PATH_MIG_STATE),
                                [MLX] = (IB_QP_CUR_STATE |
                                         IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_RTS]   = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_RTS]   = {
                        .trans = MTHCA_TRANS_RTS2RTS,
                        .opt_param = {
                                [UD]  = (IB_QP_CUR_STATE |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_ACCESS_FLAGS |
                                         IB_QP_ALT_PATH     |
                                         IB_QP_PATH_MIG_STATE),
                                [RC]  = (IB_QP_ACCESS_FLAGS   |
                                         IB_QP_ALT_PATH       |
                                         IB_QP_PATH_MIG_STATE |
                                         IB_QP_MIN_RNR_TIMER),
                                [MLX] = (IB_QP_CUR_STATE |
                                         IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .trans = MTHCA_TRANS_RTS2SQD,
                },
        },
        [IB_QPS_SQD]   = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_RTS]   = {
                        .trans = MTHCA_TRANS_SQD2RTS,
                        .opt_param = {
                                [UD]  = (IB_QP_CUR_STATE |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_CUR_STATE    |
                                         IB_QP_ALT_PATH     |
                                         IB_QP_ACCESS_FLAGS |
                                         IB_QP_PATH_MIG_STATE),
                                [RC]  = (IB_QP_CUR_STATE     |
                                         IB_QP_ALT_PATH      |
                                         IB_QP_ACCESS_FLAGS  |
                                         IB_QP_MIN_RNR_TIMER |
                                         IB_QP_PATH_MIG_STATE),
                                [MLX] = (IB_QP_CUR_STATE |
                                         IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .trans = MTHCA_TRANS_SQD2SQD,
                        .opt_param = {
                                [UD]  = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                                [UC]  = (IB_QP_AV           |
                                         IB_QP_CUR_STATE    |
                                         IB_QP_ALT_PATH     |
                                         IB_QP_ACCESS_FLAGS |
                                         IB_QP_PKEY_INDEX   |
                                         IB_QP_PATH_MIG_STATE),
                                [RC]  = (IB_QP_AV                 |
                                         IB_QP_TIMEOUT            |
                                         IB_QP_RETRY_CNT          |
                                         IB_QP_RNR_RETRY          |
                                         IB_QP_MAX_QP_RD_ATOMIC   |
                                         IB_QP_MAX_DEST_RD_ATOMIC |
                                         IB_QP_CUR_STATE          |
                                         IB_QP_ALT_PATH           |
                                         IB_QP_ACCESS_FLAGS       |
                                         IB_QP_PKEY_INDEX         |
                                         IB_QP_MIN_RNR_TIMER      |
                                         IB_QP_PATH_MIG_STATE),
                                [MLX] = (IB_QP_PKEY_INDEX |
                                         IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_SQE]   = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                [IB_QPS_RTS]   = {
                        .trans = MTHCA_TRANS_SQERR2RTS,
                        .opt_param = {
                                [UD]  = (IB_QP_CUR_STATE |
                                         IB_QP_QKEY),
                                [UC]  = IB_QP_CUR_STATE,
                                [RC]  = (IB_QP_CUR_STATE |
                                         IB_QP_MIN_RNR_TIMER),
                                [MLX] = (IB_QP_CUR_STATE |
                                         IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_ERR] = {
                [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
                [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
        }
};

static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
                        int attr_mask)
{
        if (attr_mask & IB_QP_PKEY_INDEX)
                sqp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_QKEY)
                sqp->qkey = attr->qkey;
        if (attr_mask & IB_QP_SQ_PSN)
                sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
        int err;
        u8 status;
        struct mthca_init_ib_param param;

        memset(&param, 0, sizeof param);

        param.port_width = dev->limits.port_width_cap;
        param.vl_cap     = dev->limits.vl_cap;
        param.mtu_cap    = dev->limits.mtu_cap;
        param.gid_cap    = dev->limits.gid_table_len;
        param.pkey_cap   = dev->limits.pkey_table_len;

        err = mthca_INIT_IB(dev, &param, port, &status);
        if (err)
                mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
        if (status)
                mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
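/*
 * Build the params2 RWE/RRE/RAE bits from the effective access flags.
 * If the responder depth is (or is becoming) zero, remote reads and
 * atomics must be disabled, so everything except remote write is
 * masked off regardless of what the access flags say.
 */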
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
                                  int attr_mask)
{
        u8 dest_rd_atomic;
        u32 access_flags;
        u32 hw_access_flags = 0;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MTHCA_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= MTHCA_QP_BIT_RAE;
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MTHCA_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}
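/*
 * Modify a QP's state and attributes.  We validate the requested
 * transition and attribute mask against state_table, translate the
 * attributes into a mthca_qp_param mailbox and execute the matching
 * MODIFY_QP firmware command.  A consumer reaches this through the
 * generic verbs entry point, e.g. (sketch, not taken from this file):
 *
 *      struct ib_qp_attr attr = {
 *              .qp_state        = IB_QPS_INIT,
 *              .pkey_index      = 0,
 *              .port_num        = 1,
 *              .qp_access_flags = 0,
 *      };
 *      ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *                   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */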
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
        u32 req_param, opt_param;
        u8 status;
        int err;

        if (attr_mask & IB_QP_CUR_STATE) {
                if (attr->cur_qp_state != IB_QPS_RTR &&
                    attr->cur_qp_state != IB_QPS_RTS &&
                    attr->cur_qp_state != IB_QPS_SQD &&
                    attr->cur_qp_state != IB_QPS_SQE)
                        return -EINVAL;
                else
                        cur_state = attr->cur_qp_state;
        } else {
                spin_lock_irq(&qp->sq.lock);
                spin_lock(&qp->rq.lock);
                cur_state = qp->state;
                spin_unlock(&qp->rq.lock);
                spin_unlock_irq(&qp->sq.lock);
        }

        if (attr_mask & IB_QP_STATE) {
                if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
                        return -EINVAL;
                new_state = attr->qp_state;
        } else
                new_state = cur_state;

        if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
                mthca_dbg(dev, "Illegal QP transition "
                          "%d->%d\n", cur_state, new_state);
                return -EINVAL;
        }

        req_param = state_table[cur_state][new_state].req_param[qp->transport];
        opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

        if ((req_param & attr_mask) != req_param) {
                mthca_dbg(dev, "QP transition "
                          "%d->%d missing req attr 0x%08x\n",
                          cur_state, new_state,
                          req_param & ~attr_mask);
                return -EINVAL;
        }

        if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
                mthca_dbg(dev, "QP transition (transport %d) "
                          "%d->%d has extra attr 0x%08x\n",
                          qp->transport,
                          cur_state, new_state,
                          attr_mask & ~(req_param | opt_param |
                                        IB_QP_STATE));
                return -EINVAL;
        }

        if ((attr_mask & IB_QP_PKEY_INDEX) &&
            attr->pkey_index >= dev->limits.pkey_table_len) {
                mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
                          attr->pkey_index, dev->limits.pkey_table_len-1);
                return -EINVAL;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
                mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
                mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
                          attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
                mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
                          attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
                return -EINVAL;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);

        qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
                                             (to_mthca_st(qp->transport) << 16));
        qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
        else {
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
                switch (attr->path_mig_state) {
                case IB_MIG_MIGRATED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
                        break;
                case IB_MIG_REARM:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
                        break;
                case IB_MIG_ARMED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
                        break;
                }
        }

        /* leave tavor_sched_queue as 0 */

        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
        else if (attr_mask & IB_QP_PATH_MTU)
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
                        qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
                qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

                if (qp->sq.max)
                        qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
                qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
        }

        /* leave arbel_sched_queue as 0 */

        if (qp->ibqp.uobject)
                qp_context->usr_page =
                        cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
        else
                qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
        qp_context->local_qpn  = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN) {
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
        }

        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(to_msqp(qp)->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
                                cpu_to_be32(attr->port_num << 24);
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(attr->pkey_index);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
        }

        if (attr_mask & IB_QP_AV) {
                qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
                qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
                qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
                if (attr->ah_attr.ah_flags & IB_AH_GRH) {
                        qp_context->pri_path.g_mylmc |= 1 << 7;
                        qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
                        qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
                        qp_context->pri_path.sl_tclass_flowlabel =
                                cpu_to_be32((attr->ah_attr.sl << 28)                |
                                            (attr->ah_attr.grh.traffic_class << 20) |
                                            (attr->ah_attr.grh.flow_label));
                        memcpy(qp_context->pri_path.rgid,
                               attr->ah_attr.grh.dgid.raw, 16);
                } else {
                        qp_context->pri_path.sl_tclass_flowlabel =
                                cpu_to_be32(attr->ah_attr.sl << 28);
                }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }

        /* XXX alt_path */

        /* leave rdd as 0 */
        qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
        /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
        qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                             (MTHCA_FLIGHT_LIMIT << 24) |
                                             MTHCA_QP_BIT_SWE);
        if (qp->sq_policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic) {
                        qp_context->params1 |=
                                cpu_to_be32(MTHCA_QP_BIT_SRE |
                                            MTHCA_QP_BIT_SAE);
                        qp_context->params1 |=
                                cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
                }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }

        if (attr_mask & IB_QP_SQ_PSN)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

        if (mthca_is_memfree(dev)) {
                qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
                qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                if (attr->max_dest_rd_atomic)
                        qp_context->params2 |=
                                cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
        }

        if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
                qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);
        }

        qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

        if (ibqp->srq)
                qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
        }
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

        qp_context->ra_buff_indx =
                cpu_to_be32(dev->qp_table.rdb_base +
                            ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
                             dev->qp_table.rdb_shift));

        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

        if (mthca_is_memfree(dev))
                qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }

        if (ibqp->srq)
                qp_context->srqn = cpu_to_be32(1 << 24 |
                                               to_msrq(ibqp->srq)->srqn);

        err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
                              qp->qpn, 0, mailbox, 0, &status);
        if (status) {
                mthca_warn(dev, "modify QP %d returned status %02x.\n",
                           state_table[cur_state][new_state].trans, status);
                err = -EINVAL;
        }

        if (!err) {
                qp->state = new_state;
                if (attr_mask & IB_QP_ACCESS_FLAGS)
                        qp->atomic_rd_en = attr->qp_access_flags;
                if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                        qp->resp_depth = attr->max_dest_rd_atomic;
        }

        mthca_free_mailbox(dev, mailbox);

        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);

        /*
         * If we moved QP0 to RTR, bring the IB link up; if we moved
         * QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
                        init_port(dev, to_msqp(qp)->port);

                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
                        mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
        }

        /*
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
        if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

                mthca_wq_init(&qp->sq);
                qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

                mthca_wq_init(&qp->rq);
                qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

                if (mthca_is_memfree(dev)) {
                        *qp->sq.db = 0;
                        *qp->rq.db = 0;
                }
        }

        return err;
}
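/*
 * Once the WQE shifts are known, recompute the data capacities each
 * WQE can really hold (max s/g entries and max inline data), so the
 * capabilities reported back to the consumer match the actual WQE
 * layout.
 */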
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_qp *qp)
{
        int max_data_size;

        /*
         * Calculate the maximum size of WQE s/g segments, excluding
         * the next segment and other non-data segments.
         */
        max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
                sizeof (struct mthca_next_seg);

        switch (qp->transport) {
        case MLX:
                max_data_size -= 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                if (mthca_is_memfree(dev))
                        max_data_size -= sizeof (struct mthca_arbel_ud_seg);
                else
                        max_data_size -= sizeof (struct mthca_tavor_ud_seg);
                break;

        default:
                max_data_size -= sizeof (struct mthca_raddr_seg);
                break;
        }

        /* We don't support inline data for kernel QPs (yet). */
        if (!pd->ibpd.uobject)
                qp->max_inline_data = 0;
        else
                qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;

        qp->sq.max_gs = min_t(int, dev->limits.max_sg,
                              max_data_size / sizeof (struct mthca_data_seg));
        qp->rq.max_gs = min_t(int, dev->limits.max_sg,
                              (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
                               sizeof (struct mthca_next_seg)) /
                              sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_pd *pd,
                               struct mthca_qp *qp)
{
        int size;
        int err = -ENOMEM;

        size = sizeof (struct mthca_next_seg) +
                qp->rq.max_gs * sizeof (struct mthca_data_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

        for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
             qp->rq.wqe_shift++)
                ; /* nothing */

        size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
        switch (qp->transport) {
        case MLX:
                size += 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                size += mthca_is_memfree(dev) ?
                        sizeof (struct mthca_arbel_ud_seg) :
                        sizeof (struct mthca_tavor_ud_seg);
                break;

        case UC:
                size += sizeof (struct mthca_raddr_seg);
                break;

        case RC:
                size += sizeof (struct mthca_raddr_seg);
                /*
                 * An atomic op will require an atomic segment, a
                 * remote address segment and one scatter entry.
                 */
                size = max_t(int, size,
                             sizeof (struct mthca_atomic_seg) +
                             sizeof (struct mthca_raddr_seg) +
                             sizeof (struct mthca_data_seg));
                break;

        default:
                break;
        }

        /* Make sure that we have enough space for a bind request */
        size = max_t(int, size, sizeof (struct mthca_bind_seg));

        size += sizeof (struct mthca_next_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

        for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
             qp->sq.wqe_shift++)
                ; /* nothing */

        qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
                                    1 << qp->sq.wqe_shift);

        /*
         * If this is a userspace QP, we don't actually have to
         * allocate anything.  All we need is to calculate the WQE
         * sizes and the send_wqe_offset, so we're done now.
         */
        if (pd->ibpd.uobject)
                return 0;

        size = PAGE_ALIGN(qp->send_wqe_offset +
                          (qp->sq.max << qp->sq.wqe_shift));

        qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
                           GFP_KERNEL);
        if (!qp->wrid)
                goto err_out;

        err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
                              &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
        if (err)
                goto err_out;

        return 0;

err_out:
        kfree(qp->wrid);
        return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
                                       (qp->sq.max << qp->sq.wqe_shift)),
                       &qp->queue, qp->is_direct, &qp->mr);
        kfree(qp->wrid);
}
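/*
 * Mem-free (Arbel-mode) HCAs keep context memory in host memory that
 * is mapped into the device via ICM tables.  Pin the QP context,
 * extended QP context and RDB table entries for this QPN before the
 * QP is used; mthca_unmap_memfree() below drops the references.
 */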
static int mthca_map_memfree(struct mthca_dev *dev,
                             struct mthca_qp *qp)
{
        int ret;

        if (mthca_is_memfree(dev)) {
                ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
                if (ret)
                        return ret;

                ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
                if (ret)
                        goto err_qpc;

                ret = mthca_table_get(dev, dev->qp_table.rdb_table,
                                      qp->qpn << dev->qp_table.rdb_shift);
                if (ret)
                        goto err_eqpc;

        }

        return 0;

err_eqpc:
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

        return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
                                struct mthca_qp *qp)
{
        mthca_table_put(dev, dev->qp_table.rdb_table,
                        qp->qpn << dev->qp_table.rdb_shift);
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        int ret = 0;

        if (mthca_is_memfree(dev)) {
                qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
                                                 qp->qpn, &qp->rq.db);
                if (qp->rq.db_index < 0)
                        return ret;

                qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
                                                 qp->qpn, &qp->sq.db);
                if (qp->sq.db_index < 0)
                        mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
        }

        return ret;
}

static void mthca_free_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
                mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
        }
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_cq *send_cq,
                                 struct mthca_cq *recv_cq,
                                 enum ib_sig_type send_policy,
                                 struct mthca_qp *qp)
{
        int ret;
        int i;

        atomic_set(&qp->refcount, 1);
        init_waitqueue_head(&qp->wait);
        qp->state        = IB_QPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
        qp->sq_policy    = send_policy;
        mthca_wq_init(&qp->sq);
        mthca_wq_init(&qp->rq);

        ret = mthca_map_memfree(dev, qp);
        if (ret)
                return ret;

        ret = mthca_alloc_wqe_buf(dev, pd, qp);
        if (ret) {
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        mthca_adjust_qp_caps(dev, pd, qp);

        /*
         * If this is a userspace QP, we're done now.  The doorbells
         * will be allocated and buffers will be initialized in
         * userspace.
         */
        if (pd->ibpd.uobject)
                return 0;

        ret = mthca_alloc_memfree(dev, qp);
        if (ret) {
                mthca_free_wqe_buf(dev, qp);
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        if (mthca_is_memfree(dev)) {
                struct mthca_next_seg *next;
                struct mthca_data_seg *scatter;
                int size = (sizeof (struct mthca_next_seg) +
                            qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

                for (i = 0; i < qp->rq.max; ++i) {
                        next = get_recv_wqe(qp, i);
                        next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
                                                   qp->rq.wqe_shift);
                        next->ee_nds = cpu_to_be32(size);

                        for (scatter = (void *) (next + 1);
                             (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
                             ++scatter)
                                scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
                }

                for (i = 0; i < qp->sq.max; ++i) {
                        next = get_send_wqe(qp, i);
                        next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
                                                    qp->sq.wqe_shift) +
                                                   qp->send_wqe_offset);
                }
        }

        qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
        qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

        return 0;
}

static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
                             struct mthca_qp *qp)
{
        /* Sanity check QP size before proceeding */
        if (cap->max_send_wr  > dev->limits.max_wqes ||
            cap->max_recv_wr  > dev->limits.max_wqes ||
            cap->max_send_sge > dev->limits.max_sg   ||
            cap->max_recv_sge > dev->limits.max_sg)
                return -EINVAL;

        if (mthca_is_memfree(dev)) {
                qp->rq.max = cap->max_recv_wr ?
                        roundup_pow_of_two(cap->max_recv_wr) : 0;
                qp->sq.max = cap->max_send_wr ?
                        roundup_pow_of_two(cap->max_send_wr) : 0;
        } else {
                qp->rq.max = cap->max_recv_wr;
                qp->sq.max = cap->max_send_wr;
        }

        qp->rq.max_gs = cap->max_recv_sge;
        qp->sq.max_gs = max_t(int, cap->max_send_sge,
                              ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
                                    MTHCA_INLINE_CHUNK_SIZE) /
                              sizeof (struct mthca_data_seg));

        /*
         * For MLX transport we need 2 extra S/G entries:
         * one for the header and one for the checksum at the end
         */
        if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
            qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
                return -EINVAL;

        return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
                   struct mthca_pd *pd,
                   struct mthca_cq *send_cq,
                   struct mthca_cq *recv_cq,
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
                   struct ib_qp_cap *cap,
                   struct mthca_qp *qp)
{
        int err;

        err = mthca_set_qp_size(dev, cap, qp);
        if (err)
                return err;

        switch (type) {
        case IB_QPT_RC: qp->transport = RC; break;
        case IB_QPT_UC: qp->transport = UC; break;
        case IB_QPT_UD: qp->transport = UD; break;
        default: return -EINVAL;
        }

        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, qp);
        if (err) {
                mthca_free(&dev->qp_table.alloc, qp->qpn);
                return err;
        }

        spin_lock_irq(&dev->qp_table.lock);
        mthca_array_set(&dev->qp_table.qp,
                        qp->qpn & (dev->limits.num_qps - 1), qp);
        spin_unlock_irq(&dev->qp_table.lock);

        return 0;
}

int mthca_alloc_sqp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
                    struct mthca_cq *recv_cq,
                    enum ib_sig_type send_policy,
                    struct ib_qp_cap *cap,
                    int qpn,
                    int port,
                    struct mthca_sqp *sqp)
{
        u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
        int err;

        err = mthca_set_qp_size(dev, cap, &sqp->qp);
        if (err)
                return err;

        sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
        sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
                                             &sqp->header_dma, GFP_KERNEL);
        if (!sqp->header_buf)
                return -ENOMEM;

        spin_lock_irq(&dev->qp_table.lock);
        if (mthca_array_get(&dev->qp_table.qp, mqpn))
                err = -EBUSY;
        else
                mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
        spin_unlock_irq(&dev->qp_table.lock);

        if (err)
                goto err_out;

        sqp->port = port;
        sqp->qp.qpn       = mqpn;
        sqp->qp.transport = MLX;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, &sqp->qp);
        if (err)
                goto err_out_free;

        atomic_inc(&pd->sqp_count);

        return 0;

err_out_free:
        /*
         * Lock CQs here, so that CQ polling code can do QP lookup
         * without taking a lock.
         */
        spin_lock_irq(&send_cq->lock);
        if (send_cq != recv_cq)
                spin_lock(&recv_cq->lock);

        spin_lock(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp, mqpn);
        spin_unlock(&dev->qp_table.lock);

        if (send_cq != recv_cq)
                spin_unlock(&recv_cq->lock);
        spin_unlock_irq(&send_cq->lock);

err_out:
        dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
                          sqp->header_buf, sqp->header_dma);

        return err;
}

void mthca_free_qp(struct mthca_dev *dev,
                   struct mthca_qp *qp)
{
        u8 status;
        struct mthca_cq *send_cq;
        struct mthca_cq *recv_cq;

        send_cq = to_mcq(qp->ibqp.send_cq);
        recv_cq = to_mcq(qp->ibqp.recv_cq);

        /*
         * Lock CQs here, so that CQ polling code can do QP lookup
         * without taking a lock.
         */
        spin_lock_irq(&send_cq->lock);
        if (send_cq != recv_cq)
                spin_lock(&recv_cq->lock);

        spin_lock(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp,
                          qp->qpn & (dev->limits.num_qps - 1));
        spin_unlock(&dev->qp_table.lock);

        if (send_cq != recv_cq)
                spin_unlock(&recv_cq->lock);
        spin_unlock_irq(&send_cq->lock);

        atomic_dec(&qp->refcount);
        wait_event(qp->wait, !atomic_read(&qp->refcount));

        if (qp->state != IB_QPS_RESET)
                mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

        /*
         * If this is a userspace QP, the buffers, MR, CQs and so on
         * will be cleaned up in userspace, so all we have to do is
         * unref the mem-free tables and free the QPN in our table.
         */
        if (!qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

                mthca_free_memfree(dev, qp);
                mthca_free_wqe_buf(dev, qp);
        }

        mthca_unmap_memfree(dev, qp);

        if (is_sqp(dev, qp)) {
                atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
                dma_free_coherent(&dev->pdev->dev,
                                  to_msqp(qp)->header_buf_size,
                                  to_msqp(qp)->header_buf,
                                  to_msqp(qp)->header_dma);
        } else
                mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
                            int ind, struct ib_send_wr *wr,
                            struct mthca_mlx_seg *mlx,
                            struct mthca_data_seg *data)
{
        int header_size;
        int err;
        u16 pkey;

        ib_ud_header_init(256, /* assume a MAD */
                          sqp->ud_header.grh_present,
                          &sqp->ud_header);

        err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
        if (err)
                return err;
        mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
        mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
                                  (sqp->ud_header.lrh.destination_lid ==
                                   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
                                  (sqp->ud_header.lrh.service_level << 8));
        mlx->rlid = sqp->ud_header.lrh.destination_lid;
        mlx->vcrc = 0;

        switch (wr->opcode) {
        case IB_WR_SEND:
                sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
                sqp->ud_header.immediate_present = 0;
                break;
        case IB_WR_SEND_WITH_IMM:
                sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                sqp->ud_header.immediate_present = 1;
                sqp->ud_header.immediate_data = wr->imm_data;
                break;
        default:
                return -EINVAL;
        }

        sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
        if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
                sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
        sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
        if (!sqp->qp.ibqp.qp_num)
                ib_get_cached_pkey(&dev->ib_dev, sqp->port,
                                   sqp->pkey_index, &pkey);
        else
                ib_get_cached_pkey(&dev->ib_dev, sqp->port,
                                   wr->wr.ud.pkey_index, &pkey);
        sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
        sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
        sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
                                               sqp->qkey : wr->wr.ud.remote_qkey);
        sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

        header_size = ib_ud_header_pack(&sqp->ud_header,
                                        sqp->header_buf +
                                        ind * MTHCA_UD_HEADER_SIZE);

        data->byte_count = cpu_to_be32(header_size);
        data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
        data->addr       = cpu_to_be64(sqp->header_dma +
                                       ind * MTHCA_UD_HEADER_SIZE);

        return 0;
}
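/*
 * Check whether posting nreq more requests would overflow the work
 * queue.  The unlocked head/tail read may race with the completion
 * path, so an apparent overflow is rechecked under the CQ lock, which
 * serializes us against the CQ poll path that advances wq->tail.
 */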
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
                                    struct ib_cq *ib_cq)
{
        unsigned cur;
        struct mthca_cq *cq;

        cur = wq->head - wq->tail;
        if (likely(cur + nreq < wq->max))
                return 0;

        cq = to_mcq(ib_cq);
        spin_lock(&cq->lock);
        cur = wq->head - wq->tail;
        spin_unlock(&cq->lock);

        return cur + nreq >= wq->max;
}
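/*
 * Post a list of send work requests on a Tavor-mode HCA.  Each
 * request is built in place in the next free send WQE: a next
 * segment, then any transport-specific segments (remote address,
 * atomic, UD address vector or MLX header), then the gather list,
 * and finally the new WQE is linked in from the previous one.
 */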
1488 | int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |
1489 | struct ib_send_wr **bad_wr) | |
1490 | { | |
1491 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1492 | struct mthca_qp *qp = to_mqp(ibqp); | |
1493 | void *wqe; | |
1494 | void *prev_wqe; | |
1495 | unsigned long flags; | |
1496 | int err = 0; | |
1497 | int nreq; | |
1498 | int i; | |
1499 | int size; | |
1500 | int size0 = 0; | |
1501 | u32 f0 = 0; | |
1502 | int ind; | |
1503 | u8 op0 = 0; | |
1504 | ||
1505 | spin_lock_irqsave(&qp->sq.lock, flags); | |
1506 | ||
1507 | /* XXX check that state is OK to post send */ | |
1508 | ||
1509 | ind = qp->sq.next_ind; | |
1510 | ||
1511 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
1512 | if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | |
1513 | mthca_err(dev, "SQ %06x full (%u head, %u tail," | |
1514 | " %d max, %d nreq)\n", qp->qpn, | |
1515 | qp->sq.head, qp->sq.tail, | |
1516 | qp->sq.max, nreq); | |
1517 | err = -ENOMEM; | |
1518 | *bad_wr = wr; | |
1519 | goto out; | |
1520 | } | |
1521 | ||
1522 | wqe = get_send_wqe(qp, ind); | |
1523 | prev_wqe = qp->sq.last; | |
1524 | qp->sq.last = wqe; | |
1525 | ||
1526 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | |
1527 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; | |
1528 | ((struct mthca_next_seg *) wqe)->flags = | |
1529 | ((wr->send_flags & IB_SEND_SIGNALED) ? | |
1530 | cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | | |
1531 | ((wr->send_flags & IB_SEND_SOLICITED) ? | |
1532 | cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | | |
1533 | cpu_to_be32(1); | |
1534 | if (wr->opcode == IB_WR_SEND_WITH_IMM || | |
1535 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | |
3fba2317 | 1536 | ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; |
1da177e4 LT |
1537 | |
1538 | wqe += sizeof (struct mthca_next_seg); | |
1539 | size = sizeof (struct mthca_next_seg) / 16; | |
1540 | ||
1541 | switch (qp->transport) { | |
1542 | case RC: | |
1543 | switch (wr->opcode) { | |
1544 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
1545 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
1546 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1547 | cpu_to_be64(wr->wr.atomic.remote_addr); | |
1548 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1549 | cpu_to_be32(wr->wr.atomic.rkey); | |
1550 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1551 | ||
1552 | wqe += sizeof (struct mthca_raddr_seg); | |
1553 | ||
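| /* For compare & swap, swap_add holds the swap value and compare |
| * the comparand; for fetch & add, swap_add holds the addend and |
| * compare is cleared. */ |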
1554 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | |
1555 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1556 | cpu_to_be64(wr->wr.atomic.swap); | |
1557 | ((struct mthca_atomic_seg *) wqe)->compare = | |
1558 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1559 | } else { | |
1560 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1561 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1562 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | |
1563 | } | |
1564 | ||
1565 | wqe += sizeof (struct mthca_atomic_seg); | |
62abb841 MT |
1566 | size += (sizeof (struct mthca_raddr_seg) + |
1567 | sizeof (struct mthca_atomic_seg)) / 16; | |
1da177e4 LT |
1568 | break; |
1569 | ||
1570 | case IB_WR_RDMA_WRITE: | |
1571 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1572 | case IB_WR_RDMA_READ: | |
1573 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1574 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1575 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1576 | cpu_to_be32(wr->wr.rdma.rkey); | |
1577 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1578 | wqe += sizeof (struct mthca_raddr_seg); | |
1579 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1580 | break; | |
1581 | ||
1582 | default: | |
1583 | /* No extra segments required for sends */ | |
1584 | break; | |
1585 | } | |
1586 | ||
1587 | break; | |
1588 | ||
9e6970b5 RD |
1589 | case UC: |
1590 | switch (wr->opcode) { | |
1591 | case IB_WR_RDMA_WRITE: | |
1592 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1593 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1594 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1595 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1596 | cpu_to_be32(wr->wr.rdma.rkey); | |
1597 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1598 | wqe += sizeof (struct mthca_raddr_seg); | |
1599 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1600 | break; | |
1601 | ||
1602 | default: | |
1603 | /* No extra segments required for sends */ | |
1604 | break; | |
1605 | } | |
1606 | ||
1607 | break; | |
1608 | ||
1da177e4 LT |
1609 | case UD: |
1610 | ((struct mthca_tavor_ud_seg *) wqe)->lkey = | |
1611 | cpu_to_be32(to_mah(wr->wr.ud.ah)->key); | |
1612 | ((struct mthca_tavor_ud_seg *) wqe)->av_addr = | |
1613 | cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); | |
1614 | ((struct mthca_tavor_ud_seg *) wqe)->dqpn = | |
1615 | cpu_to_be32(wr->wr.ud.remote_qpn); | |
1616 | ((struct mthca_tavor_ud_seg *) wqe)->qkey = | |
1617 | cpu_to_be32(wr->wr.ud.remote_qkey); | |
1618 | ||
1619 | wqe += sizeof (struct mthca_tavor_ud_seg); | |
1620 | size += sizeof (struct mthca_tavor_ud_seg) / 16; | |
1621 | break; | |
1622 | ||
1623 | case MLX: | |
1624 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | |
1625 | wqe - sizeof (struct mthca_next_seg), | |
1626 | wqe); | |
1627 | if (err) { | |
1628 | *bad_wr = wr; | |
1629 | goto out; | |
1630 | } | |
1631 | wqe += sizeof (struct mthca_data_seg); | |
1632 | size += sizeof (struct mthca_data_seg) / 16; | |
1633 | break; | |
1634 | } | |
1635 | ||
1636 | if (wr->num_sge > qp->sq.max_gs) { | |
1637 | mthca_err(dev, "too many gathers\n"); | |
1638 | err = -EINVAL; | |
1639 | *bad_wr = wr; | |
1640 | goto out; | |
1641 | } | |
1642 | ||
1643 | for (i = 0; i < wr->num_sge; ++i) { | |
1644 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1645 | cpu_to_be32(wr->sg_list[i].length); | |
1646 | ((struct mthca_data_seg *) wqe)->lkey = | |
1647 | cpu_to_be32(wr->sg_list[i].lkey); | |
1648 | ((struct mthca_data_seg *) wqe)->addr = | |
1649 | cpu_to_be64(wr->sg_list[i].addr); | |
1650 | wqe += sizeof (struct mthca_data_seg); | |
1651 | size += sizeof (struct mthca_data_seg) / 16; | |
1652 | } | |
1653 | ||
1654 | /* Add one more inline data segment (bit 31 marks it inline) as a 4-byte ICRC placeholder */ |
1655 | if (qp->transport == MLX) { | |
1656 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1657 | cpu_to_be32((1U << 31) | 4); |
1658 | ((u32 *) wqe)[1] = 0; | |
1659 | wqe += sizeof (struct mthca_data_seg); | |
1660 | size += sizeof (struct mthca_data_seg) / 16; | |
1661 | } | |
1662 | ||
1663 | qp->wrid[ind + qp->rq.max] = wr->wr_id; | |
1664 | ||
1665 | if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { | |
1666 | mthca_err(dev, "invalid send opcode %d\n", wr->opcode); |
1667 | err = -EINVAL; | |
1668 | *bad_wr = wr; | |
1669 | goto out; | |
1670 | } | |
1671 | ||
d6cff021 RD |
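| /* Link this WQE into the chain: write the next address/opcode |
| * word first, then barrier, then the size/DBD word, so the HCA |
| * never sees a valid size paired with a stale next pointer. */ |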
1672 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
1673 | cpu_to_be32(((ind << qp->sq.wqe_shift) + | |
1674 | qp->send_wqe_offset) | | |
1675 | mthca_opcode[wr->opcode]); | |
1676 | wmb(); | |
1677 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
1678 | cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size); | |
1da177e4 LT |
1679 | |
1680 | if (!size0) { | |
1681 | size0 = size; | |
1682 | op0 = mthca_opcode[wr->opcode]; | |
1683 | } | |
1684 | ||
1685 | ++ind; | |
1686 | if (unlikely(ind >= qp->sq.max)) | |
1687 | ind -= qp->sq.max; | |
1688 | } | |
1689 | ||
1690 | out: | |
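| /* |
| * Ring the send doorbell once for the whole chain: doorbell[0] |
| * carries the offset and opcode of the first new WQE, doorbell[1] |
| * the QP number and the first WQE's size.  Note that f0 is never |
| * set from IB_SEND_FENCE in this version, so fenced sends are not |
| * honoured. |
| */ |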
1691 | if (likely(nreq)) { | |
97f52eb4 | 1692 | __be32 doorbell[2]; |
1da177e4 LT |
1693 | |
1694 | doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + | |
1695 | qp->send_wqe_offset) | f0 | op0); | |
1696 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
1697 | ||
1698 | wmb(); | |
1699 | ||
1700 | mthca_write64(doorbell, | |
1701 | dev->kar + MTHCA_SEND_DOORBELL, | |
1702 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1703 | } | |
1704 | ||
1705 | qp->sq.next_ind = ind; | |
1706 | qp->sq.head += nreq; | |
1707 | ||
1708 | spin_unlock_irqrestore(&qp->sq.lock, flags); | |
1709 | return err; | |
1710 | } | |
1711 | ||
1712 | int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
1713 | struct ib_recv_wr **bad_wr) | |
1714 | { | |
1715 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1716 | struct mthca_qp *qp = to_mqp(ibqp); | |
ae57e24a | 1717 | __be32 doorbell[2]; |
1da177e4 LT |
1718 | unsigned long flags; |
1719 | int err = 0; | |
1720 | int nreq; | |
1721 | int i; | |
1722 | int size; | |
1723 | int size0 = 0; | |
1724 | int ind; | |
1725 | void *wqe; | |
1726 | void *prev_wqe; | |
1727 | ||
1728 | spin_lock_irqsave(&qp->rq.lock, flags); | |
1729 | ||
1730 | /* XXX check that state is OK to post receive */ | |
1731 | ||
1732 | ind = qp->rq.next_ind; | |
1733 | ||
1734 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
ae57e24a MT |
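| /* The WQE count field in the Tavor receive doorbell is only 8 |
| * bits wide, so receives are posted in chunks of at most |
| * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; flush a full chunk here |
| * before building more WQEs. */ |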
1735 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { |
1736 | nreq = 0; | |
1737 | ||
1738 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | |
1739 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | |
1740 | ||
1741 | wmb(); | |
1742 | ||
1743 | mthca_write64(doorbell, | |
1744 | dev->kar + MTHCA_RECEIVE_DOORBELL, | |
1745 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1746 | ||
1747 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | |
1748 | size0 = 0; | |
1749 | } | |
1750 | ||
1da177e4 LT |
1751 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
1752 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | |
1753 | " %d max, %d nreq)\n", qp->qpn, | |
1754 | qp->rq.head, qp->rq.tail, | |
1755 | qp->rq.max, nreq); | |
1756 | err = -ENOMEM; | |
1757 | *bad_wr = wr; | |
1758 | goto out; | |
1759 | } | |
1760 | ||
1761 | wqe = get_recv_wqe(qp, ind); | |
1762 | prev_wqe = qp->rq.last; | |
1763 | qp->rq.last = wqe; | |
1764 | ||
1765 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | |
1766 | ((struct mthca_next_seg *) wqe)->ee_nds = | |
1767 | cpu_to_be32(MTHCA_NEXT_DBD); | |
1768 | ((struct mthca_next_seg *) wqe)->flags = 0; | |
1769 | ||
1770 | wqe += sizeof (struct mthca_next_seg); | |
1771 | size = sizeof (struct mthca_next_seg) / 16; | |
1772 | ||
1773 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
1774 | err = -EINVAL; | |
1775 | *bad_wr = wr; | |
1776 | goto out; | |
1777 | } | |
1778 | ||
1779 | for (i = 0; i < wr->num_sge; ++i) { | |
1780 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1781 | cpu_to_be32(wr->sg_list[i].length); | |
1782 | ((struct mthca_data_seg *) wqe)->lkey = | |
1783 | cpu_to_be32(wr->sg_list[i].lkey); | |
1784 | ((struct mthca_data_seg *) wqe)->addr = | |
1785 | cpu_to_be64(wr->sg_list[i].addr); | |
1786 | wqe += sizeof (struct mthca_data_seg); | |
1787 | size += sizeof (struct mthca_data_seg) / 16; | |
1788 | } | |
1789 | ||
1790 | qp->wrid[ind] = wr->wr_id; | |
1791 | ||
d6cff021 RD |
1792 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
1793 | cpu_to_be32((ind << qp->rq.wqe_shift) | 1); | |
1794 | wmb(); | |
1795 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
1796 | cpu_to_be32(MTHCA_NEXT_DBD | size); | |
1da177e4 LT |
1797 | |
1798 | if (!size0) | |
1799 | size0 = size; | |
1800 | ||
1801 | ++ind; | |
1802 | if (unlikely(ind >= qp->rq.max)) | |
1803 | ind -= qp->rq.max; | |
1804 | } | |
1805 | ||
1806 | out: | |
1807 | if (likely(nreq)) { | |
1da177e4 LT |
1808 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); |
1809 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); | |
1810 | ||
1811 | wmb(); | |
1812 | ||
1813 | mthca_write64(doorbell, | |
1814 | dev->kar + MTHCA_RECEIVE_DOORBELL, | |
1815 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1816 | } | |
1817 | ||
1818 | qp->rq.next_ind = ind; | |
1819 | qp->rq.head += nreq; | |
1820 | ||
1821 | spin_unlock_irqrestore(&qp->rq.lock, flags); | |
1822 | return err; | |
1823 | } | |
1824 | ||
1825 | int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |
1826 | struct ib_send_wr **bad_wr) | |
1827 | { | |
1828 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1829 | struct mthca_qp *qp = to_mqp(ibqp); | |
e0ae9ecf | 1830 | __be32 doorbell[2]; |
1da177e4 LT |
1831 | void *wqe; |
1832 | void *prev_wqe; | |
1833 | unsigned long flags; | |
1834 | int err = 0; | |
1835 | int nreq; | |
1836 | int i; | |
1837 | int size; | |
1838 | int size0 = 0; | |
1839 | u32 f0 = 0; | |
1840 | int ind; | |
1841 | u8 op0 = 0; | |
1842 | ||
1843 | spin_lock_irqsave(&qp->sq.lock, flags); | |
1844 | ||
1845 | /* XXX check that state is OK to post send */ | |
1846 | ||
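| /* Arbel work queues are a power of two in size, so the ring |
| * index is just the low bits of the free-running head counter. */ |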
1847 | ind = qp->sq.head & (qp->sq.max - 1); | |
1848 | ||
1849 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
e0ae9ecf MT |
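| /* The WQE count in doorbell[0] is an 8-bit field, so at most |
| * MTHCA_ARBEL_MAX_WQES_PER_SEND_DB WQEs can be described by one |
| * doorbell; flush a full chunk before building more WQEs. */ |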
1850 | if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { |
1851 | nreq = 0; | |
1852 | ||
1853 | doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | | |
1854 | ((qp->sq.head & 0xffff) << 8) | | |
1855 | f0 | op0); | |
1856 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
1857 | ||
1858 | qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; | |
1859 | size0 = 0; | |
1860 | ||
1861 | /* | |
1862 | * Make sure that descriptors are written before | |
1863 | * doorbell record. | |
1864 | */ | |
1865 | wmb(); | |
1866 | *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); | |
1867 | ||
1868 | /* | |
1869 | * Make sure doorbell record is written before we | |
1870 | * write MMIO send doorbell. | |
1871 | */ | |
1872 | wmb(); | |
1873 | mthca_write64(doorbell, | |
1874 | dev->kar + MTHCA_SEND_DOORBELL, | |
1875 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1876 | } | |
1877 | ||
1da177e4 LT |
1878 | if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { |
1879 | mthca_err(dev, "SQ %06x full (%u head, %u tail," | |
1880 | " %d max, %d nreq)\n", qp->qpn, | |
1881 | qp->sq.head, qp->sq.tail, | |
1882 | qp->sq.max, nreq); | |
1883 | err = -ENOMEM; | |
1884 | *bad_wr = wr; | |
1885 | goto out; | |
1886 | } | |
1887 | ||
1888 | wqe = get_send_wqe(qp, ind); | |
1889 | prev_wqe = qp->sq.last; | |
1890 | qp->sq.last = wqe; | |
1891 | ||
1892 | ((struct mthca_next_seg *) wqe)->flags = | |
1893 | ((wr->send_flags & IB_SEND_SIGNALED) ? | |
1894 | cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | | |
1895 | ((wr->send_flags & IB_SEND_SOLICITED) ? | |
1896 | cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | | |
1897 | cpu_to_be32(1); | |
1898 | if (wr->opcode == IB_WR_SEND_WITH_IMM || | |
1899 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | |
3fba2317 | 1900 | ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; |
1da177e4 LT |
1901 | |
1902 | wqe += sizeof (struct mthca_next_seg); | |
1903 | size = sizeof (struct mthca_next_seg) / 16; | |
1904 | ||
1905 | switch (qp->transport) { | |
ddb934e0 RD |
1906 | case RC: |
1907 | switch (wr->opcode) { | |
1908 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
1909 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
1910 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1911 | cpu_to_be64(wr->wr.atomic.remote_addr); | |
1912 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1913 | cpu_to_be32(wr->wr.atomic.rkey); | |
1914 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1915 | ||
1916 | wqe += sizeof (struct mthca_raddr_seg); | |
1917 | ||
1918 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | |
1919 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1920 | cpu_to_be64(wr->wr.atomic.swap); | |
1921 | ((struct mthca_atomic_seg *) wqe)->compare = | |
1922 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1923 | } else { | |
1924 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1925 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1926 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | |
1927 | } | |
1928 | ||
1929 | wqe += sizeof (struct mthca_atomic_seg); | |
62abb841 MT |
1930 | size += (sizeof (struct mthca_raddr_seg) + |
1931 | sizeof (struct mthca_atomic_seg)) / 16; | |
ddb934e0 RD |
1932 | break; |
1933 | ||
9e6970b5 RD |
1934 | case IB_WR_RDMA_READ: |
1935 | case IB_WR_RDMA_WRITE: | |
1936 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1937 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1938 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1939 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1940 | cpu_to_be32(wr->wr.rdma.rkey); | |
1941 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1942 | wqe += sizeof (struct mthca_raddr_seg); | |
1943 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1944 | break; | |
1945 | ||
1946 | default: | |
1947 | /* No extra segments required for sends */ | |
1948 | break; | |
1949 | } | |
1950 | ||
1951 | break; | |
1952 | ||
1953 | case UC: | |
1954 | switch (wr->opcode) { | |
ddb934e0 RD |
1955 | case IB_WR_RDMA_WRITE: |
1956 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1957 | ((struct mthca_raddr_seg *) wqe)->raddr = |
1958 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1959 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1960 | cpu_to_be32(wr->wr.rdma.rkey); | |
1961 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1962 | wqe += sizeof (struct mthca_raddr_seg); | |
1963 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1964 | break; | |
1965 | ||
1966 | default: | |
1967 | /* No extra segments required for sends */ | |
1968 | break; | |
1969 | } | |
1970 | ||
1971 | break; | |
1972 | ||
1da177e4 LT |
1973 | case UD: |
1974 | memcpy(((struct mthca_arbel_ud_seg *) wqe)->av, | |
1975 | to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); | |
1976 | ((struct mthca_arbel_ud_seg *) wqe)->dqpn = | |
1977 | cpu_to_be32(wr->wr.ud.remote_qpn); | |
1978 | ((struct mthca_arbel_ud_seg *) wqe)->qkey = | |
1979 | cpu_to_be32(wr->wr.ud.remote_qkey); | |
1980 | ||
1981 | wqe += sizeof (struct mthca_arbel_ud_seg); | |
1982 | size += sizeof (struct mthca_arbel_ud_seg) / 16; | |
1983 | break; | |
1984 | ||
1985 | case MLX: | |
1986 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | |
1987 | wqe - sizeof (struct mthca_next_seg), | |
1988 | wqe); | |
1989 | if (err) { | |
1990 | *bad_wr = wr; | |
1991 | goto out; | |
1992 | } | |
1993 | wqe += sizeof (struct mthca_data_seg); | |
1994 | size += sizeof (struct mthca_data_seg) / 16; | |
1995 | break; | |
1996 | } | |
1997 | ||
1998 | if (wr->num_sge > qp->sq.max_gs) { | |
1999 | mthca_err(dev, "too many gathers\n"); | |
2000 | err = -EINVAL; | |
2001 | *bad_wr = wr; | |
2002 | goto out; | |
2003 | } | |
2004 | ||
2005 | for (i = 0; i < wr->num_sge; ++i) { | |
2006 | ((struct mthca_data_seg *) wqe)->byte_count = | |
2007 | cpu_to_be32(wr->sg_list[i].length); | |
2008 | ((struct mthca_data_seg *) wqe)->lkey = | |
2009 | cpu_to_be32(wr->sg_list[i].lkey); | |
2010 | ((struct mthca_data_seg *) wqe)->addr = | |
2011 | cpu_to_be64(wr->sg_list[i].addr); | |
2012 | wqe += sizeof (struct mthca_data_seg); | |
2013 | size += sizeof (struct mthca_data_seg) / 16; | |
2014 | } | |
2015 | ||
2016 | /* Add one more inline data segment (bit 31 marks it inline) as a 4-byte ICRC placeholder */ |
2017 | if (qp->transport == MLX) { | |
2018 | ((struct mthca_data_seg *) wqe)->byte_count = | |
2019 | cpu_to_be32((1U << 31) | 4); |
2020 | ((u32 *) wqe)[1] = 0; | |
2021 | wqe += sizeof (struct mthca_data_seg); | |
2022 | size += sizeof (struct mthca_data_seg) / 16; | |
2023 | } | |
2024 | ||
2025 | qp->wrid[ind + qp->rq.max] = wr->wr_id; | |
2026 | ||
2027 | if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { | |
2028 | mthca_err(dev, "invalid send opcode %d\n", wr->opcode); |
2029 | err = -EINVAL; | |
2030 | *bad_wr = wr; | |
2031 | goto out; | |
2032 | } | |
2033 | ||
d6cff021 RD |
2034 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
2035 | cpu_to_be32(((ind << qp->sq.wqe_shift) + | |
2036 | qp->send_wqe_offset) | | |
2037 | mthca_opcode[wr->opcode]); | |
2038 | wmb(); | |
2039 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
2040 | cpu_to_be32(MTHCA_NEXT_DBD | size); | |
1da177e4 LT |
2041 | |
2042 | if (!size0) { | |
2043 | size0 = size; | |
2044 | op0 = mthca_opcode[wr->opcode]; | |
2045 | } | |
2046 | ||
2047 | ++ind; | |
2048 | if (unlikely(ind >= qp->sq.max)) | |
2049 | ind -= qp->sq.max; | |
2050 | } | |
2051 | ||
2052 | out: | |
2053 | if (likely(nreq)) { | |
1da177e4 LT |
2054 | doorbell[0] = cpu_to_be32((nreq << 24) | |
2055 | ((qp->sq.head & 0xffff) << 8) | | |
2056 | f0 | op0); | |
2057 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
2058 | ||
2059 | qp->sq.head += nreq; | |
2060 | ||
2061 | /* | |
2062 | * Make sure that descriptors are written before | |
2063 | * doorbell record. | |
2064 | */ | |
2065 | wmb(); | |
2066 | *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); | |
2067 | ||
2068 | /* | |
2069 | * Make sure doorbell record is written before we | |
2070 | * write MMIO send doorbell. | |
2071 | */ | |
2072 | wmb(); | |
2073 | mthca_write64(doorbell, | |
2074 | dev->kar + MTHCA_SEND_DOORBELL, | |
2075 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
2076 | } | |
2077 | ||
2078 | spin_unlock_irqrestore(&qp->sq.lock, flags); | |
2079 | return err; | |
2080 | } | |
2081 | ||
2082 | int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
2083 | struct ib_recv_wr **bad_wr) | |
2084 | { | |
2085 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
2086 | struct mthca_qp *qp = to_mqp(ibqp); | |
2087 | unsigned long flags; | |
2088 | int err = 0; | |
2089 | int nreq; | |
2090 | int ind; | |
2091 | int i; | |
2092 | void *wqe; | |
2093 | ||
2094 | spin_lock_irqsave(&qp->rq.lock, flags); | |
2095 | ||
2096 | /* XXX check that state is OK to post receive */ | |
2097 | ||
2098 | ind = qp->rq.head & (qp->rq.max - 1); | |
2099 | ||
2100 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
2101 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | |
2102 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | |
2103 | " %d max, %d nreq)\n", qp->qpn, | |
2104 | qp->rq.head, qp->rq.tail, | |
2105 | qp->rq.max, nreq); | |
2106 | err = -ENOMEM; | |
2107 | *bad_wr = wr; | |
2108 | goto out; | |
2109 | } | |
2110 | ||
2111 | wqe = get_recv_wqe(qp, ind); | |
2112 | ||
2113 | ((struct mthca_next_seg *) wqe)->flags = 0; | |
2114 | ||
2115 | wqe += sizeof (struct mthca_next_seg); | |
2116 | ||
2117 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
2118 | err = -EINVAL; | |
2119 | *bad_wr = wr; | |
2120 | goto out; | |
2121 | } | |
2122 | ||
2123 | for (i = 0; i < wr->num_sge; ++i) { | |
2124 | ((struct mthca_data_seg *) wqe)->byte_count = | |
2125 | cpu_to_be32(wr->sg_list[i].length); | |
2126 | ((struct mthca_data_seg *) wqe)->lkey = | |
2127 | cpu_to_be32(wr->sg_list[i].lkey); | |
2128 | ((struct mthca_data_seg *) wqe)->addr = | |
2129 | cpu_to_be64(wr->sg_list[i].addr); | |
2130 | wqe += sizeof (struct mthca_data_seg); | |
2131 | } | |
2132 | ||
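| /* Mem-free receive WQEs carry no explicit size, so a short |
| * scatter list is terminated by a zero-length segment flagged |
| * with the invalid lkey. */ |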
2133 | if (i < qp->rq.max_gs) { | |
2134 | ((struct mthca_data_seg *) wqe)->byte_count = 0; | |
ddf841f0 | 2135 | ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); |
1da177e4 LT |
2136 | ((struct mthca_data_seg *) wqe)->addr = 0; |
2137 | } | |
2138 | ||
2139 | qp->wrid[ind] = wr->wr_id; | |
2140 | ||
2141 | ++ind; | |
2142 | if (unlikely(ind >= qp->rq.max)) | |
2143 | ind -= qp->rq.max; | |
2144 | } | |
2145 | out: | |
2146 | if (likely(nreq)) { | |
2147 | qp->rq.head += nreq; | |
2148 | ||
2149 | /* | |
2150 | * Make sure that descriptors are written before | |
2151 | * doorbell record. | |
2152 | */ | |
2153 | wmb(); | |
2154 | *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); | |
2155 | } | |
2156 | ||
2157 | spin_unlock_irqrestore(&qp->rq.lock, flags); | |
2158 | return err; | |
2159 | } | |
2160 | ||
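| /* |
| * Recover chain linkage from a WQE that completed in error: report |
| * whether its DBD bit was set and, in new_wqe, where the doorbell |
| * chain continues, so the CQ error path can keep walking the queue. |
| */ |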
2161 | int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, | |
97f52eb4 | 2162 | int index, int *dbd, __be32 *new_wqe) |
1da177e4 LT |
2163 | { |
2164 | struct mthca_next_seg *next; | |
2165 | ||
ec34a922 RD |
2166 | /* |
2167 | * For SRQs, all WQEs generate a CQE, so we're always at the | |
2168 | * end of the doorbell chain. | |
2169 | */ | |
2170 | if (qp->ibqp.srq) { | |
2171 | *new_wqe = 0; | |
2172 | return 0; | |
2173 | } | |
2174 | ||
1da177e4 LT |
2175 | if (is_send) |
2176 | next = get_send_wqe(qp, index); | |
2177 | else | |
2178 | next = get_recv_wqe(qp, index); | |
2179 | ||
288bdeb4 | 2180 | *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); |
1da177e4 LT |
2181 | if (next->ee_nds & cpu_to_be32(0x3f)) |
2182 | *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | | |
2183 | (next->ee_nds & cpu_to_be32(0x3f)); | |
2184 | else | |
2185 | *new_wqe = 0; | |
2186 | ||
2187 | return 0; | |
2188 | } | |
2189 | ||
2190 | int __devinit mthca_init_qp_table(struct mthca_dev *dev) | |
2191 | { | |
2192 | int err; | |
2193 | u8 status; | |
2194 | int i; | |
2195 | ||
2196 | spin_lock_init(&dev->qp_table.lock); | |
2197 | ||
2198 | /* | |
2199 | * We reserve 2 extra QPs per port for the special QPs.  The |
2200 | * special QP for port 1 must have an even QPN, so round up. |
2201 | */ | |
2202 | dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; | |
2203 | err = mthca_alloc_init(&dev->qp_table.alloc, | |
2204 | dev->limits.num_qps, | |
2205 | (1 << 24) - 1, | |
2206 | dev->qp_table.sqp_start + | |
2207 | MTHCA_MAX_PORTS * 2); | |
2208 | if (err) | |
2209 | return err; | |
2210 | ||
2211 | err = mthca_array_init(&dev->qp_table.qp, | |
2212 | dev->limits.num_qps); | |
2213 | if (err) { | |
2214 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2215 | return err; | |
2216 | } | |
2217 | ||
2218 | for (i = 0; i < 2; ++i) { | |
2219 | err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, | |
2220 | dev->qp_table.sqp_start + i * 2, | |
2221 | &status); | |
2222 | if (err) | |
2223 | goto err_out; | |
2224 | if (status) { | |
2225 | mthca_warn(dev, "CONF_SPECIAL_QP returned " | |
2226 | "status %02x, aborting.\n", | |
2227 | status); | |
2228 | err = -EINVAL; | |
2229 | goto err_out; | |
2230 | } | |
2231 | } | |
2232 | return 0; | |
2233 | ||
2234 | err_out: | |
2235 | for (i = 0; i < 2; ++i) | |
2236 | mthca_CONF_SPECIAL_QP(dev, i, 0, &status); | |
2237 | ||
2238 | mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); | |
2239 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2240 | ||
2241 | return err; | |
2242 | } | |
2243 | ||
2244 | void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev) | |
2245 | { | |
2246 | int i; | |
2247 | u8 status; | |
2248 | ||
2249 | for (i = 0; i < 2; ++i) | |
2250 | mthca_CONF_SPECIAL_QP(dev, i, 0, &status); | |
2251 | ||
71eea47d | 2252 | mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); |
1da177e4 LT |
2253 | mthca_alloc_cleanup(&dev->qp_table.alloc); |
2254 | } |