/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

#include <linux/string.h>
#include <linux/slab.h>

#include <asm/io.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
        MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
        MTHCA_ACK_REQ_FREQ       = 10,
        MTHCA_FLIGHT_LIMIT       = 9,
        MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
        MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
        MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
        MTHCA_QP_STATE_RST      = 0,
        MTHCA_QP_STATE_INIT     = 1,
        MTHCA_QP_STATE_RTR      = 2,
        MTHCA_QP_STATE_RTS      = 3,
        MTHCA_QP_STATE_SQE      = 4,
        MTHCA_QP_STATE_SQD      = 5,
        MTHCA_QP_STATE_ERR      = 6,
        MTHCA_QP_STATE_DRAINING = 7
};

enum {
        MTHCA_QP_ST_RC  = 0x0,
        MTHCA_QP_ST_UC  = 0x1,
        MTHCA_QP_ST_RD  = 0x2,
        MTHCA_QP_ST_UD  = 0x3,
        MTHCA_QP_ST_MLX = 0x7
};

enum {
        MTHCA_QP_PM_MIGRATED = 0x3,
        MTHCA_QP_PM_ARMED    = 0x0,
        MTHCA_QP_PM_REARM    = 0x1
};

enum {
        /* qp_context flags */
        MTHCA_QP_BIT_DE  = 1 <<  8,
        /* params1 */
        MTHCA_QP_BIT_SRE = 1 << 15,
        MTHCA_QP_BIT_SWE = 1 << 14,
        MTHCA_QP_BIT_SAE = 1 << 13,
        MTHCA_QP_BIT_SIC = 1 <<  4,
        MTHCA_QP_BIT_SSC = 1 <<  3,
        /* params2 */
        MTHCA_QP_BIT_RRE = 1 << 15,
        MTHCA_QP_BIT_RWE = 1 << 14,
        MTHCA_QP_BIT_RAE = 1 << 13,
        MTHCA_QP_BIT_RIC = 1 <<  4,
        MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
        MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};

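/*
 * The structures below mirror the QP context layout the HCA firmware
 * expects in the MODIFY_QP mailbox: all multi-byte fields are
 * big-endian on the wire (hence the __be types), and the structs are
 * packed so compiler padding can't shift the hardware offsets.
 */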
struct mthca_qp_path {
        __be32 port_pkey;
        u8     rnr_retry;
        u8     g_mylmc;
        __be16 rlid;
        u8     ackto;
        u8     mgid_index;
        u8     static_rate;
        u8     hop_limit;
        __be32 sl_tclass_flowlabel;
        u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
        __be32 flags;
        __be32 tavor_sched_queue;       /* Reserved on Arbel */
        u8     mtu_msgmax;
        u8     rq_size_stride;          /* Reserved on Tavor */
        u8     sq_size_stride;          /* Reserved on Tavor */
        u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
        __be32 usr_page;
        __be32 local_qpn;
        __be32 remote_qpn;
        u32    reserved1[2];
        struct mthca_qp_path pri_path;
        struct mthca_qp_path alt_path;
        __be32 rdd;
        __be32 pd;
        __be32 wqe_base;
        __be32 wqe_lkey;
        __be32 params1;
        __be32 reserved2;
        __be32 next_send_psn;
        __be32 cqn_snd;
        __be32 snd_wqe_base_l;  /* Next send WQE on Tavor */
        __be32 snd_db_index;    /* (debugging only entries) */
        __be32 last_acked_psn;
        __be32 ssn;
        __be32 params2;
        __be32 rnr_nextrecvpsn;
        __be32 ra_buff_indx;
        __be32 cqn_rcv;
        __be32 rcv_wqe_base_l;  /* Next recv WQE on Tavor */
        __be32 rcv_db_index;    /* (debugging only entries) */
        __be32 qkey;
        __be32 srqn;
        __be32 rmsn;
        __be16 rq_wqe_counter;  /* reserved on Tavor */
        __be16 sq_wqe_counter;  /* reserved on Tavor */
        u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
        __be32 opt_param_mask;
        u32    reserved1;
        struct mthca_qp_context context;
        u32    reserved2[62];
} __attribute__((packed));

enum {
        MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
        MTHCA_QP_OPTPAR_RRE               = 1 << 1,
        MTHCA_QP_OPTPAR_RAE               = 1 << 2,
        MTHCA_QP_OPTPAR_RWE               = 1 << 3,
        MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
        MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
        MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
        MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
        MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
        MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
        MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
        MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
        MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
        MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
        MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
        MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
        MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
        [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

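/*
 * Special QPs occupy four consecutive QP numbers starting at
 * sqp_start: QP0 for ports 1 and 2, then QP1 for ports 1 and 2 (see
 * the mqpn calculation in mthca_alloc_sqp() below), so is_qp0()
 * matches only the first two entries of that range.
 */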
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 1;
}

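/*
 * A work queue lives either in a single physically contiguous
 * ("direct") buffer or in a list of pages.  WQE n starts at byte
 * offset n << wqe_shift; in the page-list case that offset is split
 * into a page index and an offset within the page.  For example,
 * with wqe_shift == 6 and 4K pages, receive WQE 70 sits at byte
 * 4480, i.e. offset 384 into page 1.
 */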
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
        else
                return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + qp->send_wqe_offset +
                        (n << qp->sq.wqe_shift);
        else
                return qp->queue.page_list[(qp->send_wqe_offset +
                                            (n << qp->sq.wqe_shift)) >>
                                           PAGE_SHIFT].buf +
                        ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
                         (PAGE_SIZE - 1));
}

static void mthca_wq_reset(struct mthca_wq *wq)
{
        wq->next_ind  = 0;
        wq->last_comp = wq->max - 1;
        wq->head      = 0;
        wq->tail      = 0;
}

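/*
 * Async event dispatch: look the QP up under qp_table.lock and take
 * a reference so that mthca_free_qp() can't free it while the
 * consumer's event handler runs, then drop the reference (waking any
 * waiting destroyer) once the handler returns.
 */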
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type)
{
        struct mthca_qp *qp;
        struct ib_event event;

        spin_lock(&dev->qp_table.lock);
        qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
        if (qp)
                ++qp->refcount;
        spin_unlock(&dev->qp_table.lock);

        if (!qp) {
                mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        if (event_type == IB_EVENT_PATH_MIG)
                qp->port = qp->alt_port;

        event.device     = &dev->ib_dev;
        event.event      = event_type;
        event.element.qp = &qp->ibqp;
        if (qp->ibqp.event_handler)
                qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

        spin_lock(&dev->qp_table.lock);
        if (!--qp->refcount)
                wake_up(&qp->wait);
        spin_unlock(&dev->qp_table.lock);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
        case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
        case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
        case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
        case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
        case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
        case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
        default:           return -1;
        }
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
        switch (transport) {
        case RC:  return MTHCA_QP_ST_RC;
        case UC:  return MTHCA_QP_ST_UC;
        case UD:  return MTHCA_QP_ST_UD;
        case RD:  return MTHCA_QP_ST_RD;
        case MLX: return MTHCA_QP_ST_MLX;
        default:  return -1;
        }
}

static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
                        int attr_mask)
{
        if (attr_mask & IB_QP_PKEY_INDEX)
                sqp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_QKEY)
                sqp->qkey = attr->qkey;
        if (attr_mask & IB_QP_SQ_PSN)
                sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
        int err;
        u8 status;
        struct mthca_init_ib_param param;

        memset(&param, 0, sizeof param);

        param.port_width = dev->limits.port_width_cap;
        param.vl_cap     = dev->limits.vl_cap;
        param.mtu_cap    = dev->limits.mtu_cap;
        param.gid_cap    = dev->limits.gid_table_len;
        param.pkey_cap   = dev->limits.pkey_table_len;

        err = mthca_INIT_IB(dev, &param, port, &status);
        if (err)
                mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
        if (status)
                mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

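/*
 * Compute the RRE/RAE/RWE bits for the QP context.  Remote reads and
 * atomics consume responder resources, so if the (new or current)
 * responder depth is zero, everything except remote write is masked
 * off regardless of the access flags requested.
 */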
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
                                  int attr_mask)
{
        u8 dest_rd_atomic;
        u32 access_flags;
        u32 hw_access_flags = 0;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MTHCA_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= MTHCA_QP_BIT_RAE;
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MTHCA_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
        switch (mthca_state) {
        case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
        case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
        case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
        case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
        case MTHCA_QP_STATE_DRAINING:
        case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
        case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
        case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
        default:                      return -1;
        }
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
        switch (mthca_mig_state) {
        case 0:  return IB_MIG_ARMED;
        case 1:  return IB_MIG_REARM;
        case 3:  return IB_MIG_MIGRATED;
        default: return -1;
        }
}

static int to_ib_qp_access_flags(int mthca_flags)
{
        int ib_flags = 0;

        if (mthca_flags & MTHCA_QP_BIT_RRE)
                ib_flags |= IB_ACCESS_REMOTE_READ;
        if (mthca_flags & MTHCA_QP_BIT_RWE)
                ib_flags |= IB_ACCESS_REMOTE_WRITE;
        if (mthca_flags & MTHCA_QP_BIT_RAE)
                ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

        return ib_flags;
}

static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
                          struct mthca_qp_path *path)
{
        memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
        ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

        if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
                return;

        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
        ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
        ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
        ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
                                                     path->static_rate & 0xf,
                                                     ib_ah_attr->port_num);
        ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
        if (ib_ah_attr->ah_flags) {
                ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
                ib_ah_attr->grh.hop_limit  = path->hop_limit;
                ib_ah_attr->grh.traffic_class =
                        (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
                ib_ah_attr->grh.flow_label =
                        be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
                memcpy(ib_ah_attr->grh.dgid.raw,
                       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
        }
}

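/*
 * Query a QP by reading its context back with the QUERY_QP firmware
 * command.  A QP in RESET has no context in the HCA, so that case
 * skips the command and reports only the software-side capabilities.
 */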
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        int err = 0;
        struct mthca_mailbox *mailbox = NULL;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *context;
        int mthca_state;
        u8 status;

        if (qp->state == IB_QPS_RESET) {
                qp_attr->qp_state = IB_QPS_RESET;
                goto done;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
        if (err)
                goto out;
        if (status) {
                mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
                err = -EINVAL;
                goto out;
        }

        qp_param    = mailbox->buf;
        context     = &qp_param->context;
        mthca_state = be32_to_cpu(context->flags) >> 28;

        qp_attr->qp_state        = to_ib_qp_state(mthca_state);
        qp_attr->path_mtu        = context->mtu_msgmax >> 5;
        qp_attr->path_mig_state  =
                to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
        qp_attr->qkey            = be32_to_cpu(context->qkey);
        qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
        qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
        qp_attr->dest_qp_num     = be32_to_cpu(context->remote_qpn) & 0xffffff;
        qp_attr->qp_access_flags =
                to_ib_qp_access_flags(be32_to_cpu(context->params2));

        if (qp->transport == RC || qp->transport == UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
                qp_attr->alt_pkey_index =
                        be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
                qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
        }

        qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
        qp_attr->port_num   =
                (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
        qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

        qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

        qp_attr->max_dest_rd_atomic =
                1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
        qp_attr->min_rnr_timer      =
                (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
        qp_attr->timeout            = context->pri_path.ackto >> 3;
        qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
        qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
        qp_attr->alt_timeout        = context->alt_path.ackto >> 3;

done:
        qp_attr->cur_qp_state        = qp_attr->qp_state;
        qp_attr->cap.max_send_wr     = qp->sq.max;
        qp_attr->cap.max_recv_wr     = qp->rq.max;
        qp_attr->cap.max_send_sge    = qp->sq.max_gs;
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
        qp_attr->cap.max_inline_data = qp->max_inline_data;

        qp_init_attr->cap = qp_attr->cap;

out:
        /* No mailbox was allocated if the QP was in RESET. */
        if (mailbox)
                mthca_free_mailbox(dev, mailbox);
        return err;
}

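/*
 * Pack an ib_ah_attr into the hardware address path format; this is
 * the inverse of to_ib_ah_attr() above.  Fails if the requested SGID
 * index doesn't fit the device's GID table.
 */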
static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
                          struct mthca_qp_path *path, u8 port)
{
        path->g_mylmc     = ah->src_path_bits & 0x7f;
        path->rlid        = cpu_to_be16(ah->dlid);
        path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
                        mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
                                  ah->grh.sgid_index, dev->limits.gid_table_len-1);
                        return -1;
                }

                path->g_mylmc   |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->sl_tclass_flowlabel =
                        cpu_to_be32((ah->sl << 28)                |
                                    (ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        } else
                path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

        return 0;
}

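/*
 * Modify QP state/attributes.  The flow: validate the requested
 * transition and attributes against device limits, build a complete
 * mthca_qp_param in a mailbox (context plus an opt_param_mask that
 * says which optional fields are valid), then execute the MODIFY_QP
 * firmware command.  Side effects such as bringing the IB link up or
 * down for QP0 are handled only after the command succeeds.
 */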
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                    struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
        u32 sqd_event = 0;
        u8 status;
        int err = -EINVAL;

        mutex_lock(&qp->mutex);

        if (attr_mask & IB_QP_CUR_STATE) {
                cur_state = attr->cur_qp_state;
        } else {
                spin_lock_irq(&qp->sq.lock);
                spin_lock(&qp->rq.lock);
                cur_state = qp->state;
                spin_unlock(&qp->rq.lock);
                spin_unlock_irq(&qp->sq.lock);
        }

        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
                mthca_dbg(dev, "Bad QP transition (transport %d) "
                          "%d->%d with attr 0x%08x\n",
                          qp->transport, cur_state, new_state,
                          attr_mask);
                goto out;
        }

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                err = 0;
                goto out;
        }

        if ((attr_mask & IB_QP_PKEY_INDEX) &&
             attr->pkey_index >= dev->limits.pkey_table_len) {
                mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
                          attr->pkey_index, dev->limits.pkey_table_len-1);
                goto out;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
                mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
                mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
                          attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
                mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
                          attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
                goto out;
        }

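        /*
         * Everything below translates the validated attributes into
         * the hardware QP context.  opt_param_mask accumulates a bit
         * for each optional field actually filled in, so the
         * firmware applies only those.
         */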
        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto out;
        }
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);

        qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
                                             (to_mthca_st(qp->transport) << 16));
        qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
        else {
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
                switch (attr->path_mig_state) {
                case IB_MIG_MIGRATED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
                        break;
                case IB_MIG_REARM:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
                        break;
                case IB_MIG_ARMED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
                        break;
                }
        }

        /* leave tavor_sched_queue as 0 */

        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
        else if (attr_mask & IB_QP_PATH_MTU) {
                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
                        mthca_dbg(dev, "path MTU (%u) is invalid\n",
                                  attr->path_mtu);
                        goto out_mailbox;
                }
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
        }

        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
                        qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
                qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

                if (qp->sq.max)
                        qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
                qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
        }

        /* leave arbel_sched_queue as 0 */

        if (qp->ibqp.uobject)
                qp_context->usr_page =
                        cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
        else
                qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
        qp_context->local_qpn = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN) {
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
        }

        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(qp->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
                                cpu_to_be32(attr->port_num << 24);
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(attr->pkey_index);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
                        attr->rnr_retry << 5;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
                                                        MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
        }

        if (attr_mask & IB_QP_AV) {
                if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
                                   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
                        goto out_mailbox;

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
                        mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
                                  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
                        goto out_mailbox;
                }

                if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
                        mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
                                  attr->alt_port_num);
                        goto out_mailbox;
                }

                if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
                                   attr->alt_ah_attr.port_num))
                        goto out_mailbox;

                qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
                                                              attr->alt_port_num << 24);
                qp_context->alt_path.ackto = attr->alt_timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
        }

        /* leave rdd as 0 */
        qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
        /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
        qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                           (MTHCA_FLIGHT_LIMIT << 24) |
                                           MTHCA_QP_BIT_SWE);
        if (qp->sq_policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic) {
                        qp_context->params1 |=
                                cpu_to_be32(MTHCA_QP_BIT_SRE |
                                            MTHCA_QP_BIT_SAE);
                        qp_context->params1 |=
                                cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
                }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }

        if (attr_mask & IB_QP_SQ_PSN)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

        if (mthca_is_memfree(dev)) {
                qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
                qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                if (attr->max_dest_rd_atomic)
                        qp_context->params2 |=
                                cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
        }

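        /*
         * RWE/RRE/RAE depend on both the requested access flags and
         * the responder depth, so recompute them whenever either
         * IB_QP_ACCESS_FLAGS or IB_QP_MAX_DEST_RD_ATOMIC changes.
         */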
        if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
                qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);
        }

        qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

        if (ibqp->srq)
                qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
        }
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

        qp_context->ra_buff_indx =
                cpu_to_be32(dev->qp_table.rdb_base +
                            ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
                             dev->qp_table.rdb_shift));

        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

        if (mthca_is_memfree(dev))
                qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }

        if (ibqp->srq)
                qp_context->srqn = cpu_to_be32(1 << 24 |
                                               to_msrq(ibqp->srq)->srqn);

        if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
            attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY             &&
            attr->en_sqd_async_notify)
                sqd_event = 1 << 31;

        err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
                              mailbox, sqd_event, &status);
        if (err)
                goto out_mailbox;
        if (status) {
                mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
                           cur_state, new_state, status);
                err = -EINVAL;
                goto out_mailbox;
        }

        qp->state = new_state;
        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->atomic_rd_en = attr->qp_access_flags;
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->resp_depth = attr->max_dest_rd_atomic;
        if (attr_mask & IB_QP_PORT)
                qp->port = attr->port_num;
        if (attr_mask & IB_QP_ALT_PATH)
                qp->alt_port = attr->alt_port_num;

        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);

        /*
         * If we moved QP0 to RTR, bring the IB link up; if we moved
         * QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
                        init_port(dev, qp->port);

                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
                        mthca_CLOSE_IB(dev, qp->port, &status);
        }

        /*
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
        if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

                mthca_wq_reset(&qp->sq);
                qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

                mthca_wq_reset(&qp->rq);
                qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

                if (mthca_is_memfree(dev)) {
                        *qp->sq.db = 0;
                        *qp->rq.db = 0;
                }
        }

out_mailbox:
        mthca_free_mailbox(dev, mailbox);

out:
        mutex_unlock(&qp->mutex);
        return err;
}

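/*
 * WQE capacity math: every descriptor starts with a next segment and
 * is followed by transport-specific segments (a remote address
 * segment for RC/UC, an address vector for UD, header and checksum
 * data segments for MLX); whatever room is left goes to the
 * scatter/gather data segments.
 */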
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
        /*
         * Calculate the maximum size of WQE s/g segments, excluding
         * the next segment and other non-data segments.
         */
        int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

        switch (qp->transport) {
        case MLX:
                max_data_size -= 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                if (mthca_is_memfree(dev))
                        max_data_size -= sizeof (struct mthca_arbel_ud_seg);
                else
                        max_data_size -= sizeof (struct mthca_tavor_ud_seg);
                break;

        default:
                max_data_size -= sizeof (struct mthca_raddr_seg);
                break;
        }

        return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
        /* We don't support inline data for kernel QPs (yet). */
        return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_qp *qp)
{
        int max_data_size = mthca_max_data_size(dev, qp,
                                                min(dev->limits.max_desc_sz,
                                                    1 << qp->sq.wqe_shift));

        qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

        qp->sq.max_gs = min_t(int, dev->limits.max_sg,
                              max_data_size / sizeof (struct mthca_data_seg));
        qp->rq.max_gs = min_t(int, dev->limits.max_sg,
                              (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
                               sizeof (struct mthca_next_seg)) /
                              sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue).
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_pd *pd,
                               struct mthca_qp *qp)
{
        int size;
        int err = -ENOMEM;

        size = sizeof (struct mthca_next_seg) +
                qp->rq.max_gs * sizeof (struct mthca_data_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

        for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
             qp->rq.wqe_shift++)
                ; /* nothing */

        size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
        switch (qp->transport) {
        case MLX:
                size += 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                size += mthca_is_memfree(dev) ?
                        sizeof (struct mthca_arbel_ud_seg) :
                        sizeof (struct mthca_tavor_ud_seg);
                break;

        case UC:
                size += sizeof (struct mthca_raddr_seg);
                break;

        case RC:
                size += sizeof (struct mthca_raddr_seg);
                /*
                 * An atomic op will require an atomic segment, a
                 * remote address segment and one scatter entry.
                 */
                size = max_t(int, size,
                             sizeof (struct mthca_atomic_seg) +
                             sizeof (struct mthca_raddr_seg) +
                             sizeof (struct mthca_data_seg));
                break;

        default:
                break;
        }

        /* Make sure that we have enough space for a bind request */
        size = max_t(int, size, sizeof (struct mthca_bind_seg));

        size += sizeof (struct mthca_next_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

        for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
             qp->sq.wqe_shift++)
                ; /* nothing */

        qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
                                    1 << qp->sq.wqe_shift);

        /*
         * If this is a userspace QP, we don't actually have to
         * allocate anything.  All we need is to calculate the WQE
         * sizes and the send_wqe_offset, so we're done now.
         */
        if (pd->ibpd.uobject)
                return 0;

        size = PAGE_ALIGN(qp->send_wqe_offset +
                          (qp->sq.max << qp->sq.wqe_shift));

        qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
                           GFP_KERNEL);
        if (!qp->wrid)
                goto err_out;

        err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
                              &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
        if (err)
                goto err_out;

        return 0;

err_out:
        kfree(qp->wrid);
        return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
                                       (qp->sq.max << qp->sq.wqe_shift)),
                       &qp->queue, qp->is_direct, &qp->mr);
        kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
                             struct mthca_qp *qp)
{
        int ret;

        if (mthca_is_memfree(dev)) {
                ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
                if (ret)
                        return ret;

                ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
                if (ret)
                        goto err_qpc;

                ret = mthca_table_get(dev, dev->qp_table.rdb_table,
                                      qp->qpn << dev->qp_table.rdb_shift);
                if (ret)
                        goto err_eqpc;
        }

        return 0;

err_eqpc:
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

        return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
                                struct mthca_qp *qp)
{
        mthca_table_put(dev, dev->qp_table.rdb_table,
                        qp->qpn << dev->qp_table.rdb_shift);
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
                                                 qp->qpn, &qp->rq.db);
                if (qp->rq.db_index < 0)
                        return qp->rq.db_index;

                qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
                                                 qp->qpn, &qp->sq.db);
                if (qp->sq.db_index < 0) {
                        mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
                        return qp->sq.db_index;
                }
        }

        return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
                mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
        }
}

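/*
 * Common QP initialization: software state, ICM mappings, the WQE
 * buffer, and doorbell records.  On mem-free HCAs each receive WQE
 * is also pre-linked to its successor (nda_op/ee_nds) with its
 * scatter entries marked invalid, so the queue is in a consistent
 * state before anything is posted.
 */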
static int mthca_alloc_qp_common(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_cq *send_cq,
                                 struct mthca_cq *recv_cq,
                                 enum ib_sig_type send_policy,
                                 struct mthca_qp *qp)
{
        int ret;
        int i;

        qp->refcount = 1;
        init_waitqueue_head(&qp->wait);
        mutex_init(&qp->mutex);
        qp->state        = IB_QPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
        qp->sq_policy    = send_policy;
        mthca_wq_reset(&qp->sq);
        mthca_wq_reset(&qp->rq);

        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);

        ret = mthca_map_memfree(dev, qp);
        if (ret)
                return ret;

        ret = mthca_alloc_wqe_buf(dev, pd, qp);
        if (ret) {
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        mthca_adjust_qp_caps(dev, pd, qp);

        /*
         * If this is a userspace QP, we're done now.  The doorbells
         * will be allocated and buffers will be initialized in
         * userspace.
         */
        if (pd->ibpd.uobject)
                return 0;

        ret = mthca_alloc_memfree(dev, qp);
        if (ret) {
                mthca_free_wqe_buf(dev, qp);
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        if (mthca_is_memfree(dev)) {
                struct mthca_next_seg *next;
                struct mthca_data_seg *scatter;
                int size = (sizeof (struct mthca_next_seg) +
                            qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

                for (i = 0; i < qp->rq.max; ++i) {
                        next = get_recv_wqe(qp, i);
                        next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
                                                   qp->rq.wqe_shift);
                        next->ee_nds = cpu_to_be32(size);

                        for (scatter = (void *) (next + 1);
                             (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
                             ++scatter)
                                scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
                }

                for (i = 0; i < qp->sq.max; ++i) {
                        next = get_send_wqe(qp, i);
                        next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
                                                    qp->sq.wqe_shift) +
                                                   qp->send_wqe_offset);
                }
        }

        qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
        qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

        return 0;
}

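/*
 * Mem-free HCAs encode queue sizes as log2 values in the QP context
 * (see the ilog2() calls in mthca_modify_qp()), so requested WR
 * counts are rounded up to powers of two there; Tavor takes the
 * sizes as given.
 */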
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
                             struct mthca_pd *pd, struct mthca_qp *qp)
{
        int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

        /* Sanity check QP size before proceeding */
        if (cap->max_send_wr     > dev->limits.max_wqes ||
            cap->max_recv_wr     > dev->limits.max_wqes ||
            cap->max_send_sge    > dev->limits.max_sg   ||
            cap->max_recv_sge    > dev->limits.max_sg   ||
            cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
                return -EINVAL;

        /*
         * For MLX transport we need 2 extra S/G entries:
         * one for the header and one for the checksum at the end
         */
        if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
                return -EINVAL;

        if (mthca_is_memfree(dev)) {
                qp->rq.max = cap->max_recv_wr ?
                        roundup_pow_of_two(cap->max_recv_wr) : 0;
                qp->sq.max = cap->max_send_wr ?
                        roundup_pow_of_two(cap->max_send_wr) : 0;
        } else {
                qp->rq.max = cap->max_recv_wr;
                qp->sq.max = cap->max_send_wr;
        }

        qp->rq.max_gs = cap->max_recv_sge;
        qp->sq.max_gs = max_t(int, cap->max_send_sge,
                              ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
                                    MTHCA_INLINE_CHUNK_SIZE) /
                              sizeof (struct mthca_data_seg));

        return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
                   struct mthca_pd *pd,
                   struct mthca_cq *send_cq,
                   struct mthca_cq *recv_cq,
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
                   struct ib_qp_cap *cap,
                   struct mthca_qp *qp)
{
        int err;

        switch (type) {
        case IB_QPT_RC: qp->transport = RC; break;
        case IB_QPT_UC: qp->transport = UC; break;
        case IB_QPT_UD: qp->transport = UD; break;
        default: return -EINVAL;
        }

        err = mthca_set_qp_size(dev, cap, pd, qp);
        if (err)
                return err;

        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;

        /* initialize port to zero for error-catching. */
        qp->port = 0;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, qp);
        if (err) {
                mthca_free(&dev->qp_table.alloc, qp->qpn);
                return err;
        }

        spin_lock_irq(&dev->qp_table.lock);
        mthca_array_set(&dev->qp_table.qp,
                        qp->qpn & (dev->limits.num_qps - 1), qp);
        spin_unlock_irq(&dev->qp_table.lock);

        return 0;
}

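/*
 * When the send and receive CQs differ, both must be held while a QP
 * is added to or removed from the QP table.  Always take the
 * lower-numbered CQ's lock first (and release in reverse order) so
 * that two paths locking the same pair can never deadlock.
 */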
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
        if (send_cq == recv_cq)
                spin_lock_irq(&send_cq->lock);
        else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
        if (send_cq == recv_cq)
                spin_unlock_irq(&send_cq->lock);
        else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

int mthca_alloc_sqp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
                    struct mthca_cq *recv_cq,
                    enum ib_sig_type send_policy,
                    struct ib_qp_cap *cap,
                    int qpn,
                    int port,
                    struct mthca_sqp *sqp)
{
        u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
        int err;

        sqp->qp.transport = MLX;
        err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
        if (err)
                return err;

        sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
        sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
                                             &sqp->header_dma, GFP_KERNEL);
        if (!sqp->header_buf)
                return -ENOMEM;

        spin_lock_irq(&dev->qp_table.lock);
        if (mthca_array_get(&dev->qp_table.qp, mqpn))
                err = -EBUSY;
        else
                mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
        spin_unlock_irq(&dev->qp_table.lock);

        if (err)
                goto err_out;

        sqp->qp.port      = port;
        sqp->qp.qpn       = mqpn;
        sqp->qp.transport = MLX;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, &sqp->qp);
        if (err)
                goto err_out_free;

        atomic_inc(&pd->sqp_count);

        return 0;

err_out_free:
        /*
         * Lock CQs here, so that CQ polling code can do QP lookup
         * without taking a lock.
         */
        mthca_lock_cqs(send_cq, recv_cq);

        spin_lock(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp, mqpn);
        spin_unlock(&dev->qp_table.lock);

        mthca_unlock_cqs(send_cq, recv_cq);

err_out:
        dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
                          sqp->header_buf, sqp->header_dma);

        return err;
}

static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
        int c;

        spin_lock_irq(&dev->qp_table.lock);
        c = qp->refcount;
        spin_unlock_irq(&dev->qp_table.lock);

        return c;
}

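/*
 * Tear-down order matters here: remove the QP from the lookup table
 * under the CQ locks (so pollers can't find it again), drop our
 * reference, then sleep until every event handler still holding a
 * reference has finished before freeing anything.
 */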
void mthca_free_qp(struct mthca_dev *dev,
                   struct mthca_qp *qp)
{
        u8 status;
        struct mthca_cq *send_cq;
        struct mthca_cq *recv_cq;

        send_cq = to_mcq(qp->ibqp.send_cq);
        recv_cq = to_mcq(qp->ibqp.recv_cq);

        /*
         * Lock CQs here, so that CQ polling code can do QP lookup
         * without taking a lock.
         */
        mthca_lock_cqs(send_cq, recv_cq);

        spin_lock(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp,
                          qp->qpn & (dev->limits.num_qps - 1));
        --qp->refcount;
        spin_unlock(&dev->qp_table.lock);

        mthca_unlock_cqs(send_cq, recv_cq);

        wait_event(qp->wait, !get_qp_refcount(dev, qp));

        if (qp->state != IB_QPS_RESET)
                mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
                                NULL, 0, &status);

        /*
         * If this is a userspace QP, the buffers, MR, CQs and so on
         * will be cleaned up in userspace, so all we have to do is
         * unref the mem-free tables and free the QPN in our table.
         */
        if (!qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

                mthca_free_memfree(dev, qp);
                mthca_free_wqe_buf(dev, qp);
        }

        mthca_unmap_memfree(dev, qp);

        if (is_sqp(dev, qp)) {
                atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
                dma_free_coherent(&dev->pdev->dev,
                                  to_msqp(qp)->header_buf_size,
                                  to_msqp(qp)->header_buf,
                                  to_msqp(qp)->header_dma);
        } else
                mthca_free(&dev->qp_table.alloc, qp->qpn);
}

1444 | /* Create UD header for an MLX send and build a data segment for it */ | |
1445 | static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, | |
1446 | int ind, struct ib_send_wr *wr, | |
1447 | struct mthca_mlx_seg *mlx, | |
1448 | struct mthca_data_seg *data) | |
1449 | { | |
1450 | int header_size; | |
1451 | int err; | |
97f52eb4 | 1452 | u16 pkey; |
1da177e4 LT |
1453 | |
1454 | ib_ud_header_init(256, /* assume a MAD */ | |
9eacee2a | 1455 | mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), |
1da177e4 LT |
1456 | &sqp->ud_header); |
1457 | ||
1458 | err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); | |
1459 | if (err) | |
1460 | return err; | |
1461 | mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); | |
1462 | mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | | |
97f52eb4 SH |
1463 | (sqp->ud_header.lrh.destination_lid == |
1464 | IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | | |
1da177e4 LT |
1465 | (sqp->ud_header.lrh.service_level << 8)); |
1466 | mlx->rlid = sqp->ud_header.lrh.destination_lid; | |
1467 | mlx->vcrc = 0; | |
1468 | ||
1469 | switch (wr->opcode) { | |
1470 | case IB_WR_SEND: | |
1471 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; | |
1472 | sqp->ud_header.immediate_present = 0; | |
1473 | break; | |
1474 | case IB_WR_SEND_WITH_IMM: | |
1475 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; | |
1476 | sqp->ud_header.immediate_present = 1; | |
1477 | sqp->ud_header.immediate_data = wr->imm_data; | |
1478 | break; | |
1479 | default: | |
1480 | return -EINVAL; | |
1481 | } | |
1482 | ||
1483 | sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; | |
97f52eb4 SH |
1484 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) |
1485 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; | |
1da177e4 LT |
1486 | sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); |
1487 | if (!sqp->qp.ibqp.qp_num) | |
bf6a9e31 | 1488 | ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, |
97f52eb4 | 1489 | sqp->pkey_index, &pkey); |
1da177e4 | 1490 | else |
bf6a9e31 | 1491 | ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, |
97f52eb4 SH |
1492 | wr->wr.ud.pkey_index, &pkey); |
1493 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); | |
1da177e4 LT |
1494 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); |
1495 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); | |
1496 | sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? | |
1497 | sqp->qkey : wr->wr.ud.remote_qkey); | |
1498 | sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); | |
1499 | ||
1500 | header_size = ib_ud_header_pack(&sqp->ud_header, | |
1501 | sqp->header_buf + | |
1502 | ind * MTHCA_UD_HEADER_SIZE); | |
1503 | ||
1504 | data->byte_count = cpu_to_be32(header_size); | |
1505 | data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); | |
1506 | data->addr = cpu_to_be64(sqp->header_dma + | |
1507 | ind * MTHCA_UD_HEADER_SIZE); | |
1508 | ||
1509 | return 0; | |
1510 | } | |
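
Each send WQE index owns a fixed MTHCA_UD_HEADER_SIZE (72-byte) slot in the special QP's header buffer, so headers for in-flight sends never overlap even after the ring wraps. A hypothetical helper, not in the driver, that makes the slot arithmetic above explicit:

static void mlx_header_slot(struct mthca_sqp *sqp, int ind,
			    void **buf, dma_addr_t *dma)
{
	/* CPU copy that ib_ud_header_pack() writes into ... */
	*buf = sqp->header_buf + ind * MTHCA_UD_HEADER_SIZE;
	/* ... and the bus address the HCA gathers the header from */
	*dma = sqp->header_dma + ind * MTHCA_UD_HEADER_SIZE;
}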
1511 | ||
1512 | static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, | |
1513 | struct ib_cq *ib_cq) | |
1514 | { | |
1515 | unsigned cur; | |
1516 | struct mthca_cq *cq; | |
1517 | ||
1518 | cur = wq->head - wq->tail; | |
1519 | if (likely(cur + nreq < wq->max)) | |
1520 | return 0; | |
1521 | ||
1522 | cq = to_mcq(ib_cq); | |
1523 | spin_lock(&cq->lock); | |
1524 | cur = wq->head - wq->tail; | |
1525 | spin_unlock(&cq->lock); | |
1526 | ||
1527 | return cur + nreq >= wq->max; | |
1528 | } | |
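
wq->head and wq->tail are free-running unsigned counters, so the occupancy test above is wrap-safe: unsigned subtraction is modulo 2^32 and still yields the number of outstanding WQEs after head wraps past 0xffffffff. The unlocked first read can only see a stale (too old) tail, which errs toward reporting a fuller queue, so the slow path re-reads tail under the CQ lock before declaring overflow. A minimal sketch of the invariant (helper name is hypothetical):

static unsigned wq_outstanding(unsigned head, unsigned tail)
{
	/* e.g. head == 2 after wrapping, tail == 0xfffffffe:
	 * 2 - 0xfffffffe == 4 outstanding WQEs */
	return head - tail;
}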
1529 | ||
1530 | int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |
1531 | struct ib_send_wr **bad_wr) | |
1532 | { | |
1533 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1534 | struct mthca_qp *qp = to_mqp(ibqp); | |
1535 | void *wqe; | |
1536 | void *prev_wqe; | |
1537 | unsigned long flags; | |
1538 | int err = 0; | |
1539 | int nreq; | |
1540 | int i; | |
1541 | int size; | |
1542 | int size0 = 0; | |
e54b82d7 | 1543 | u32 f0; |
1da177e4 LT |
1544 | int ind; |
1545 | u8 op0 = 0; | |
1546 | ||
1547 | spin_lock_irqsave(&qp->sq.lock, flags); | |
1548 | ||
1549 | /* XXX check that state is OK to post send */ | |
1550 | ||
1551 | ind = qp->sq.next_ind; | |
1552 | ||
1553 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
1554 | if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | |
1555 | mthca_err(dev, "SQ %06x full (%u head, %u tail," | |
1556 | " %d max, %d nreq)\n", qp->qpn, | |
1557 | qp->sq.head, qp->sq.tail, | |
1558 | qp->sq.max, nreq); | |
1559 | err = -ENOMEM; | |
1560 | *bad_wr = wr; | |
1561 | goto out; | |
1562 | } | |
1563 | ||
1564 | wqe = get_send_wqe(qp, ind); | |
1565 | prev_wqe = qp->sq.last; | |
1566 | qp->sq.last = wqe; | |
1567 | ||
1568 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | |
1569 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; | |
1570 | ((struct mthca_next_seg *) wqe)->flags = | |
1571 | ((wr->send_flags & IB_SEND_SIGNALED) ? | |
1572 | cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | | |
1573 | ((wr->send_flags & IB_SEND_SOLICITED) ? | |
1574 | cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | | |
1575 | cpu_to_be32(1); | |
1576 | if (wr->opcode == IB_WR_SEND_WITH_IMM || | |
1577 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | |
3fba2317 | 1578 | ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; |
1da177e4 LT |
1579 | |
1580 | wqe += sizeof (struct mthca_next_seg); | |
1581 | size = sizeof (struct mthca_next_seg) / 16; | |
1582 | ||
1583 | switch (qp->transport) { | |
1584 | case RC: | |
1585 | switch (wr->opcode) { | |
1586 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
1587 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
1588 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1589 | cpu_to_be64(wr->wr.atomic.remote_addr); | |
1590 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1591 | cpu_to_be32(wr->wr.atomic.rkey); | |
1592 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1593 | ||
1594 | wqe += sizeof (struct mthca_raddr_seg); | |
1595 | ||
1596 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | |
1597 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1598 | cpu_to_be64(wr->wr.atomic.swap); | |
1599 | ((struct mthca_atomic_seg *) wqe)->compare = | |
1600 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1601 | } else { | |
1602 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1603 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1604 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | |
1605 | } | |
1606 | ||
1607 | wqe += sizeof (struct mthca_atomic_seg); | |
62abb841 MT |
1608 | size += (sizeof (struct mthca_raddr_seg) + |
1609 | sizeof (struct mthca_atomic_seg)) / 16; | |
1da177e4 LT |
1610 | break; |
1611 | ||
1612 | case IB_WR_RDMA_WRITE: | |
1613 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1614 | case IB_WR_RDMA_READ: | |
1615 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1616 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1617 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1618 | cpu_to_be32(wr->wr.rdma.rkey); | |
1619 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1620 | wqe += sizeof (struct mthca_raddr_seg); | |
1621 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1622 | break; | |
1623 | ||
1624 | default: | |
1625 | /* No extra segments required for sends */ | |
1626 | break; | |
1627 | } | |
1628 | ||
1629 | break; | |
1630 | ||
9e6970b5 RD |
1631 | case UC: |
1632 | switch (wr->opcode) { | |
1633 | case IB_WR_RDMA_WRITE: | |
1634 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1635 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1636 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1637 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1638 | cpu_to_be32(wr->wr.rdma.rkey); | |
1639 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1640 | wqe += sizeof (struct mthca_raddr_seg); | |
1641 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1642 | break; | |
1643 | ||
1644 | default: | |
1645 | /* No extra segments required for sends */ | |
1646 | break; | |
1647 | } | |
1648 | ||
1649 | break; | |
1650 | ||
1da177e4 LT |
1651 | case UD: |
1652 | ((struct mthca_tavor_ud_seg *) wqe)->lkey = | |
1653 | cpu_to_be32(to_mah(wr->wr.ud.ah)->key); | |
1654 | ((struct mthca_tavor_ud_seg *) wqe)->av_addr = | |
1655 | cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); | |
1656 | ((struct mthca_tavor_ud_seg *) wqe)->dqpn = | |
1657 | cpu_to_be32(wr->wr.ud.remote_qpn); | |
1658 | ((struct mthca_tavor_ud_seg *) wqe)->qkey = | |
1659 | cpu_to_be32(wr->wr.ud.remote_qkey); | |
1660 | ||
1661 | wqe += sizeof (struct mthca_tavor_ud_seg); | |
1662 | size += sizeof (struct mthca_tavor_ud_seg) / 16; | |
1663 | break; | |
1664 | ||
1665 | case MLX: | |
1666 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | |
1667 | wqe - sizeof (struct mthca_next_seg), | |
1668 | wqe); | |
1669 | if (err) { | |
1670 | *bad_wr = wr; | |
1671 | goto out; | |
1672 | } | |
1673 | wqe += sizeof (struct mthca_data_seg); | |
1674 | size += sizeof (struct mthca_data_seg) / 16; | |
1675 | break; | |
1676 | } | |
1677 | ||
1678 | if (wr->num_sge > qp->sq.max_gs) { | |
1679 | mthca_err(dev, "too many gathers\n"); | |
1680 | err = -EINVAL; | |
1681 | *bad_wr = wr; | |
1682 | goto out; | |
1683 | } | |
1684 | ||
1685 | for (i = 0; i < wr->num_sge; ++i) { | |
1686 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1687 | cpu_to_be32(wr->sg_list[i].length); | |
1688 | ((struct mthca_data_seg *) wqe)->lkey = | |
1689 | cpu_to_be32(wr->sg_list[i].lkey); | |
1690 | ((struct mthca_data_seg *) wqe)->addr = | |
1691 | cpu_to_be64(wr->sg_list[i].addr); | |
1692 | wqe += sizeof (struct mthca_data_seg); | |
1693 | size += sizeof (struct mthca_data_seg) / 16; | |
1694 | } | |
1695 | ||
1696 | /* Add one more inline data segment for ICRC */ | |
1697 | if (qp->transport == MLX) { | |
1698 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1699 | cpu_to_be32((1 << 31) | 4); | |
1700 | ((u32 *) wqe)[1] = 0; | |
1701 | wqe += sizeof (struct mthca_data_seg); | |
1702 | size += sizeof (struct mthca_data_seg) / 16; | |
1703 | } | |
1704 | ||
1705 | qp->wrid[ind + qp->rq.max] = wr->wr_id; | |
1706 | ||
1707 | if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { | |
1708 | mthca_err(dev, "opcode invalid\n"); | |
1709 | err = -EINVAL; | |
1710 | *bad_wr = wr; | |
1711 | goto out; | |
1712 | } | |
1713 | ||
d6cff021 RD |
1714 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
1715 | cpu_to_be32(((ind << qp->sq.wqe_shift) + | |
1716 | qp->send_wqe_offset) | | |
1717 | mthca_opcode[wr->opcode]); | |
1718 | wmb(); | |
1719 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
7667abd1 DB |
1720 | cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size | |
1721 | ((wr->send_flags & IB_SEND_FENCE) ? | |
1722 | MTHCA_NEXT_FENCE : 0)); | |
1da177e4 LT |
1723 | |
1724 | if (!size0) { | |
1725 | size0 = size; | |
1726 | op0 = mthca_opcode[wr->opcode]; | |
e54b82d7 MT |
1727 | f0 = wr->send_flags & IB_SEND_FENCE ? |
1728 | MTHCA_SEND_DOORBELL_FENCE : 0; | |
1da177e4 LT |
1729 | } |
1730 | ||
1731 | ++ind; | |
1732 | if (unlikely(ind >= qp->sq.max)) | |
1733 | ind -= qp->sq.max; | |
1734 | } | |
1735 | ||
1736 | out: | |
1737 | if (likely(nreq)) { | |
97f52eb4 | 1738 | __be32 doorbell[2]; |
1da177e4 LT |
1739 | |
1740 | doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + | |
1741 | qp->send_wqe_offset) | f0 | op0); | |
1742 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
1743 | ||
1744 | wmb(); | |
1745 | ||
1746 | mthca_write64(doorbell, | |
1747 | dev->kar + MTHCA_SEND_DOORBELL, | |
1748 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1f5c23e2 AK |
1749 | /* |
1750 | * Make sure doorbells don't leak out of SQ spinlock | |
1751 | * and reach the HCA out of order: | |
1752 | */ | |
1753 | mmiowb(); | |
1da177e4 LT |
1754 | } |
1755 | ||
1756 | qp->sq.next_ind = ind; | |
1757 | qp->sq.head += nreq; | |
1758 | ||
1759 | spin_unlock_irqrestore(&qp->sq.lock, flags); | |
1760 | return err; | |
1761 | } | |
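
The Tavor send doorbell is two big-endian words. The layout below is read off the code above; treat the field positions as an assumption drawn from the masks used here, not as hardware documentation, and the helper itself is hypothetical:

/*
 * db[0]: offset of the first new WQE (aligned to the WQE stride, so the
 *        low bits are free), OR'ed with the fence flag f0 and the 5-bit
 *        opcode op0 of the first WQE.
 * db[1]: (QPN << 8) | size of the first WQE in 16-byte chunks.
 */
static void tavor_sq_doorbell(__be32 db[2], u32 first_wqe_off,
			      u32 f0, u8 op0, u32 qpn, u8 size0)
{
	db[0] = cpu_to_be32(first_wqe_off | f0 | op0);
	db[1] = cpu_to_be32((qpn << 8) | size0);
}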
1762 | ||
1763 | int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
1764 | struct ib_recv_wr **bad_wr) | |
1765 | { | |
1766 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1767 | struct mthca_qp *qp = to_mqp(ibqp); | |
ae57e24a | 1768 | __be32 doorbell[2]; |
1da177e4 LT |
1769 | unsigned long flags; |
1770 | int err = 0; | |
1771 | int nreq; | |
1772 | int i; | |
1773 | int size; | |
1774 | int size0 = 0; | |
1775 | int ind; | |
1776 | void *wqe; | |
1777 | void *prev_wqe; | |
1778 | ||
1779 | spin_lock_irqsave(&qp->rq.lock, flags); | |
1780 | ||
1781 | /* XXX check that state is OK to post receive */ | |
1782 | ||
1783 | ind = qp->rq.next_ind; | |
1784 | ||
23f3bc0f | 1785 | for (nreq = 0; wr; wr = wr->next) { |
1da177e4 LT |
1786 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
1787 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | |
1788 | " %d max, %d nreq)\n", qp->qpn, | |
1789 | qp->rq.head, qp->rq.tail, | |
1790 | qp->rq.max, nreq); | |
1791 | err = -ENOMEM; | |
1792 | *bad_wr = wr; | |
1793 | goto out; | |
1794 | } | |
1795 | ||
1796 | wqe = get_recv_wqe(qp, ind); | |
1797 | prev_wqe = qp->rq.last; | |
1798 | qp->rq.last = wqe; | |
1799 | ||
1800 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | |
1801 | ((struct mthca_next_seg *) wqe)->ee_nds = | |
1802 | cpu_to_be32(MTHCA_NEXT_DBD); | |
1803 | ((struct mthca_next_seg *) wqe)->flags = 0; | |
1804 | ||
1805 | wqe += sizeof (struct mthca_next_seg); | |
1806 | size = sizeof (struct mthca_next_seg) / 16; | |
1807 | ||
1808 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
1809 | err = -EINVAL; | |
1810 | *bad_wr = wr; | |
1811 | goto out; | |
1812 | } | |
1813 | ||
1814 | for (i = 0; i < wr->num_sge; ++i) { | |
1815 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1816 | cpu_to_be32(wr->sg_list[i].length); | |
1817 | ((struct mthca_data_seg *) wqe)->lkey = | |
1818 | cpu_to_be32(wr->sg_list[i].lkey); | |
1819 | ((struct mthca_data_seg *) wqe)->addr = | |
1820 | cpu_to_be64(wr->sg_list[i].addr); | |
1821 | wqe += sizeof (struct mthca_data_seg); | |
1822 | size += sizeof (struct mthca_data_seg) / 16; | |
1823 | } | |
1824 | ||
1825 | qp->wrid[ind] = wr->wr_id; | |
1826 | ||
d6cff021 RD |
1827 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
1828 | cpu_to_be32((ind << qp->rq.wqe_shift) | 1); | |
1829 | wmb(); | |
1830 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
1831 | cpu_to_be32(MTHCA_NEXT_DBD | size); | |
1da177e4 LT |
1832 | |
1833 | if (!size0) | |
1834 | size0 = size; | |
1835 | ||
1836 | ++ind; | |
1837 | if (unlikely(ind >= qp->rq.max)) | |
1838 | ind -= qp->rq.max; | |
23f3bc0f MT |
1839 | |
1840 | ++nreq; | |
1841 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | |
1842 | nreq = 0; | |
1843 | ||
1844 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | |
1845 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | |
1846 | ||
1847 | wmb(); | |
1848 | ||
1849 | mthca_write64(doorbell, | |
1850 | dev->kar + MTHCA_RECEIVE_DOORBELL, | |
1851 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1852 | ||
1853 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | |
1854 | size0 = 0; | |
1855 | } | |
1da177e4 LT |
1856 | } |
1857 | ||
1858 | out: | |
1859 | if (likely(nreq)) { | |
1da177e4 LT |
1860 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); |
1861 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); | |
1862 | ||
1863 | wmb(); | |
1864 | ||
1865 | mthca_write64(doorbell, | |
1866 | dev->kar + MTHCA_RECEIVE_DOORBELL, | |
1867 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1868 | } | |
1869 | ||
1870 | qp->rq.next_ind = ind; | |
1871 | qp->rq.head += nreq; | |
1872 | ||
1f5c23e2 AK |
1873 | /* |
1874 | * Make sure doorbells don't leak out of RQ spinlock and reach | |
1875 | * the HCA out of order: | |
1876 | */ | |
1877 | mmiowb(); | |
1878 | ||
1da177e4 LT |
1879 | spin_unlock_irqrestore(&qp->rq.lock, flags); |
1880 | return err; | |
1881 | } | |
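
Unlike the send side, the receive loop above flushes a doorbell every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests. Assuming that constant is 256: the WQE count in the doorbell's second word is only 8 bits wide, so a full batch is encoded as a count of zero (note that the mid-loop flush writes qp->qpn << 8 with no count). A sketch of the encoding, with a hypothetical helper name:

static __be32 tavor_rq_db1(u32 qpn, unsigned nreq)
{
	/* nreq in 1..256; a full batch of 256 wraps to 0 in the low byte */
	return cpu_to_be32((qpn << 8) | (nreq & 0xff));
}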
1882 | ||
1883 | int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |
1884 | struct ib_send_wr **bad_wr) | |
1885 | { | |
1886 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1887 | struct mthca_qp *qp = to_mqp(ibqp); | |
e0ae9ecf | 1888 | __be32 doorbell[2]; |
1da177e4 LT |
1889 | void *wqe; |
1890 | void *prev_wqe; | |
1891 | unsigned long flags; | |
1892 | int err = 0; | |
1893 | int nreq; | |
1894 | int i; | |
1895 | int size; | |
1896 | int size0 = 0; | |
e54b82d7 | 1897 | u32 f0; |
1da177e4 LT |
1898 | int ind; |
1899 | u8 op0 = 0; | |
1900 | ||
1901 | spin_lock_irqsave(&qp->sq.lock, flags); | |
1902 | ||
1903 | /* XXX check that state is OK to post send */ | |
1904 | ||
1905 | ind = qp->sq.head & (qp->sq.max - 1); | |
1906 | ||
1907 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
e0ae9ecf MT |
1908 | if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { |
1909 | nreq = 0; | |
1910 | ||
1911 | doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | | |
1912 | ((qp->sq.head & 0xffff) << 8) | | |
1913 | f0 | op0); | |
1914 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
1915 | ||
1916 | qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; | |
1917 | size0 = 0; | |
1918 | ||
1919 | /* | |
1920 | * Make sure that descriptors are written before | |
1921 | * doorbell record. | |
1922 | */ | |
1923 | wmb(); | |
1924 | *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); | |
1925 | ||
1926 | /* | |
1927 | * Make sure doorbell record is written before we | |
1928 | * write MMIO send doorbell. | |
1929 | */ | |
1930 | wmb(); | |
1931 | mthca_write64(doorbell, | |
1932 | dev->kar + MTHCA_SEND_DOORBELL, | |
1933 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1934 | } | |
1935 | ||
1da177e4 LT |
1936 | if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { |
1937 | mthca_err(dev, "SQ %06x full (%u head, %u tail," | |
1938 | " %d max, %d nreq)\n", qp->qpn, | |
1939 | qp->sq.head, qp->sq.tail, | |
1940 | qp->sq.max, nreq); | |
1941 | err = -ENOMEM; | |
1942 | *bad_wr = wr; | |
1943 | goto out; | |
1944 | } | |
1945 | ||
1946 | wqe = get_send_wqe(qp, ind); | |
1947 | prev_wqe = qp->sq.last; | |
1948 | qp->sq.last = wqe; | |
1949 | ||
1950 | ((struct mthca_next_seg *) wqe)->flags = | |
1951 | ((wr->send_flags & IB_SEND_SIGNALED) ? | |
1952 | cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | | |
1953 | ((wr->send_flags & IB_SEND_SOLICITED) ? | |
1954 | cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | | |
1955 | cpu_to_be32(1); | |
1956 | if (wr->opcode == IB_WR_SEND_WITH_IMM || | |
1957 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | |
3fba2317 | 1958 | ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; |
1da177e4 LT |
1959 | |
1960 | wqe += sizeof (struct mthca_next_seg); | |
1961 | size = sizeof (struct mthca_next_seg) / 16; | |
1962 | ||
1963 | switch (qp->transport) { | |
ddb934e0 RD |
1964 | case RC: |
1965 | switch (wr->opcode) { | |
1966 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
1967 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
1968 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1969 | cpu_to_be64(wr->wr.atomic.remote_addr); | |
1970 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1971 | cpu_to_be32(wr->wr.atomic.rkey); | |
1972 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1973 | ||
1974 | wqe += sizeof (struct mthca_raddr_seg); | |
1975 | ||
1976 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | |
1977 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1978 | cpu_to_be64(wr->wr.atomic.swap); | |
1979 | ((struct mthca_atomic_seg *) wqe)->compare = | |
1980 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1981 | } else { | |
1982 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1983 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1984 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | |
1985 | } | |
1986 | ||
1987 | wqe += sizeof (struct mthca_atomic_seg); | |
62abb841 MT |
1988 | size += (sizeof (struct mthca_raddr_seg) + |
1989 | sizeof (struct mthca_atomic_seg)) / 16; | |
ddb934e0 RD |
1990 | break; |
1991 | ||
9e6970b5 RD |
1992 | case IB_WR_RDMA_READ: |
1993 | case IB_WR_RDMA_WRITE: | |
1994 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1995 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1996 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1997 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1998 | cpu_to_be32(wr->wr.rdma.rkey); | |
1999 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
2000 | wqe += sizeof (struct mthca_raddr_seg); | |
2001 | size += sizeof (struct mthca_raddr_seg) / 16; | |
2002 | break; | |
2003 | ||
2004 | default: | |
2005 | /* No extra segments required for sends */ | |
2006 | break; | |
2007 | } | |
2008 | ||
2009 | break; | |
2010 | ||
2011 | case UC: | |
2012 | switch (wr->opcode) { | |
ddb934e0 RD |
2013 | case IB_WR_RDMA_WRITE: |
2014 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
ddb934e0 RD |
2015 | ((struct mthca_raddr_seg *) wqe)->raddr = |
2016 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
2017 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
2018 | cpu_to_be32(wr->wr.rdma.rkey); | |
2019 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
2020 | wqe += sizeof (struct mthca_raddr_seg); | |
2021 | size += sizeof (struct mthca_raddr_seg) / 16; | |
2022 | break; | |
2023 | ||
2024 | default: | |
2025 | /* No extra segments required for sends */ | |
2026 | break; | |
2027 | } | |
2028 | ||
2029 | break; | |
2030 | ||
1da177e4 LT |
2031 | case UD: |
2032 | memcpy(((struct mthca_arbel_ud_seg *) wqe)->av, | |
2033 | to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); | |
2034 | ((struct mthca_arbel_ud_seg *) wqe)->dqpn = | |
2035 | cpu_to_be32(wr->wr.ud.remote_qpn); | |
2036 | ((struct mthca_arbel_ud_seg *) wqe)->qkey = | |
2037 | cpu_to_be32(wr->wr.ud.remote_qkey); | |
2038 | ||
2039 | wqe += sizeof (struct mthca_arbel_ud_seg); | |
2040 | size += sizeof (struct mthca_arbel_ud_seg) / 16; | |
2041 | break; | |
2042 | ||
2043 | case MLX: | |
2044 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | |
2045 | wqe - sizeof (struct mthca_next_seg), | |
2046 | wqe); | |
2047 | if (err) { | |
2048 | *bad_wr = wr; | |
2049 | goto out; | |
2050 | } | |
2051 | wqe += sizeof (struct mthca_data_seg); | |
2052 | size += sizeof (struct mthca_data_seg) / 16; | |
2053 | break; | |
2054 | } | |
2055 | ||
2056 | if (wr->num_sge > qp->sq.max_gs) { | |
2057 | mthca_err(dev, "too many gathers\n"); | |
2058 | err = -EINVAL; | |
2059 | *bad_wr = wr; | |
2060 | goto out; | |
2061 | } | |
2062 | ||
2063 | for (i = 0; i < wr->num_sge; ++i) { | |
2064 | ((struct mthca_data_seg *) wqe)->byte_count = | |
2065 | cpu_to_be32(wr->sg_list[i].length); | |
2066 | ((struct mthca_data_seg *) wqe)->lkey = | |
2067 | cpu_to_be32(wr->sg_list[i].lkey); | |
2068 | ((struct mthca_data_seg *) wqe)->addr = | |
2069 | cpu_to_be64(wr->sg_list[i].addr); | |
2070 | wqe += sizeof (struct mthca_data_seg); | |
2071 | size += sizeof (struct mthca_data_seg) / 16; | |
2072 | } | |
2073 | ||
2074 | /* Add one more inline data segment for ICRC */ | |
2075 | if (qp->transport == MLX) { | |
2076 | ((struct mthca_data_seg *) wqe)->byte_count = | |
2077 | cpu_to_be32((1 << 31) | 4); | |
2078 | ((u32 *) wqe)[1] = 0; | |
2079 | wqe += sizeof (struct mthca_data_seg); | |
2080 | size += sizeof (struct mthca_data_seg) / 16; | |
2081 | } | |
2082 | ||
2083 | qp->wrid[ind + qp->rq.max] = wr->wr_id; | |
2084 | ||
2085 | if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { | |
2086 | mthca_err(dev, "opcode invalid\n"); | |
2087 | err = -EINVAL; | |
2088 | *bad_wr = wr; | |
2089 | goto out; | |
2090 | } | |
2091 | ||
d6cff021 RD |
2092 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
2093 | cpu_to_be32(((ind << qp->sq.wqe_shift) + | |
2094 | qp->send_wqe_offset) | | |
2095 | mthca_opcode[wr->opcode]); | |
2096 | wmb(); | |
2097 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
7667abd1 | 2098 | cpu_to_be32(MTHCA_NEXT_DBD | size | |
b0b3a8e1 RD |
2099 | ((wr->send_flags & IB_SEND_FENCE) ? |
2100 | MTHCA_NEXT_FENCE : 0)); | |
1da177e4 LT |
2101 | |
2102 | if (!size0) { | |
2103 | size0 = size; | |
2104 | op0 = mthca_opcode[wr->opcode]; | |
e54b82d7 MT |
2105 | f0 = wr->send_flags & IB_SEND_FENCE ? |
2106 | MTHCA_SEND_DOORBELL_FENCE : 0; | |
1da177e4 LT |
2107 | } |
2108 | ||
2109 | ++ind; | |
2110 | if (unlikely(ind >= qp->sq.max)) | |
2111 | ind -= qp->sq.max; | |
2112 | } | |
2113 | ||
2114 | out: | |
2115 | if (likely(nreq)) { | |
1da177e4 LT |
2116 | doorbell[0] = cpu_to_be32((nreq << 24) | |
2117 | ((qp->sq.head & 0xffff) << 8) | | |
2118 | f0 | op0); | |
2119 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
2120 | ||
2121 | qp->sq.head += nreq; | |
2122 | ||
2123 | /* | |
2124 | * Make sure that descriptors are written before | |
2125 | * doorbell record. | |
2126 | */ | |
2127 | wmb(); | |
2128 | *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); | |
2129 | ||
2130 | /* | |
2131 | * Make sure doorbell record is written before we | |
2132 | * write MMIO send doorbell. | |
2133 | */ | |
2134 | wmb(); | |
2135 | mthca_write64(doorbell, | |
2136 | dev->kar + MTHCA_SEND_DOORBELL, | |
2137 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
2138 | } | |
2139 | ||
1f5c23e2 AK |
2140 | /* |
2141 | * Make sure doorbells don't leak out of SQ spinlock and reach | |
2142 | * the HCA out of order: | |
2143 | */ | |
2144 | mmiowb(); | |
2145 | ||
1da177e4 LT |
2146 | spin_unlock_irqrestore(&qp->sq.lock, flags); |
2147 | return err; | |
2148 | } | |
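
The mem-free (Arbel) ring sequence above needs both barriers: WQEs must be globally visible before the doorbell record in memory is updated, and the record must be visible before the MMIO doorbell, because the HCA may fetch the record rather than rely on the MMIO write. The driver open-codes the sequence twice; a sketch of how it could be factored, assuming the usual mthca headers:

static void arbel_ring_sq(struct mthca_dev *dev, struct mthca_qp *qp,
			  __be32 db[2])
{
	wmb();			/* descriptors before doorbell record */
	*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

	wmb();			/* doorbell record before MMIO doorbell */
	mthca_write64(db, dev->kar + MTHCA_SEND_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}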
2149 | ||
2150 | int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
2151 | struct ib_recv_wr **bad_wr) | |
2152 | { | |
2153 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
2154 | struct mthca_qp *qp = to_mqp(ibqp); | |
2155 | unsigned long flags; | |
2156 | int err = 0; | |
2157 | int nreq; | |
2158 | int ind; | |
2159 | int i; | |
2160 | void *wqe; | |
2161 | ||
2fa5e2eb | 2162 | spin_lock_irqsave(&qp->rq.lock, flags); |
1da177e4 LT |
2163 | |
2164 | /* XXX check that state is OK to post receive */ | |
2165 | ||
2166 | ind = qp->rq.head & (qp->rq.max - 1); | |
2167 | ||
2168 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
2169 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | |
2170 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | |
2171 | " %d max, %d nreq)\n", qp->qpn, | |
2172 | qp->rq.head, qp->rq.tail, | |
2173 | qp->rq.max, nreq); | |
2174 | err = -ENOMEM; | |
2175 | *bad_wr = wr; | |
2176 | goto out; | |
2177 | } | |
2178 | ||
2179 | wqe = get_recv_wqe(qp, ind); | |
2180 | ||
2181 | ((struct mthca_next_seg *) wqe)->flags = 0; | |
2182 | ||
2183 | wqe += sizeof (struct mthca_next_seg); | |
2184 | ||
2185 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
2186 | err = -EINVAL; | |
2187 | *bad_wr = wr; | |
2188 | goto out; | |
2189 | } | |
2190 | ||
2191 | for (i = 0; i < wr->num_sge; ++i) { | |
2192 | ((struct mthca_data_seg *) wqe)->byte_count = | |
2193 | cpu_to_be32(wr->sg_list[i].length); | |
2194 | ((struct mthca_data_seg *) wqe)->lkey = | |
2195 | cpu_to_be32(wr->sg_list[i].lkey); | |
2196 | ((struct mthca_data_seg *) wqe)->addr = | |
2197 | cpu_to_be64(wr->sg_list[i].addr); | |
2198 | wqe += sizeof (struct mthca_data_seg); | |
2199 | } | |
2200 | ||
2201 | if (i < qp->rq.max_gs) { | |
2202 | ((struct mthca_data_seg *) wqe)->byte_count = 0; | |
ddf841f0 | 2203 | ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); |
1da177e4 LT |
2204 | ((struct mthca_data_seg *) wqe)->addr = 0; |
2205 | } | |
2206 | ||
2207 | qp->wrid[ind] = wr->wr_id; | |
2208 | ||
2209 | ++ind; | |
2210 | if (unlikely(ind >= qp->rq.max)) | |
2211 | ind -= qp->rq.max; | |
2212 | } | |
2213 | out: | |
2214 | if (likely(nreq)) { | |
2215 | qp->rq.head += nreq; | |
2216 | ||
2217 | /* | |
2218 | * Make sure that descriptors are written before | |
2219 | * doorbell record. | |
2220 | */ | |
2221 | wmb(); | |
2222 | *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); | |
2223 | } | |
2224 | ||
2225 | spin_unlock_irqrestore(&qp->rq.lock, flags); | |
2226 | return err; | |
2227 | } | |
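
Mem-free receive WQEs have a fixed stride and no size field, so a scatter list shorter than rq.max_gs is terminated with a sentinel entry the HCA stops at: zero length and the invalid lkey, as open-coded above. A hypothetical helper for the idiom:

static void mthca_set_inval_seg(struct mthca_data_seg *seg)
{
	seg->byte_count = 0;
	seg->lkey       = cpu_to_be32(MTHCA_INVAL_LKEY);
	seg->addr       = 0;
}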
2228 | ||
d9b98b0f RD |
2229 | void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, |
2230 | int index, int *dbd, __be32 *new_wqe) | |
1da177e4 LT |
2231 | { |
2232 | struct mthca_next_seg *next; | |
2233 | ||
ec34a922 RD |
2234 | /* |
2235 | * For SRQs, all WQEs generate a CQE, so we're always at the | |
2236 | * end of the doorbell chain. | |
2237 | */ | |
2238 | if (qp->ibqp.srq) { | |
2239 | *new_wqe = 0; | |
d9b98b0f | 2240 | return; |
ec34a922 RD |
2241 | } |
2242 | ||
1da177e4 LT |
2243 | if (is_send) |
2244 | next = get_send_wqe(qp, index); | |
2245 | else | |
2246 | next = get_recv_wqe(qp, index); | |
2247 | ||
288bdeb4 | 2248 | *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); |
1da177e4 LT |
2249 | if (next->ee_nds & cpu_to_be32(0x3f)) |
2250 | *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | | |
2251 | (next->ee_nds & cpu_to_be32(0x3f)); | |
2252 | else | |
2253 | *new_wqe = 0; | |
1da177e4 LT |
2254 | } |
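
mthca_free_err_wqe() relies on the WQE chaining encoding used throughout this file: nda_op carries the next WQE's address in its upper bits with the opcode in the low 6 bits, and the low 6 bits of ee_nds give the next WQE's size in 16-byte chunks, so a size of zero terminates the chain. A sketch of the decode under those assumptions (the helper is hypothetical):

static int mthca_next_wqe(struct mthca_next_seg *next, u32 *nda, unsigned *nds)
{
	*nda = be32_to_cpu(next->nda_op) & ~0x3f;	/* next WQE address */
	*nds = be32_to_cpu(next->ee_nds) & 0x3f;	/* size in 16B units */

	return *nds != 0;	/* zero size: no chained descriptor */
}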
2255 | ||
f4f3d0f0 | 2256 | int mthca_init_qp_table(struct mthca_dev *dev) |
1da177e4 LT |
2257 | { |
2258 | int err; | |
2259 | u8 status; | |
2260 | int i; | |
2261 | ||
2262 | spin_lock_init(&dev->qp_table.lock); | |
2263 | ||
2264 | /* | |
2265 | * We reserve 2 extra QPs per port for the special QPs. The | |
2266 | * special QP for port 1 has to be even, so round up. | |
2267 | */ | |
2268 | dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; | |
2269 | err = mthca_alloc_init(&dev->qp_table.alloc, | |
2270 | dev->limits.num_qps, | |
2271 | (1 << 24) - 1, | |
2272 | dev->qp_table.sqp_start + | |
2273 | MTHCA_MAX_PORTS * 2); | |
2274 | if (err) | |
2275 | return err; | |
2276 | ||
2277 | err = mthca_array_init(&dev->qp_table.qp, | |
2278 | dev->limits.num_qps); | |
2279 | if (err) { | |
2280 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2281 | return err; | |
2282 | } | |
2283 | ||
2284 | for (i = 0; i < 2; ++i) { | |
2285 | err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, | |
2286 | dev->qp_table.sqp_start + i * 2, | |
2287 | &status); | |
2288 | if (err) | |
2289 | goto err_out; | |
2290 | if (status) { | |
2291 | mthca_warn(dev, "CONF_SPECIAL_QP returned " | |
2292 | "status %02x, aborting.\n", | |
2293 | status); | |
2294 | err = -EINVAL; | |
2295 | goto err_out; | |
2296 | } | |
2297 | } | |
2298 | return 0; | |
2299 | ||
2300 | err_out: | |
2301 | for (i = 0; i < 2; ++i) | |
2302 | mthca_CONF_SPECIAL_QP(dev, i, 0, &status); | |
2303 | ||
2304 | mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); | |
2305 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2306 | ||
2307 | return err; | |
2308 | } | |
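
The reserved special QP range is laid out with one QP per port per type, and mthca_CONF_SPECIAL_QP() is given the base QPN of each pair (sqp_start for the SMI pair, sqp_start + 2 for the GSI pair). Assuming the per-port offset implied by that layout, a hypothetical helper for the numbering; sqp_start itself is rounded up to even above because the port 1 special QP must be even:

static u32 mthca_special_qpn(struct mthca_dev *dev, int is_gsi, int port)
{
	return dev->qp_table.sqp_start + 2 * !!is_gsi + (port - 1);
}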
2309 | ||
e1f7868c | 2310 | void mthca_cleanup_qp_table(struct mthca_dev *dev) |
1da177e4 LT |
2311 | { |
2312 | int i; | |
2313 | u8 status; | |
2314 | ||
2315 | for (i = 0; i < 2; ++i) | |
2316 | mthca_CONF_SPECIAL_QP(dev, i, 0, &status); | |
2317 | ||
71eea47d | 2318 | mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); |
1da177e4 LT |
2319 | mthca_alloc_cleanup(&dev->qp_table.alloc); |
2320 | } |