/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

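/*
 * The structures below mirror the hardware's QP context layout, so
 * field order and sizes must be preserved, and all multi-byte fields
 * are big-endian (__be16/__be32) as the HCA expects.
 */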
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));

enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

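/*
 * A QP's WQE buffer is either one physically contiguous ("direct")
 * allocation or a list of pages; these helpers hide that difference
 * when computing the address of WQE n.
 */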
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

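/*
 * Compute the hardware remote-access bits (RRE/RAE/RWE) for a
 * transition, falling back to the QP's current values for any
 * attribute not being modified.  With a responder depth of zero,
 * only remote writes can remain enabled.
 */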
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate & 0x7;
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
	if (err)
		goto out;
	if (status) {
		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
		err = -EINVAL;
		goto out;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp_attr->qp_state            = to_ib_qp_state(mthca_state);
	qp_attr->cur_qp_state        = qp_attr->qp_state;
	qp_attr->path_mtu            = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state      =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey                = be32_to_cpu(context->qkey);
	qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
	to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);

	qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->port_num      = qp_attr->ah_attr.port_num;
	qp_attr->timeout       = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt     = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry     = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_port_num  = qp_attr->alt_ah_attr.port_num;
	qp_attr->alt_timeout   = context->alt_path.ackto >> 3;
	qp_init_attr->cap      = qp_attr->cap;

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}

static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = !!ah->static_rate;

	if (ah->ah_flags & IB_AH_GRH) {
		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
}

int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		return -EINVAL;
	}

	/*
	 * Validate the alternate path attributes here, before the
	 * mailbox is allocated, so that failing early cannot leak it.
	 */
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
			return -EINVAL;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			return -EINVAL;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

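	/*
	 * Bit 31 of the doorbell parameter asks the HCA to raise an
	 * "SQ drained" async event once the RTS->SQD transition has
	 * actually completed.
	 */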
	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
			   cur_state, new_state, status);
		err = -EINVAL;
	}

	if (!err) {
		qp->state = new_state;
		if (attr_mask & IB_QP_ACCESS_FLAGS)
			qp->atomic_rd_en = attr->qp_access_flags;
		if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
			qp->resp_depth = attr->max_dest_rd_atomic;
	}

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_init(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

	return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

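/*
 * Mem-free (Arbel-mode) HCAs keep QP context in host memory, so the
 * QP, extended QP, and RDB table entries must be mapped before the
 * firmware can use the QP, and doorbell records must be allocated
 * for kernel QPs.
 */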
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	init_waitqueue_head(&qp->wait);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

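	/*
	 * On mem-free HCAs, pre-link each receive WQE to its
	 * successor and mark all of its scatter entries invalid,
	 * then chain each send WQE to the next one as well.
	 */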
	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

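/*
 * Mem-free HCAs require queue sizes to be a power of two, so round
 * the requested WR counts up for them; Tavor-mode hardware uses the
 * requested sizes as-is.
 */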
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr  > dev->limits.max_wqes ||
	    cap->max_recv_wr  > dev->limits.max_wqes ||
	    cap->max_send_sge > dev->limits.max_sg   ||
	    cap->max_recv_sge > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

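/*
 * Special QPs (QP0 and QP1 for each port) live in a reserved QPN
 * range starting at sqp_start.  Their UD headers are built in
 * software, so each special QP also gets a DMA-coherent buffer to
 * hold the headers of in-flight sends.
 */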
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port = port;
	sqp->qp.qpn = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

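/*
 * Check whether a work queue has room for nreq more requests.  The
 * unlocked head/tail read can race with completion processing, so on
 * apparent overflow the counters are re-read under the CQ lock
 * before the post is actually refused.
 */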
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}

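/*
 * Post a chain of send work requests on a Tavor-mode QP: each WQE is
 * built in place in the send queue and linked to the previous one.
 */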
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
1585 | cpu_to_be32(wr->sg_list[i].length); | |
1586 | ((struct mthca_data_seg *) wqe)->lkey = | |
1587 | cpu_to_be32(wr->sg_list[i].lkey); | |
1588 | ((struct mthca_data_seg *) wqe)->addr = | |
1589 | cpu_to_be64(wr->sg_list[i].addr); | |
1590 | wqe += sizeof (struct mthca_data_seg); | |
1591 | size += sizeof (struct mthca_data_seg) / 16; | |
1592 | } | |
1593 | ||
1594 | /* Add one more data segment to reserve room for the ICRC; bit 31 of byte_count marks the segment as inline data */ |
1595 | if (qp->transport == MLX) { | |
1596 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1597 | cpu_to_be32((1 << 31) | 4); | |
1598 | ((u32 *) wqe)[1] = 0; | |
1599 | wqe += sizeof (struct mthca_data_seg); | |
1600 | size += sizeof (struct mthca_data_seg) / 16; | |
1601 | } | |
1602 | ||
1603 | qp->wrid[ind + qp->rq.max] = wr->wr_id; | |
1604 | ||
1605 | if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { | |
1606 | mthca_err(dev, "opcode invalid\n"); | |
1607 | err = -EINVAL; | |
1608 | *bad_wr = wr; | |
1609 | goto out; | |
1610 | } | |
1611 | ||
d6cff021 RD |
1612 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
1613 | cpu_to_be32(((ind << qp->sq.wqe_shift) + | |
1614 | qp->send_wqe_offset) | | |
1615 | mthca_opcode[wr->opcode]); | |
1616 | wmb(); | |
1617 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
7667abd1 DB |
1618 | cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size | |
1619 | ((wr->send_flags & IB_SEND_FENCE) ? | |
1620 | MTHCA_NEXT_FENCE : 0)); | |
1da177e4 LT |
1621 | |
1622 | if (!size0) { | |
1623 | size0 = size; | |
1624 | op0 = mthca_opcode[wr->opcode]; | |
1625 | } | |
1626 | ||
1627 | ++ind; | |
1628 | if (unlikely(ind >= qp->sq.max)) | |
1629 | ind -= qp->sq.max; | |
1630 | } | |
1631 | ||
1632 | out: | |
1633 | if (likely(nreq)) { | |
97f52eb4 | 1634 | __be32 doorbell[2]; |
1da177e4 LT |
1635 | |
1636 | doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + | |
1637 | qp->send_wqe_offset) | f0 | op0); | |
1638 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
1639 | ||
1640 | wmb(); | |
1641 | ||
1642 | mthca_write64(doorbell, | |
1643 | dev->kar + MTHCA_SEND_DOORBELL, | |
1644 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1645 | } | |
1646 | ||
1647 | qp->sq.next_ind = ind; | |
1648 | qp->sq.head += nreq; | |
1649 | ||
1650 | spin_unlock_irqrestore(&qp->sq.lock, flags); | |
1651 | return err; | |
1652 | } | |
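/*
 * Illustrative only: a minimal sketch, not mthca driver code, of how
 * a kernel consumer reaches the post-send paths in this file through
 * the 2.6-era ib_verbs API.  The function name and parameters here
 * are hypothetical and error handling is elided.
 */
static int example_post_rdma_write(struct ib_qp *qp,
				   u64 local_addr, u32 lkey,
				   u64 remote_addr, u32 rkey, u32 len)
{
	struct ib_sge sge = {
		.addr   = local_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.opcode     = IB_WR_RDMA_WRITE,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = &sge,
		.num_sge    = 1,
	};
	struct ib_send_wr *bad_wr;

	wr.wr.rdma.remote_addr = remote_addr;
	wr.wr.rdma.rkey        = rkey;

	/* Dispatches to mthca_tavor_post_send() or mthca_arbel_post_send(). */
	return ib_post_send(qp, &wr, &bad_wr);
}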
1653 | ||
1654 | int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
1655 | struct ib_recv_wr **bad_wr) | |
1656 | { | |
1657 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1658 | struct mthca_qp *qp = to_mqp(ibqp); | |
ae57e24a | 1659 | __be32 doorbell[2]; |
1da177e4 LT |
1660 | unsigned long flags; |
1661 | int err = 0; | |
1662 | int nreq; | |
1663 | int i; | |
1664 | int size; | |
1665 | int size0 = 0; | |
1666 | int ind; | |
1667 | void *wqe; | |
1668 | void *prev_wqe; | |
1669 | ||
1670 | spin_lock_irqsave(&qp->rq.lock, flags); | |
1671 | ||
1672 | /* XXX check that state is OK to post receive */ | |
1673 | ||
1674 | ind = qp->rq.next_ind; | |
1675 | ||
1676 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
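/*
 * Only a limited number of receive WQEs can be described by a single
 * Tavor doorbell, so flush the accumulated requests every
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB posts and restart the count; the
 * flush doorbell below omits a request count, denoting a full batch.
 */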
ae57e24a MT |
1677 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { |
1678 | nreq = 0; | |
1679 | ||
1680 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | |
1681 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | |
1682 | ||
1683 | wmb(); | |
1684 | ||
1685 | mthca_write64(doorbell, | |
1686 | dev->kar + MTHCA_RECEIVE_DOORBELL, | |
1687 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1688 | ||
1689 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | |
1690 | size0 = 0; | |
1691 | } | |
1692 | ||
1da177e4 LT |
1693 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
1694 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | |
1695 | " %d max, %d nreq)\n", qp->qpn, | |
1696 | qp->rq.head, qp->rq.tail, | |
1697 | qp->rq.max, nreq); | |
1698 | err = -ENOMEM; | |
1699 | *bad_wr = wr; | |
1700 | goto out; | |
1701 | } | |
1702 | ||
1703 | wqe = get_recv_wqe(qp, ind); | |
1704 | prev_wqe = qp->rq.last; | |
1705 | qp->rq.last = wqe; | |
1706 | ||
1707 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | |
1708 | ((struct mthca_next_seg *) wqe)->ee_nds = | |
1709 | cpu_to_be32(MTHCA_NEXT_DBD); | |
1710 | ((struct mthca_next_seg *) wqe)->flags = 0; | |
1711 | ||
1712 | wqe += sizeof (struct mthca_next_seg); | |
1713 | size = sizeof (struct mthca_next_seg) / 16; | |
1714 | ||
1715 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
1716 | err = -EINVAL; | |
1717 | *bad_wr = wr; | |
1718 | goto out; | |
1719 | } | |
1720 | ||
1721 | for (i = 0; i < wr->num_sge; ++i) { | |
1722 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1723 | cpu_to_be32(wr->sg_list[i].length); | |
1724 | ((struct mthca_data_seg *) wqe)->lkey = | |
1725 | cpu_to_be32(wr->sg_list[i].lkey); | |
1726 | ((struct mthca_data_seg *) wqe)->addr = | |
1727 | cpu_to_be64(wr->sg_list[i].addr); | |
1728 | wqe += sizeof (struct mthca_data_seg); | |
1729 | size += sizeof (struct mthca_data_seg) / 16; | |
1730 | } | |
1731 | ||
1732 | qp->wrid[ind] = wr->wr_id; | |
1733 | ||
d6cff021 RD |
1734 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
1735 | cpu_to_be32((ind << qp->rq.wqe_shift) | 1); | |
1736 | wmb(); | |
1737 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
1738 | cpu_to_be32(MTHCA_NEXT_DBD | size); | |
1da177e4 LT |
1739 | |
1740 | if (!size0) | |
1741 | size0 = size; | |
1742 | ||
1743 | ++ind; | |
1744 | if (unlikely(ind >= qp->rq.max)) | |
1745 | ind -= qp->rq.max; | |
1746 | } | |
1747 | ||
1748 | out: | |
1749 | if (likely(nreq)) { | |
1da177e4 LT |
1750 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); |
1751 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); | |
1752 | ||
1753 | wmb(); | |
1754 | ||
1755 | mthca_write64(doorbell, | |
1756 | dev->kar + MTHCA_RECEIVE_DOORBELL, | |
1757 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1758 | } | |
1759 | ||
1760 | qp->rq.next_ind = ind; | |
1761 | qp->rq.head += nreq; | |
1762 | ||
1763 | spin_unlock_irqrestore(&qp->rq.lock, flags); | |
1764 | return err; | |
1765 | } | |
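/*
 * A matching receive-side sketch (again hypothetical, not driver
 * code): posting a single buffer through ib_post_recv(), which lands
 * in mthca_tavor_post_receive() or mthca_arbel_post_receive()
 * depending on the HCA generation.
 */
static int example_post_recv_buffer(struct ib_qp *qp, u64 addr,
				    u32 lkey, u32 len, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}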
1766 | ||
1767 | int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |
1768 | struct ib_send_wr **bad_wr) | |
1769 | { | |
1770 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1771 | struct mthca_qp *qp = to_mqp(ibqp); | |
e0ae9ecf | 1772 | __be32 doorbell[2]; |
1da177e4 LT |
1773 | void *wqe; |
1774 | void *prev_wqe; | |
1775 | unsigned long flags; | |
1776 | int err = 0; | |
1777 | int nreq; | |
1778 | int i; | |
1779 | int size; | |
1780 | int size0 = 0; | |
1781 | u32 f0 = 0; | |
1782 | int ind; | |
1783 | u8 op0 = 0; | |
1784 | ||
1785 | spin_lock_irqsave(&qp->sq.lock, flags); | |
1786 | ||
1787 | /* XXX check that state is OK to post send */ | |
1788 | ||
1789 | ind = qp->sq.head & (qp->sq.max - 1); | |
1790 | ||
1791 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
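/*
 * As on the Tavor receive path, one send doorbell can only describe
 * a limited number of WQEs, so on the mem-free path the doorbell is
 * rung and the count restarted every MTHCA_ARBEL_MAX_WQES_PER_SEND_DB
 * requests.
 */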
e0ae9ecf MT |
1792 | if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { |
1793 | nreq = 0; | |
1794 | ||
1795 | doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | | |
1796 | ((qp->sq.head & 0xffff) << 8) | | |
1797 | f0 | op0); | |
1798 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
1799 | ||
1800 | qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; | |
1801 | size0 = 0; | |
1802 | ||
1803 | /* | |
1804 | * Make sure that descriptors are written before | |
1805 | * doorbell record. | |
1806 | */ | |
1807 | wmb(); | |
1808 | *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); | |
1809 | ||
1810 | /* | |
1811 | * Make sure doorbell record is written before we | |
1812 | * write MMIO send doorbell. | |
1813 | */ | |
1814 | wmb(); | |
1815 | mthca_write64(doorbell, | |
1816 | dev->kar + MTHCA_SEND_DOORBELL, | |
1817 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1818 | } | |
1819 | ||
1da177e4 LT |
1820 | if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { |
1821 | mthca_err(dev, "SQ %06x full (%u head, %u tail," | |
1822 | " %d max, %d nreq)\n", qp->qpn, | |
1823 | qp->sq.head, qp->sq.tail, | |
1824 | qp->sq.max, nreq); | |
1825 | err = -ENOMEM; | |
1826 | *bad_wr = wr; | |
1827 | goto out; | |
1828 | } | |
1829 | ||
1830 | wqe = get_send_wqe(qp, ind); | |
1831 | prev_wqe = qp->sq.last; | |
1832 | qp->sq.last = wqe; | |
1833 | ||
1834 | ((struct mthca_next_seg *) wqe)->flags = | |
1835 | ((wr->send_flags & IB_SEND_SIGNALED) ? | |
1836 | cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | | |
1837 | ((wr->send_flags & IB_SEND_SOLICITED) ? | |
1838 | cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | | |
1839 | cpu_to_be32(1); | |
1840 | if (wr->opcode == IB_WR_SEND_WITH_IMM || | |
1841 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | |
3fba2317 | 1842 | ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; |
1da177e4 LT |
1843 | |
1844 | wqe += sizeof (struct mthca_next_seg); | |
1845 | size = sizeof (struct mthca_next_seg) / 16; | |
1846 | ||
1847 | switch (qp->transport) { | |
ddb934e0 RD |
1848 | case RC: |
1849 | switch (wr->opcode) { | |
1850 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
1851 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
1852 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1853 | cpu_to_be64(wr->wr.atomic.remote_addr); | |
1854 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1855 | cpu_to_be32(wr->wr.atomic.rkey); | |
1856 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1857 | ||
1858 | wqe += sizeof (struct mthca_raddr_seg); | |
1859 | ||
1860 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | |
1861 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1862 | cpu_to_be64(wr->wr.atomic.swap); | |
1863 | ((struct mthca_atomic_seg *) wqe)->compare = | |
1864 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1865 | } else { | |
1866 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1867 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1868 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | |
1869 | } | |
1870 | ||
1871 | wqe += sizeof (struct mthca_atomic_seg); | |
62abb841 MT |
1872 | size += (sizeof (struct mthca_raddr_seg) + |
1873 | sizeof (struct mthca_atomic_seg)) / 16; | |
ddb934e0 RD |
1874 | break; |
1875 | ||
9e6970b5 RD |
1876 | case IB_WR_RDMA_READ: |
1877 | case IB_WR_RDMA_WRITE: | |
1878 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1879 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1880 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1881 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1882 | cpu_to_be32(wr->wr.rdma.rkey); | |
1883 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1884 | wqe += sizeof (struct mthca_raddr_seg); | |
1885 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1886 | break; | |
1887 | ||
1888 | default: | |
1889 | /* No extra segments required for sends */ | |
1890 | break; | |
1891 | } | |
1892 | ||
1893 | break; | |
1894 | ||
1895 | case UC: | |
1896 | switch (wr->opcode) { | |
ddb934e0 RD |
1897 | case IB_WR_RDMA_WRITE: |
1898 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
ddb934e0 RD |
1899 | ((struct mthca_raddr_seg *) wqe)->raddr = |
1900 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1901 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1902 | cpu_to_be32(wr->wr.rdma.rkey); | |
1903 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1904 | wqe += sizeof (struct mthca_raddr_seg); | |
1905 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1906 | break; | |
1907 | ||
1908 | default: | |
1909 | /* No extra segments required for sends */ | |
1910 | break; | |
1911 | } | |
1912 | ||
1913 | break; | |
1914 | ||
1da177e4 LT |
1915 | case UD: |
1916 | memcpy(((struct mthca_arbel_ud_seg *) wqe)->av, | |
1917 | to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); | |
1918 | ((struct mthca_arbel_ud_seg *) wqe)->dqpn = | |
1919 | cpu_to_be32(wr->wr.ud.remote_qpn); | |
1920 | ((struct mthca_arbel_ud_seg *) wqe)->qkey = | |
1921 | cpu_to_be32(wr->wr.ud.remote_qkey); | |
1922 | ||
1923 | wqe += sizeof (struct mthca_arbel_ud_seg); | |
1924 | size += sizeof (struct mthca_arbel_ud_seg) / 16; | |
1925 | break; | |
1926 | ||
1927 | case MLX: | |
1928 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | |
1929 | wqe - sizeof (struct mthca_next_seg), | |
1930 | wqe); | |
1931 | if (err) { | |
1932 | *bad_wr = wr; | |
1933 | goto out; | |
1934 | } | |
1935 | wqe += sizeof (struct mthca_data_seg); | |
1936 | size += sizeof (struct mthca_data_seg) / 16; | |
1937 | break; | |
1938 | } | |
1939 | ||
1940 | if (wr->num_sge > qp->sq.max_gs) { | |
1941 | mthca_err(dev, "too many gathers\n"); | |
1942 | err = -EINVAL; | |
1943 | *bad_wr = wr; | |
1944 | goto out; | |
1945 | } | |
1946 | ||
1947 | for (i = 0; i < wr->num_sge; ++i) { | |
1948 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1949 | cpu_to_be32(wr->sg_list[i].length); | |
1950 | ((struct mthca_data_seg *) wqe)->lkey = | |
1951 | cpu_to_be32(wr->sg_list[i].lkey); | |
1952 | ((struct mthca_data_seg *) wqe)->addr = | |
1953 | cpu_to_be64(wr->sg_list[i].addr); | |
1954 | wqe += sizeof (struct mthca_data_seg); | |
1955 | size += sizeof (struct mthca_data_seg) / 16; | |
1956 | } | |
1957 | ||
1958 | /* Add one more inline data segment for ICRC */ | |
1959 | if (qp->transport == MLX) { | |
1960 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1961 | cpu_to_be32((1 << 31) | 4); | |
1962 | ((u32 *) wqe)[1] = 0; | |
1963 | wqe += sizeof (struct mthca_data_seg); | |
1964 | size += sizeof (struct mthca_data_seg) / 16; | |
1965 | } | |
1966 | ||
1967 | qp->wrid[ind + qp->rq.max] = wr->wr_id; | |
1968 | ||
1969 | if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { | |
1970 | mthca_err(dev, "opcode invalid\n"); | |
1971 | err = -EINVAL; | |
1972 | *bad_wr = wr; | |
1973 | goto out; | |
1974 | } | |
1975 | ||
d6cff021 RD |
1976 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
1977 | cpu_to_be32(((ind << qp->sq.wqe_shift) + | |
1978 | qp->send_wqe_offset) | | |
1979 | mthca_opcode[wr->opcode]); | |
1980 | wmb(); | |
1981 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
7667abd1 DB |
1982 | cpu_to_be32(MTHCA_NEXT_DBD | size | |
1983 | ((wr->send_flags & IB_SEND_FENCE) ? | |
1984 | MTHCA_NEXT_FENCE : 0)); | |
1da177e4 LT |
1985 | |
1986 | if (!size0) { | |
1987 | size0 = size; | |
1988 | op0 = mthca_opcode[wr->opcode]; | |
1989 | } | |
1990 | ||
1991 | ++ind; | |
1992 | if (unlikely(ind >= qp->sq.max)) | |
1993 | ind -= qp->sq.max; | |
1994 | } | |
1995 | ||
1996 | out: | |
1997 | if (likely(nreq)) { | |
1da177e4 LT |
1998 | doorbell[0] = cpu_to_be32((nreq << 24) | |
1999 | ((qp->sq.head & 0xffff) << 8) | | |
2000 | f0 | op0); | |
2001 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
2002 | ||
2003 | qp->sq.head += nreq; | |
2004 | ||
2005 | /* | |
2006 | * Make sure that descriptors are written before | |
2007 | * doorbell record. | |
2008 | */ | |
2009 | wmb(); | |
2010 | *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); | |
2011 | ||
2012 | /* | |
2013 | * Make sure doorbell record is written before we | |
2014 | * write MMIO send doorbell. | |
2015 | */ | |
2016 | wmb(); | |
2017 | mthca_write64(doorbell, | |
2018 | dev->kar + MTHCA_SEND_DOORBELL, | |
2019 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
2020 | } | |
2021 | ||
2022 | spin_unlock_irqrestore(&qp->sq.lock, flags); | |
2023 | return err; | |
2024 | } | |
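/*
 * Mem-free work queues are sized to a power of two, which is why the
 * posting index in this function can be derived directly as
 * head & (max - 1) instead of tracking next_ind the way the Tavor
 * path does.
 */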
2025 | ||
2026 | int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
2027 | struct ib_recv_wr **bad_wr) | |
2028 | { | |
2029 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
2030 | struct mthca_qp *qp = to_mqp(ibqp); | |
2031 | unsigned long flags; | |
2032 | int err = 0; | |
2033 | int nreq; | |
2034 | int ind; | |
2035 | int i; | |
2036 | void *wqe; | |
2037 | ||
2fa5e2eb | 2038 | spin_lock_irqsave(&qp->rq.lock, flags); |
1da177e4 LT |
2039 | |
2040 | /* XXX check that state is OK to post receive */ | |
2041 | ||
2042 | ind = qp->rq.head & (qp->rq.max - 1); | |
2043 | ||
2044 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
2045 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | |
2046 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | |
2047 | " %d max, %d nreq)\n", qp->qpn, | |
2048 | qp->rq.head, qp->rq.tail, | |
2049 | qp->rq.max, nreq); | |
2050 | err = -ENOMEM; | |
2051 | *bad_wr = wr; | |
2052 | goto out; | |
2053 | } | |
2054 | ||
2055 | wqe = get_recv_wqe(qp, ind); | |
2056 | ||
2057 | ((struct mthca_next_seg *) wqe)->flags = 0; | |
2058 | ||
2059 | wqe += sizeof (struct mthca_next_seg); | |
2060 | ||
2061 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
2062 | err = -EINVAL; | |
2063 | *bad_wr = wr; | |
2064 | goto out; | |
2065 | } | |
2066 | ||
2067 | for (i = 0; i < wr->num_sge; ++i) { | |
2068 | ((struct mthca_data_seg *) wqe)->byte_count = | |
2069 | cpu_to_be32(wr->sg_list[i].length); | |
2070 | ((struct mthca_data_seg *) wqe)->lkey = | |
2071 | cpu_to_be32(wr->sg_list[i].lkey); | |
2072 | ((struct mthca_data_seg *) wqe)->addr = | |
2073 | cpu_to_be64(wr->sg_list[i].addr); | |
2074 | wqe += sizeof (struct mthca_data_seg); | |
2075 | } | |
2076 | ||
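/*
 * A scatter list shorter than max_gs is terminated with a
 * zero-length segment carrying the invalid lkey; mem-free hardware
 * stops at this sentinel rather than using an explicit segment
 * count.
 */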
2077 | if (i < qp->rq.max_gs) { | |
2078 | ((struct mthca_data_seg *) wqe)->byte_count = 0; | |
ddf841f0 | 2079 | ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); |
1da177e4 LT |
2080 | ((struct mthca_data_seg *) wqe)->addr = 0; |
2081 | } | |
2082 | ||
2083 | qp->wrid[ind] = wr->wr_id; | |
2084 | ||
2085 | ++ind; | |
2086 | if (unlikely(ind >= qp->rq.max)) | |
2087 | ind -= qp->rq.max; | |
2088 | } | |
2089 | out: | |
2090 | if (likely(nreq)) { | |
2091 | qp->rq.head += nreq; | |
2092 | ||
2093 | /* | |
2094 | * Make sure that descriptors are written before | |
2095 | * doorbell record. | |
2096 | */ | |
2097 | wmb(); | |
2098 | *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); | |
2099 | } | |
2100 | ||
2101 | spin_unlock_irqrestore(&qp->rq.lock, flags); | |
2102 | return err; | |
2103 | } | |
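/*
 * Unlike the Tavor path, mem-free receive posting needs no MMIO
 * doorbell at all: the HCA reads the doorbell record just updated,
 * so the memory barrier plus the record write suffice.
 */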
2104 | ||
d9b98b0f RD |
2105 | void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, |
2106 | int index, int *dbd, __be32 *new_wqe) | |
1da177e4 LT |
2107 | { |
2108 | struct mthca_next_seg *next; | |
2109 | ||
ec34a922 RD |
2110 | /* |
2111 | * For SRQs, all WQEs generate a CQE, so we're always at the | |
2112 | * end of the doorbell chain. | |
2113 | */ | |
2114 | if (qp->ibqp.srq) { | |
2115 | *new_wqe = 0; | |
d9b98b0f | 2116 | return; |
ec34a922 RD |
2117 | } |
2118 | ||
1da177e4 LT |
2119 | if (is_send) |
2120 | next = get_send_wqe(qp, index); | |
2121 | else | |
2122 | next = get_recv_wqe(qp, index); | |
2123 | ||
288bdeb4 | 2124 | *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); |
1da177e4 LT |
2125 | if (next->ee_nds & cpu_to_be32(0x3f)) |
2126 | *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | | |
2127 | (next->ee_nds & cpu_to_be32(0x3f)); | |
2128 | else | |
2129 | *new_wqe = 0; | |
1da177e4 LT |
2130 | } |
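/*
 * The 0x3f masks above separate the two fields packed into nda_op
 * and ee_nds: the low six bits of ee_nds hold the next WQE's size in
 * 16-byte chunks, and the upper bits of nda_op hold its address, so
 * a nonzero size means there is a chained WQE to re-link after the
 * error.
 */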
2131 | ||
2132 | int __devinit mthca_init_qp_table(struct mthca_dev *dev) | |
2133 | { | |
2134 | int err; | |
2135 | u8 status; | |
2136 | int i; | |
2137 | ||
2138 | spin_lock_init(&dev->qp_table.lock); | |
2139 | ||
2140 | /* | |
2141 | * We reserve 2 extra QPs per port for the special QPs. The | |
2142 | * special QP for port 1 has to be even, so round up. | |
2143 | */ | |
2144 | dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; | |
2145 | err = mthca_alloc_init(&dev->qp_table.alloc, | |
2146 | dev->limits.num_qps, | |
2147 | (1 << 24) - 1, | |
2148 | dev->qp_table.sqp_start + | |
2149 | MTHCA_MAX_PORTS * 2); | |
2150 | if (err) | |
2151 | return err; | |
2152 | ||
2153 | err = mthca_array_init(&dev->qp_table.qp, | |
2154 | dev->limits.num_qps); | |
2155 | if (err) { | |
2156 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2157 | return err; | |
2158 | } | |
2159 | ||
2160 | for (i = 0; i < 2; ++i) { | |
2161 | err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, | |
2162 | dev->qp_table.sqp_start + i * 2, | |
2163 | &status); | |
2164 | if (err) | |
2165 | goto err_out; | |
2166 | if (status) { | |
2167 | mthca_warn(dev, "CONF_SPECIAL_QP returned " | |
2168 | "status %02x, aborting.\n", | |
2169 | status); | |
2170 | err = -EINVAL; | |
2171 | goto err_out; | |
2172 | } | |
2173 | } | |
2174 | return 0; | |
2175 | ||
2176 | err_out: | |
2177 | for (i = 0; i < 2; ++i) | |
2178 | mthca_CONF_SPECIAL_QP(dev, i, 0, &status); | |
2179 | ||
2180 | mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); | |
2181 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2182 | ||
2183 | return err; | |
2184 | } | |
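/*
 * With the layout established above, the special QPs occupy four
 * consecutive QPNs starting at sqp_start: QP0 (SMI) for ports 1 and
 * 2, then QP1 (GSI) for ports 1 and 2, which is what the
 * sqp_start + i * 2 arithmetic in the CONF_SPECIAL_QP loop encodes.
 */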
2185 | ||
2186 | void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev) | |
2187 | { | |
2188 | int i; | |
2189 | u8 status; | |
2190 | ||
2191 | for (i = 0; i < 2; ++i) | |
2192 | mthca_CONF_SPECIAL_QP(dev, i, 0, &status); | |
2193 | ||
71eea47d | 2194 | mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); |
1da177e4 LT |
2195 | mthca_alloc_cleanup(&dev->qp_table.alloc); |
2196 | } |