/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

/*
 * Check if the device might use memory registration.  This is currently only
 * true for iWarp devices.  In the future we can hopefully fine tune this based
 * on HCA driver input.
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * We currently always use memory registrations for iWarp RDMA READs, and
 * have a debug option to force usage of MRs.
 *
 * XXX: In the future we can hopefully fine tune this based on HCA driver
 * input.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

static inline u32 rdma_rw_max_sge(struct ib_device *dev,
		enum dma_data_direction dir)
{
	return dir == DMA_TO_DEVICE ?
		dev->attrs.max_sge : dev->attrs.max_sge_rd;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}

static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		offset = 0;
	}

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 max_sge = rdma_rw_max_sge(dev, dir);
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			rdma_wr->wr.num_sge++;

			sge->addr = ib_sg_dma_address(dev, sg) + offset;
			sge->length = ib_sg_dma_len(dev, sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		if (i + 1 < ctx->nr_ops)
			rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
	ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}

/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @sg_offset: current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = ib_sg_dma_len(dev, sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
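
/*
 * Illustrative sketch of the intended calling convention: a ULP reading
 * from a remote buffer pairs rdma_rw_ctx_init() with rdma_rw_ctx_post()
 * and, once the completion for @cqe fires (or on error), with
 * rdma_rw_ctx_destroy().  The qp, cqe, scatterlist, remote_addr and rkey
 * below are assumed to come from the caller's connection setup:
 *
 *	struct rdma_rw_ctx ctx;
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sg, sg_cnt, 0,
 *			remote_addr, rkey, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, cqe, NULL);
 *	if (ret)
 *		rdma_rw_ctx_destroy(&ctx, qp, port_num, sg, sg_cnt,
 *				DMA_FROM_DEVICE);
 */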

/**
 * rdma_rw_ctx_signature_init - initialize an RW context with signature offload
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs: signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	struct ib_rdma_wr *rdma_wr;
	struct ib_send_wr *prev_wr = NULL;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large\n");
		return -EINVAL;
	}

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
	if (!ret) {
		ret = -ENOMEM;
		goto out_unmap_sg;
	}
	prot_sg_cnt = ret;

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
	if (!ctx->sig) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
	if (ret < 0)
		goto out_free_ctx;
	count += ret;
	prev_wr = &ctx->sig->data.reg_wr.wr;

	if (prot_sg_cnt) {
		ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
				prot_sg, prot_sg_cnt, 0);
		if (ret < 0)
			goto out_destroy_data_mr;
		count += ret;

		if (ctx->sig->prot.inv_wr.next)
			prev_wr->next = &ctx->sig->prot.inv_wr;
		else
			prev_wr->next = &ctx->sig->prot.reg_wr.wr;
		prev_wr = &ctx->sig->prot.reg_wr.wr;
	} else {
		ctx->sig->prot.mr = NULL;
	}

	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->sig->sig_mr) {
		ret = -EAGAIN;
		goto out_destroy_prot_mr;
	}

	if (ctx->sig->sig_mr->need_inval) {
		memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));

		ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
		ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;

		prev_wr->next = &ctx->sig->sig_inv_wr;
		prev_wr = &ctx->sig->sig_inv_wr;
	}

	ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	ctx->sig->sig_wr.wr.wr_cqe = NULL;
	ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
	ctx->sig->sig_wr.wr.num_sge = 1;
	ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	ctx->sig->sig_wr.sig_attrs = sig_attrs;
	ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
	if (prot_sg_cnt)
		ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
	prev_wr->next = &ctx->sig->sig_wr.wr;
	prev_wr = &ctx->sig->sig_wr.wr;
	count++;

	ctx->sig->sig_sge.addr = 0;
	ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
	if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
		ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;

	rdma_wr = &ctx->sig->data.wr;
	rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	prev_wr->next = &rdma_wr->wr;
	prev_wr = &rdma_wr->wr;
	count++;

	return count;

out_destroy_prot_mr:
	if (prot_sg_cnt)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
out_destroy_data_mr:
	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
out_free_ctx:
	kfree(ctx->sig);
out_unmap_prot_sg:
	ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
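
/*
 * Illustrative sketch of the @sig_attrs a caller might pass in, loosely
 * modeled on how T10-DIF offload is typically programmed (field names as
 * found in ib_verbs.h of this vintage; treat the exact values below as
 * placeholders, not a definitive recipe):
 *
 *	struct ib_sig_attrs sig_attrs = { };
 *
 *	sig_attrs.mem.sig_type = IB_SIG_TYPE_NONE;	// no PI in memory
 *	sig_attrs.wire.sig_type = IB_SIG_TYPE_T10_DIF;	// DIF on the wire
 *	sig_attrs.wire.sig.dif.bg_type = IB_T10DIF_CRC;	// CRC guard tag
 *	sig_attrs.wire.sig.dif.pi_interval = 512;	// PI per 512 bytes
 *	sig_attrs.wire.sig.dif.ref_tag = 0xffffffff;	// placeholder seed
 */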

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs.  If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed.  If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set, @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
		rdma_rw_update_lkey(&ctx->sig->data, true);
		if (ctx->sig->prot.mr)
			rdma_rw_update_lkey(&ctx->sig->prot, true);

		ctx->sig->sig_mr->need_inval = true;
		ib_update_fast_reg_key(ctx->sig->sig_mr,
			ib_inc_rkey(ctx->sig->sig_mr->lkey));
		ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;

		if (ctx->sig->data.inv_wr.next)
			first_wr = &ctx->sig->data.inv_wr;
		else
			first_wr = &ctx->sig->data.reg_wr.wr;
		last_wr = &ctx->sig->data.wr.wr;
		break;
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
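
/*
 * Illustrative sketch: a target-side ULP that wants its response SEND to
 * follow the RDMA WRITEs in a single post could chain the WRs like this
 * (send_wr is assumed to be prepared by the caller):
 *
 *	struct ib_send_wr *first_wr, *bad_wr;
 *
 *	first_wr = rdma_rw_ctx_wrs(&ctx, qp, port_num, NULL, &send_wr);
 *	ret = ib_post_send(qp, first_wr, &bad_wr);
 *
 * Here the completion notification comes from send_wr's own wr_cqe, which
 * is why @cqe may be NULL when @chain_wr is set.
 */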

/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed.  If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
 * is not set, @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *bad_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, &bad_wr);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);

	if (ctx->sig->prot.mr) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
	}

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
	kfree(ctx->sig);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional WRs per context for the registration and
	 * the invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		factor += 6;	/* (inv + reg) * (data + prot + sig) */
	else if (rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * The device may not support everything we asked for, so clamp to
	 * what it can actually provide and live with what we get.
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
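
/*
 * Worked example of the sizing above (illustrative numbers): on an iWarp
 * device with attr->cap.max_rdma_ctxs = 128, factor becomes 1 + 2 = 3, so
 * max_send_wr grows by 3 * 128 = 384 slots; with signature offload enabled
 * instead, factor is 1 + 6 = 7, adding 896 slots.
 */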

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs * 2;
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				rdma_rw_fr_page_list_len(dev));
		if (ret) {
			pr_err("%s: failed to allocate %d MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_SIGNATURE, 2);
		if (ret) {
			pr_err("%s: failed to allocate %d SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}