/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
                      struct iser_data_buf *mem,
                      struct iser_reg_resources *rsc,
                      struct iser_mem_reg *mem_reg);
static
int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
                     struct iser_data_buf *mem,
                     struct iser_reg_resources *rsc,
                     struct iser_mem_reg *mem_reg);

static struct iser_reg_ops fastreg_ops = {
        .alloc_reg_res  = iser_alloc_fastreg_pool,
        .free_reg_res   = iser_free_fastreg_pool,
        .reg_mem        = iser_fast_reg_mr,
        .unreg_mem      = iser_unreg_mem_fastreg,
        .reg_desc_get   = iser_reg_desc_get_fr,
        .reg_desc_put   = iser_reg_desc_put_fr,
};

static struct iser_reg_ops fmr_ops = {
        .alloc_reg_res  = iser_alloc_fmr_pool,
        .free_reg_res   = iser_free_fmr_pool,
        .reg_mem        = iser_fast_reg_fmr,
        .unreg_mem      = iser_unreg_mem_fmr,
        .reg_desc_get   = iser_reg_desc_get_fmr,
        .reg_desc_put   = iser_reg_desc_put_fmr,
};

int iser_assign_reg_ops(struct iser_device *device)
{
        struct ib_device_attr *dev_attr = &device->dev_attr;

        /* Assign function handles - based on FMR support */
        if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
            device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
                iser_info("FMR supported, using FMR for registration\n");
                device->reg_ops = &fmr_ops;
        } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                iser_info("FastReg supported, using FastReg for registration\n");
                device->reg_ops = &fastreg_ops;
        } else {
                iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
                return -1;
        }

        return 0;
}

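/*
 * Illustration (a minimal sketch, not verbatim call sites): once
 * reg_ops is assigned, callers select the registration scheme without
 * ever branching on FMR vs FastReg again, e.g.:
 *
 *      desc = device->reg_ops->reg_desc_get(ib_conn);
 *      err = device->reg_ops->reg_mem(task, mem, rsc, reg);
 *      device->reg_ops->reg_desc_put(ib_conn, desc);
 *
 * which is how iser_reg_rdma_mem() below drives both schemes.
 */
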
static void
iser_free_bounce_sg(struct iser_data_buf *data)
{
        struct scatterlist *sg;
        int count;

        for_each_sg(data->sg, sg, data->size, count)
                __free_page(sg_page(sg));

        kfree(data->sg);

        data->sg = data->orig_sg;
        data->size = data->orig_size;
        data->orig_sg = NULL;
        data->orig_size = 0;
}

static int
iser_alloc_bounce_sg(struct iser_data_buf *data)
{
        struct scatterlist *sg;
        struct page *page;
        unsigned long length = data->data_len;
        int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);

        sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
        if (!sg)
                goto err;

        sg_init_table(sg, nents);
        while (length) {
                u32 page_len = min_t(u32, length, PAGE_SIZE);

                page = alloc_page(GFP_ATOMIC);
                if (!page)
                        goto err;

                sg_set_page(&sg[i], page, page_len, 0);
                length -= page_len;
                i++;
        }

        data->orig_sg = data->sg;
        data->orig_size = data->size;
        data->sg = sg;
        data->size = nents;

        return 0;

err:
        for (; i > 0; i--)
                __free_page(sg_page(&sg[i - 1]));
        kfree(sg);

        return -ENOMEM;
}

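/*
 * Worked example (assuming 4K PAGE_SIZE): a 10240-byte payload yields
 * nents = DIV_ROUND_UP(10240, 4096) = 3 bounce entries of 4096, 4096
 * and 2048 bytes, each backed by a freshly allocated page at offset 0,
 * so the bounce SG list is aligned by construction.
 */
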
static void
iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
{
        struct scatterlist *osg, *bsg = data->sg;
        void *oaddr, *baddr;
        unsigned int left = data->data_len;
        unsigned int bsg_off = 0;
        int i;

        for_each_sg(data->orig_sg, osg, data->orig_size, i) {
                unsigned int copy_len, osg_off = 0;

                oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
                copy_len = min(left, osg->length);
                while (copy_len) {
                        unsigned int len = min(copy_len, bsg->length - bsg_off);

                        baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
                        if (to_buffer)
                                memcpy(baddr + bsg_off, oaddr + osg_off, len);
                        else
                                memcpy(oaddr + osg_off, baddr + bsg_off, len);

                        kunmap_atomic(baddr - bsg->offset);
                        osg_off += len;
                        bsg_off += len;
                        copy_len -= len;

                        if (bsg_off >= bsg->length) {
                                bsg = sg_next(bsg);
                                bsg_off = 0;
                        }
                }
                kunmap_atomic(oaddr - osg->offset);
                left -= osg_off;
        }
}

static inline void
iser_copy_from_bounce(struct iser_data_buf *data)
{
        iser_copy_bounce(data, false);
}

static inline void
iser_copy_to_bounce(struct iser_data_buf *data)
{
        iser_copy_bounce(data, true);
}

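/*
 * Direction convention: "to_buffer" means original SG -> bounce SG,
 * used before an outbound transfer; the reverse is used after an
 * inbound transfer completes. The bounce page is remapped with
 * kmap_atomic() on every inner iteration because bsg may advance to a
 * new page mid-copy of a single original entry.
 */
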
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
        struct iser_fr_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&fr_pool->lock, flags);
        desc = list_first_entry(&fr_pool->list,
                                struct iser_fr_desc, list);
        list_del(&desc->list);
        spin_unlock_irqrestore(&fr_pool->lock, flags);

        return desc;
}

void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
                     struct iser_fr_desc *desc)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
        unsigned long flags;

        spin_lock_irqsave(&fr_pool->lock, flags);
        list_add(&desc->list, &fr_pool->list);
        spin_unlock_irqrestore(&fr_pool->lock, flags);
}

struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;

        return list_first_entry(&fr_pool->list,
                                struct iser_fr_desc, list);
}

void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
                      struct iser_fr_desc *desc)
{
}

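/*
 * Note the asymmetry: the FastReg variants above take and return a
 * descriptor under the pool lock, while the FMR variants are trivial.
 * iser_reg_desc_get_fmr() peeks at the list head without unlinking it
 * and the put is a no-op, since per-command mapping state lives in the
 * ib_fmr_pool rather than in the descriptor itself.
 */
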
/**
 * iser_start_rdma_unaligned_sg - Allocate a bounce buffer for an
 * RDMA-unaligned SG list, copy the payload into it for writes and
 * DMA-map it in place of the original list.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                        struct iser_data_buf *data,
                                        enum iser_data_dir cmd_dir)
{
        struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
        int rc;

        rc = iser_alloc_bounce_sg(data);
        if (rc) {
                iser_err("Failed to allocate bounce for data len %lu\n",
                         data->data_len);
                return rc;
        }

        if (cmd_dir == ISER_DIR_OUT)
                iser_copy_to_bounce(data);

        data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
                                        (cmd_dir == ISER_DIR_OUT) ?
                                        DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!data->dma_nents) {
                iser_err("Got dma_nents %d, something went wrong...\n",
                         data->dma_nents);
                rc = -ENOMEM;
                goto err;
        }

        return 0;
err:
        iser_free_bounce_sg(data);
        return rc;
}

/**
 * iser_finalize_rdma_unaligned_sg - DMA-unmap the bounce buffer, copy
 * the payload back out for reads and release the bounce pages.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                     struct iser_data_buf *data,
                                     enum iser_data_dir cmd_dir)
{
        struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;

        ib_dma_unmap_sg(dev, data->sg, data->size,
                        (cmd_dir == ISER_DIR_OUT) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE);

        if (cmd_dir == ISER_DIR_IN)
                iser_copy_from_bounce(data);

        iser_free_bounce_sg(data);
}

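/*
 * Bounce-buffer lifecycle for an unaligned command (a minimal sketch):
 *
 *      iser_start_rdma_unaligned_sg()    alloc bounce, WRITE: copy
 *                                        payload in, DMA-map bounce SG
 *      ... RDMA is performed against the bounce buffer ...
 *      iser_finalize_rdma_unaligned_sg() DMA-unmap, READ: copy payload
 *                                        out, free the bounce pages
 */
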
#define IS_4K_ALIGNED(addr)     ((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be
 * less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where --few fragments of the same page-- are present in the SG as
 * consecutive elements, and handles a single-entry SG as well.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
                               struct ib_device *ibdev, u64 *pages,
                               int *offset, int *data_size)
{
        struct scatterlist *sg, *sgl = data->sg;
        u64 start_addr, end_addr, page, chunk_start = 0;
        unsigned long total_sz = 0;
        unsigned int dma_len;
        int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

        /* compute the offset of first element */
        *offset = (u64) sgl[0].offset & ~MASK_4K;

        new_chunk = 1;
        cur_page = 0;
        for_each_sg(sgl, sg, data->dma_nents, i) {
                start_addr = ib_sg_dma_address(ibdev, sg);
                if (new_chunk)
                        chunk_start = start_addr;
                dma_len = ib_sg_dma_len(ibdev, sg);
                end_addr = start_addr + dma_len;
                total_sz += dma_len;

                /* collect page fragments until aligned or end of SG list */
                if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
                        new_chunk = 0;
                        continue;
                }
                new_chunk = 1;

                /* address of the first page in the contiguous chunk;
                   masking relevant for the very first SG entry,
                   which might be unaligned */
                page = chunk_start & MASK_4K;
                do {
                        pages[cur_page++] = page;
                        page += SIZE_4K;
                } while (page < end_addr);
        }

        *data_size = total_sz;
        iser_dbg("page_vec->data_size:%d cur_page %d\n",
                 *data_size, cur_page);
        return cur_page;
}

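/*
 * Worked example: two DMA-mapped entries, {addr 0x10000, len 0x2000}
 * and {addr 0x12000, len 0x1800}. The first chunk ends 4K-aligned and
 * the second is the last entry, so its unaligned tail is tolerated.
 * Result: pages = {0x10000, 0x11000, 0x12000, 0x13000}, *offset = 0,
 * *data_size = 0x3800, and the function returns 4.
 */
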
/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
                                     struct ib_device *ibdev)
{
        struct scatterlist *sg, *sgl, *next_sg = NULL;
        u64 start_addr, end_addr;
        int i, ret_len, start_check = 0;

        if (data->dma_nents == 1)
                return 1;

        sgl = data->sg;
        start_addr = ib_sg_dma_address(ibdev, sgl);

        for_each_sg(sgl, sg, data->dma_nents, i) {
                if (start_check && !IS_4K_ALIGNED(start_addr))
                        break;

                next_sg = sg_next(sg);
                if (!next_sg)
                        break;

                end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
                start_addr = ib_sg_dma_address(ibdev, next_sg);

                if (end_addr == start_addr) {
                        start_check = 0;
                        continue;
                } else
                        start_check = 1;

                if (!IS_4K_ALIGNED(end_addr))
                        break;
        }
        ret_len = (next_sg) ? i : i+1;

        if (unlikely(ret_len != data->dma_nents))
                iser_warn("rdma alignment violation (%d/%d aligned)\n",
                          ret_len, data->dma_nents);

        return ret_len;
}

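/*
 * Alignment rule in effect above: only the first mapped entry may
 * start, and only the last may end, off a 4K boundary; every interior
 * boundary must be 4K-aligned. The one exception is consecutive
 * entries that are fragments of the same page (end_addr equals the
 * next start_addr), for which the start check is skipped.
 */
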
static void iser_data_buf_dump(struct iser_data_buf *data,
                               struct ib_device *ibdev)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(data->sg, sg, data->dma_nents, i)
                iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
                         "off:0x%x sz:0x%x dma_len:0x%x\n",
                         i, (unsigned long)ib_sg_dma_address(ibdev, sg),
                         sg_page(sg), sg->offset,
                         sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
        int i;

        iser_err("page vec length %d data size %d\n",
                 page_vec->length, page_vec->data_size);
        for (i = 0; i < page_vec->length; i++)
                iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                           struct iser_data_buf *data,
                           enum iser_data_dir iser_dir,
                           enum dma_data_direction dma_dir)
{
        struct ib_device *dev;

        iser_task->dir[iser_dir] = 1;
        dev = iser_task->iser_conn->ib_conn.device->ib_device;

        data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
        if (data->dma_nents == 0) {
                iser_err("dma_map_sg failed!!!\n");
                return -EINVAL;
        }
        return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
                              struct iser_data_buf *data,
                              enum dma_data_direction dir)
{
        struct ib_device *dev;

        dev = iser_task->iser_conn->ib_conn.device->ib_device;
        ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
             struct iser_mem_reg *reg)
{
        struct scatterlist *sg = mem->sg;

        reg->sge.lkey = device->mr->lkey;
        reg->rkey = device->mr->rkey;
        reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
        reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

        iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
                 " length=0x%x\n", reg->sge.lkey, reg->rkey,
                 reg->sge.addr, reg->sge.length);

        return 0;
}

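/*
 * Fast path note: a buffer that maps to a single DMA entry needs no
 * registration work at all. The device-wide MR (device->mr) already
 * spans it, so iser_reg_dma() only points the SGE at the mapped
 * address under the global lkey/rkey.
 */
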
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
                              struct iser_data_buf *mem,
                              enum iser_data_dir cmd_dir)
{
        struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
        struct iser_device *device = iser_task->iser_conn->ib_conn.device;

        iscsi_conn->fmr_unalign_cnt++;

        if (iser_debug_level > 0)
                iser_data_buf_dump(mem, device->ib_device);

        /* unmap the command data before accessing it */
        iser_dma_unmap_task_data(iser_task, mem,
                                 (cmd_dir == ISER_DIR_OUT) ?
                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);

        /*
         * Allocate a bounce buffer; if we are writing, copy the
         * unaligned scatterlist into it, then DMA-map the copy.
         */
        if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
                return -ENOMEM;

        return 0;
}

/**
 * iser_fast_reg_fmr - Registers physical memory using the FMR pool
 *
 * returns: 0 on success, errno code on failure
 */
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
                      struct iser_data_buf *mem,
                      struct iser_reg_resources *rsc,
                      struct iser_mem_reg *reg)
{
        struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct iser_page_vec *page_vec = rsc->page_vec;
        struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
        struct ib_pool_fmr *fmr;
        int ret, plen;

        plen = iser_sg_to_page_vec(mem, device->ib_device,
                                   page_vec->pages,
                                   &page_vec->offset,
                                   &page_vec->data_size);
        page_vec->length = plen;
        if (plen * SIZE_4K < page_vec->data_size) {
                iser_err("page vec too short to hold this SG\n");
                iser_data_buf_dump(mem, device->ib_device);
                iser_dump_page_vec(page_vec);
                return -EINVAL;
        }

        fmr = ib_fmr_pool_map_phys(fmr_pool,
                                   page_vec->pages,
                                   page_vec->length,
                                   page_vec->pages[0]);
        if (IS_ERR(fmr)) {
                ret = PTR_ERR(fmr);
                iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
                return ret;
        }

        reg->sge.lkey = fmr->fmr->lkey;
        reg->rkey = fmr->fmr->rkey;
        reg->sge.addr = page_vec->pages[0] + page_vec->offset;
        reg->sge.length = page_vec->data_size;
        reg->mem_h = fmr;

        return 0;
}

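/*
 * Example outcome (reusing the iser_sg_to_page_vec example above):
 * with pages[0] = 0x10000 and offset = 0, the mapped region is
 * reg->sge.addr = 0x10000 and reg->sge.length = 0x3800, addressable
 * under a single FMR rkey regardless of how many SG entries went in.
 */
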
/**
 * Unregister (previously registered using FMR) memory.
 * If the memory is not FMR-registered, does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
                        enum iser_data_dir cmd_dir)
{
        struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
        int ret;

        if (!reg->mem_h)
                return;

        iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

        ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
        if (ret)
                iser_err("ib_fmr_pool_unmap failed %d\n", ret);

        reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
                            enum iser_data_dir cmd_dir)
{
        struct iser_device *device = iser_task->iser_conn->ib_conn.device;
        struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

        if (!reg->mem_h)
                return;

        device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
                                      reg->mem_h);
        reg->mem_h = NULL;
}

static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
                    struct ib_sig_domain *domain)
{
        domain->sig_type = IB_SIG_TYPE_T10_DIF;
        domain->sig.dif.pi_interval = scsi_prot_interval(sc);
        domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
        /*
         * At the moment we hard code those, but in the future
         * we will take them from sc.
         */
        domain->sig.dif.apptag_check_mask = 0xffff;
        domain->sig.dif.app_escape = true;
        domain->sig.dif.ref_escape = true;
        if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
                domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
        switch (scsi_get_prot_op(sc)) {
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_STRIP:
                sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
                sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
                sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
                                                 IB_T10DIF_CSUM : IB_T10DIF_CRC;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
                sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
                sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
                                                 IB_T10DIF_CSUM : IB_T10DIF_CRC;
                break;
        default:
                iser_err("Unsupported PI operation %d\n",
                         scsi_get_prot_op(sc));
                return -EINVAL;
        }

        return 0;
}

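/*
 * DIF domain summary, per scsi_get_prot_op():
 *
 *      WRITE_INSERT/READ_STRIP  memory domain NONE, wire domain T10-DIF
 *      READ_INSERT/WRITE_STRIP  wire domain NONE, memory domain T10-DIF
 *      READ_PASS/WRITE_PASS     T10-DIF on both domains
 *
 * The wire-side guard is always CRC; the memory-side guard follows
 * SCSI_PROT_IP_CHECKSUM (IP checksum in host memory, CRC otherwise).
 */
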
static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
        *mask = 0;
        if (sc->prot_flags & SCSI_PROT_REF_CHECK)
                *mask |= ISER_CHECK_REFTAG;
        if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
                *mask |= ISER_CHECK_GUARD;
}

static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
        u32 rkey;

        memset(inv_wr, 0, sizeof(*inv_wr));
        inv_wr->opcode = IB_WR_LOCAL_INV;
        inv_wr->wr_id = ISER_FASTREG_LI_WRID;
        inv_wr->ex.invalidate_rkey = mr->rkey;

        rkey = ib_inc_rkey(mr->rkey);
        ib_update_fast_reg_key(mr, rkey);
}

static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
                struct iser_pi_context *pi_ctx,
                struct iser_mem_reg *data_reg,
                struct iser_mem_reg *prot_reg,
                struct iser_mem_reg *sig_reg)
{
        struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
        struct ib_send_wr sig_wr, inv_wr;
        struct ib_send_wr *bad_wr, *wr = NULL;
        struct ib_sig_attrs sig_attrs;
        int ret;

        memset(&sig_attrs, 0, sizeof(sig_attrs));
        ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
        if (ret)
                goto err;

        iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);

        if (!pi_ctx->sig_mr_valid) {
                iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
                wr = &inv_wr;
        }

        memset(&sig_wr, 0, sizeof(sig_wr));
        sig_wr.opcode = IB_WR_REG_SIG_MR;
        sig_wr.wr_id = ISER_FASTREG_LI_WRID;
        sig_wr.sg_list = &data_reg->sge;
        sig_wr.num_sge = 1;
        sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
        sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
        if (scsi_prot_sg_count(iser_task->sc))
                sig_wr.wr.sig_handover.prot = &prot_reg->sge;
        sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
                                              IB_ACCESS_REMOTE_READ |
                                              IB_ACCESS_REMOTE_WRITE;

        if (!wr)
                wr = &sig_wr;
        else
                wr->next = &sig_wr;

        ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
        if (ret) {
                iser_err("reg_sig_mr failed, ret:%d\n", ret);
                goto err;
        }
        pi_ctx->sig_mr_valid = 0;

        sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
        sig_reg->rkey = pi_ctx->sig_mr->rkey;
        sig_reg->sge.addr = 0;
        sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

        iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
                 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
                 sig_reg->sge.length);
err:
        return ret;
}

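/*
 * WR chain posted by iser_reg_sig_mr() (sketch): an optional
 * IB_WR_LOCAL_INV retiring the previous signature rkey, chained to the
 * IB_WR_REG_SIG_MR itself. Both carry ISER_FASTREG_LI_WRID and are
 * posted unsignaled, so their completions are only seen on error.
 */
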
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
                            struct iser_data_buf *mem,
                            struct iser_reg_resources *rsc,
                            struct iser_mem_reg *reg)
{
        struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct ib_mr *mr = rsc->mr;
        struct ib_fast_reg_page_list *frpl = rsc->frpl;
        struct ib_send_wr fastreg_wr, inv_wr;
        struct ib_send_wr *bad_wr, *wr = NULL;
        int ret, offset, size, plen;

        plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
                                   &offset, &size);
        if (plen * SIZE_4K < size) {
                iser_err("fast reg page_list too short to hold this SG\n");
                return -EINVAL;
        }

        if (!rsc->mr_valid) {
                iser_inv_rkey(&inv_wr, mr);
                wr = &inv_wr;
        }

        /* Prepare FASTREG WR */
        memset(&fastreg_wr, 0, sizeof(fastreg_wr));
        fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
        fastreg_wr.opcode = IB_WR_FAST_REG_MR;
        fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
        fastreg_wr.wr.fast_reg.page_list = frpl;
        fastreg_wr.wr.fast_reg.page_list_len = plen;
        fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
        fastreg_wr.wr.fast_reg.length = size;
        fastreg_wr.wr.fast_reg.rkey = mr->rkey;
        fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
                                               IB_ACCESS_REMOTE_WRITE |
                                               IB_ACCESS_REMOTE_READ);

        if (!wr)
                wr = &fastreg_wr;
        else
                wr->next = &fastreg_wr;

        ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
        if (ret) {
                iser_err("fast registration failed, ret:%d\n", ret);
                return ret;
        }
        rsc->mr_valid = 0;

        reg->sge.lkey = mr->lkey;
        reg->rkey = mr->rkey;
        reg->sge.addr = frpl->page_list[0] + offset;
        reg->sge.length = size;

        return ret;
}

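/*
 * FastReg vs FMR, side by side (both produce the same iser_mem_reg):
 * FastReg registration is queued on the QP as work requests and undone
 * with LOCAL_INV, while FMR mapping is a synchronous verbs call
 * (ib_fmr_pool_map_phys) whose unmapping the pool defers and batches.
 */
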
static int
iser_handle_unaligned_buf(struct iscsi_iser_task *task,
                          struct iser_data_buf *mem,
                          enum iser_data_dir dir)
{
        struct iser_conn *iser_conn = task->iser_conn;
        struct iser_device *device = iser_conn->ib_conn.device;
        int err, aligned_len;

        aligned_len = iser_data_buf_aligned_len(mem, device->ib_device);
        if (aligned_len != mem->dma_nents) {
                err = fall_to_bounce_buf(task, mem, dir);
                if (err)
                        return err;
        }

        return 0;
}

static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
                 struct iser_mem_reg *reg)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        if (mem->dma_nents == 1)
                return iser_reg_dma(device, mem, reg);

        return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
}

static int
iser_reg_data_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
                 struct iser_mem_reg *reg)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        if (mem->dma_nents == 1)
                return iser_reg_dma(device, mem, reg);

        return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}

int iser_reg_rdma_mem(struct iscsi_iser_task *task,
                      enum iser_data_dir dir)
{
        struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct iser_data_buf *mem = &task->data[dir];
        struct iser_mem_reg *reg = &task->rdma_reg[dir];
        struct iser_fr_desc *desc = NULL;
        int err;

        err = iser_handle_unaligned_buf(task, mem, dir);
        if (unlikely(err))
                return err;

        if (mem->dma_nents != 1 ||
            scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
                desc = device->reg_ops->reg_desc_get(ib_conn);
                reg->mem_h = desc;
        }

        err = iser_reg_data_sg(task, mem, desc, reg);
        if (unlikely(err))
                goto err_reg;

        if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
                struct iser_mem_reg prot_reg;

                memset(&prot_reg, 0, sizeof(prot_reg));
                if (scsi_prot_sg_count(task->sc)) {
                        mem = &task->prot[dir];
                        err = iser_handle_unaligned_buf(task, mem, dir);
                        if (unlikely(err))
                                goto err_reg;

                        err = iser_reg_prot_sg(task, mem, desc, &prot_reg);
                        if (unlikely(err))
                                goto err_reg;
                }

                err = iser_reg_sig_mr(task, desc->pi_ctx, reg,
                                      &prot_reg, reg);
                if (unlikely(err))
                        goto err_reg;

                desc->pi_ctx->sig_protected = 1;
        }

        return 0;

err_reg:
        if (desc)
                device->reg_ops->reg_desc_put(ib_conn, desc);

        return err;
}

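/*
 * Overall registration flow for a PI-protected command (sketch):
 *
 *      iser_handle_unaligned_buf()  bounce the SG if misaligned
 *      reg_desc_get()               grab an fr/fmr descriptor
 *      iser_reg_data_sg()           register data (or DMA fast path)
 *      iser_reg_prot_sg()           register protection SGs, if any
 *      iser_reg_sig_mr()            wrap both in the signature MR,
 *                                   overwriting reg in place
 */
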
void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
                         enum iser_data_dir dir)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        device->reg_ops->unreg_mem(task, dir);
}