drivers/net/ethernet/mellanox/mlx4/mr.c

/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

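/*
 * MTT segments are managed by a classic binary buddy allocator: one
 * free-bit bitmap per order.  An allocation scans upward for the
 * smallest free block that is large enough, splits it down to the
 * requested order, and returns the block's starting segment number
 * (or -1 if nothing is free).
 */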
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

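/*
 * Return a block to the buddy: while the block's buddy (seg ^ 1) is
 * also free, merge the two and retry one order higher.
 */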
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

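/*
 * Allocate the per-order free bitmaps (falling back to vzalloc() for
 * the large low-order bitmaps) and seed the allocator with a single
 * free block of the maximum order.
 */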
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		if (is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

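/*
 * MTT entries are handed out in segments of (1 << log_mtts_per_seg)
 * entries: the buddy works in segments while callers work in entries.
 * Reserve a segment range, then make sure the backing ICM pages for
 * the whole entry range are mapped before returning the first entry's
 * offset.
 */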
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

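/*
 * In multi-function (SR-IOV) mode the resources are owned by the
 * master, so a slave forwards the request as a wrapped ALLOC_RES
 * command instead of touching the buddy directly.  The same pattern
 * recurs in the other wrappers below.
 */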
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

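/*
 * A memory key is the 32-bit MPT index rotated left by 8 bits;
 * rotating right recovers the index.  The bits above the MPT index
 * act as a free-running tag, which mlx4_map_phys_fmr() bumps to mint
 * a fresh key for the same MPT on every remap.
 */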
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd      = pd;
	mr->access  = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key     = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

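/*
 * MPT setup is a three-step dance: reserve an index from the bitmap,
 * map ICM backing for that dMPT entry, and finally hand the entry to
 * the hardware with SW2HW_MPT (done later, in mlx4_mr_enable()).
 * Each step has a __mlx4_* worker plus a wrapper that forwards the
 * request to the master when running multi-function.
 */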
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index, gfp);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

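/*
 * Build the MPT entry in a command mailbox and transfer ownership to
 * the hardware with SW2HW_MPT.  An MR with a real MTT but a page_shift
 * of zero is set up in the "free" state for fast-register (FRMR) use.
 */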
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

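/*
 * Write MTT entries directly into the ICM-backed MTT table, bracketing
 * the update with dma_sync calls so the device sees the new entries.
 */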
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

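/*
 * A single chunk must not cross a host page boundary of the MTT
 * table, so the write is split: a first (possibly short) chunk up to
 * the next page boundary, then whole pages at a time.
 */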
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf, gfp_t gfp)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list,
			    gfp);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

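/*
 * Memory windows share the MPT namespace with MRs but carry no MTT of
 * their own, so allocation only reserves an MPT index once the device
 * capability for the requested window type has been checked.
 */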
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	    (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
		return -ENOTSUPP;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key	    = hw_index_to_key(index);
	mw->pd      = pd;
	mw->type    = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
737 mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

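/*
 * Set up the MPT bitmap and the MTT buddy at driver init, and
 * pre-allocate the MTT range that the firmware reserves for itself.
 */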
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

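/*
 * Remap an FMR without a firmware command: flip the MPT to SW
 * ownership, rewrite the MTT entries in place, update the MPT fields,
 * and flip the MPT back to HW ownership.  Bumping the key's tag bits
 * (by num_mpts) yields a fresh lkey/rkey for each mapping.
 */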
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
		return;
	}
	fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);