mlx4_core: Allow large mlx4_buddy bitmaps
drivers/net/ethernet/mellanox/mlx4/mr.c
/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_MPT_FLAG_SW_OWNS		(0xfUL << 28)
#define MLX4_MPT_FLAG_FREE		(0x3UL << 28)
#define MLX4_MPT_FLAG_MIO		(1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE	(1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL		(1 << 9)
#define MLX4_MPT_FLAG_REGION		(1 << 8)

#define MLX4_MPT_PD_FLAG_FAST_REG	(1 << 27)
#define MLX4_MPT_PD_FLAG_RAE		(1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV		(3 << 24)

#define MLX4_MPT_STATUS_SW		0xF0
#define MLX4_MPT_STATUS_HW		0x00

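/*
 * Allocate one 2^order-aligned block of 2^order segments from the MTT
 * buddy allocator, splitting larger free blocks as needed.  Returns the
 * index of the first segment, or -1 (as u32) if nothing is free.
 */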
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

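/*
 * Return a block of 2^order segments starting at 'seg' to the buddy
 * allocator, merging it with its free buddy at each order for as long
 * as the buddy's bit is set.
 */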
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

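/*
 * Set up a buddy allocator covering 2^max_order segments: one bitmap per
 * order, allocated with kmalloc() and falling back to vmalloc() for the
 * large low-order bitmaps, with the single top-order block initially
 * marked free.
 */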
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vmalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		if (is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

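/*
 * Reserve a range of 2^order MTT entries: grab enough buddy segments
 * (each segment holds 2^log_mtts_per_seg MTTs), then make sure the
 * backing ICM pages are mapped.  Returns the MTT offset, or -1 on
 * failure.
 */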
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

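/*
 * Initialize an MTT for 'npages' pages of size 2^page_shift.  A zero
 * npages describes a physically contiguous (MTT-less) region, marked by
 * order == -1; otherwise the order is rounded up to the next power of
 * two and a matching MTT range is allocated.
 */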
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova = iova;
	mr->size = size;
	mr->pd = pd;
	mr->access = access;
	mr->enabled = MLX4_MR_DISABLED;
	mr->key = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mr_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mr_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mr_reserve(dev);
}

void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}

static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mr_release(dev, index);
}

int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mr_alloc_icm(dev, index);
}

void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mr_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mr_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mr_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MR_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mr->enabled = MLX4_MR_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);
}

void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mr_free_reserved(dev, mr);
	if (mr->enabled)
		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

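/*
 * Move a software-owned MR to hardware ownership: map its MPT ICM entry,
 * build the MPT in a command mailbox and issue SW2HW_MPT.  MRs created
 * with page_shift == 0 and a valid MTT are set up as fast-register MRs
 * in the free state.
 */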
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start = cpu_to_be64(mr->iova);
	mpt_entry->length = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
								&mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MR_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

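/*
 * Write 'npages' MTT entries starting at start_index, chunking the
 * writes so that each call to mlx4_write_mtt_chunk() stays within a
 * single page of the MTT table.
 */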
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages -= chunk;
		start_index += chunk;
		page_list += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages -= chunk;
			start_index += chunk;
			page_list += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

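/*
 * Initialize the MR table: the MPT bitmap, the MTT buddy allocator
 * (sized in segments of 2^log_mtts_per_seg MTTs) and, if the device
 * reserves MTTs for firmware use, an MTT range covering them.  Slaves
 * skip this entirely since all MR handling goes through the master.
 */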
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2(dev->caps.num_mtts /
				    (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %d is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key = cpu_to_be32(key);
	fmr->mpt->lkey = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

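/*
 * Allocate an FMR: validate the mapping limits and page size, allocate
 * the underlying MR, and cache a kernel pointer to its MTT entries so
 * that later remaps can write them directly (which is why all MTTs must
 * fit within one page).
 */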
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages = max_pages;
	fmr->max_maps = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
		       err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
		       err);
		return;
	}
	fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mlx4_mr_free(dev, &fmr->mr);
	fmr->mr.enabled = MLX4_MR_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);