/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/cmd.h>
37 #include "mlx5_core.h"
/* Driver-owned UAR pool sizing.  NUM_DRIVER_UARS is referenced below
 * (need_uuar_lock(), mlx5_alloc_uuars()); its enumerator line was lost
 * in this copy — value 4 matches the upstream driver, TODO confirm.
 */
enum {
	NUM_DRIVER_UARS		= 4,
	NUM_LOW_LAT_UUARS	= 4,
};
45 struct mlx5_alloc_uar_mbox_in
{
46 struct mlx5_inbox_hdr hdr
;
50 struct mlx5_alloc_uar_mbox_out
{
51 struct mlx5_outbox_hdr hdr
;
56 struct mlx5_free_uar_mbox_in
{
57 struct mlx5_inbox_hdr hdr
;
62 struct mlx5_free_uar_mbox_out
{
63 struct mlx5_outbox_hdr hdr
;
67 int mlx5_cmd_alloc_uar(struct mlx5_core_dev
*dev
, u32
*uarn
)
69 struct mlx5_alloc_uar_mbox_in in
;
70 struct mlx5_alloc_uar_mbox_out out
;
73 memset(&in
, 0, sizeof(in
));
74 memset(&out
, 0, sizeof(out
));
75 in
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR
);
76 err
= mlx5_cmd_exec(dev
, &in
, sizeof(in
), &out
, sizeof(out
));
81 err
= mlx5_cmd_status_to_err(&out
.hdr
);
85 *uarn
= be32_to_cpu(out
.uarn
) & 0xffffff;
90 EXPORT_SYMBOL(mlx5_cmd_alloc_uar
);
92 int mlx5_cmd_free_uar(struct mlx5_core_dev
*dev
, u32 uarn
)
94 struct mlx5_free_uar_mbox_in in
;
95 struct mlx5_free_uar_mbox_out out
;
98 memset(&in
, 0, sizeof(in
));
99 memset(&out
, 0, sizeof(out
));
100 in
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR
);
101 in
.uarn
= cpu_to_be32(uarn
);
102 err
= mlx5_cmd_exec(dev
, &in
, sizeof(in
), &out
, sizeof(out
));
107 err
= mlx5_cmd_status_to_err(&out
.hdr
);
112 EXPORT_SYMBOL(mlx5_cmd_free_uar
);
114 static int need_uuar_lock(int uuarn
)
116 int tot_uuars
= NUM_DRIVER_UARS
* MLX5_BF_REGS_PER_PAGE
;
118 if (uuarn
== 0 || tot_uuars
- NUM_LOW_LAT_UUARS
)
124 int mlx5_alloc_uuars(struct mlx5_core_dev
*dev
, struct mlx5_uuar_info
*uuari
)
126 int tot_uuars
= NUM_DRIVER_UARS
* MLX5_BF_REGS_PER_PAGE
;
132 uuari
->num_uars
= NUM_DRIVER_UARS
;
133 uuari
->num_low_latency_uuars
= NUM_LOW_LAT_UUARS
;
135 mutex_init(&uuari
->lock
);
136 uuari
->uars
= kcalloc(uuari
->num_uars
, sizeof(*uuari
->uars
), GFP_KERNEL
);
140 uuari
->bfs
= kcalloc(tot_uuars
, sizeof(*uuari
->bfs
), GFP_KERNEL
);
146 uuari
->bitmap
= kcalloc(BITS_TO_LONGS(tot_uuars
), sizeof(*uuari
->bitmap
),
148 if (!uuari
->bitmap
) {
153 uuari
->count
= kcalloc(tot_uuars
, sizeof(*uuari
->count
), GFP_KERNEL
);
159 for (i
= 0; i
< uuari
->num_uars
; i
++) {
160 err
= mlx5_cmd_alloc_uar(dev
, &uuari
->uars
[i
].index
);
164 addr
= dev
->iseg_base
+ ((phys_addr_t
)(uuari
->uars
[i
].index
) << PAGE_SHIFT
);
165 uuari
->uars
[i
].map
= ioremap(addr
, PAGE_SIZE
);
166 if (!uuari
->uars
[i
].map
) {
167 mlx5_cmd_free_uar(dev
, uuari
->uars
[i
].index
);
171 mlx5_core_dbg(dev
, "allocated uar index 0x%x, mmaped at %p\n",
172 uuari
->uars
[i
].index
, uuari
->uars
[i
].map
);
175 for (i
= 0; i
< tot_uuars
; i
++) {
178 bf
->buf_size
= (1 << MLX5_CAP_GEN(dev
, log_bf_reg_size
)) / 2;
179 bf
->uar
= &uuari
->uars
[i
/ MLX5_BF_REGS_PER_PAGE
];
180 bf
->regreg
= uuari
->uars
[i
/ MLX5_BF_REGS_PER_PAGE
].map
;
181 bf
->reg
= NULL
; /* Add WC support */
182 bf
->offset
= (i
% MLX5_BF_REGS_PER_PAGE
) *
183 (1 << MLX5_CAP_GEN(dev
, log_bf_reg_size
)) +
185 bf
->need_lock
= need_uuar_lock(i
);
186 spin_lock_init(&bf
->lock
);
187 spin_lock_init(&bf
->lock32
);
194 for (i
--; i
>= 0; i
--) {
195 iounmap(uuari
->uars
[i
].map
);
196 mlx5_cmd_free_uar(dev
, uuari
->uars
[i
].index
);
201 kfree(uuari
->bitmap
);
211 int mlx5_free_uuars(struct mlx5_core_dev
*dev
, struct mlx5_uuar_info
*uuari
)
213 int i
= uuari
->num_uars
;
215 for (i
--; i
>= 0; i
--) {
216 iounmap(uuari
->uars
[i
].map
);
217 mlx5_cmd_free_uar(dev
, uuari
->uars
[i
].index
);
221 kfree(uuari
->bitmap
);
228 int mlx5_alloc_map_uar(struct mlx5_core_dev
*mdev
, struct mlx5_uar
*uar
)
231 phys_addr_t uar_bar_start
;
234 err
= mlx5_cmd_alloc_uar(mdev
, &uar
->index
);
236 mlx5_core_warn(mdev
, "mlx5_cmd_alloc_uar() failed, %d\n", err
);
240 uar_bar_start
= pci_resource_start(mdev
->pdev
, 0);
241 pfn
= (uar_bar_start
>> PAGE_SHIFT
) + uar
->index
;
242 uar
->map
= ioremap(pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
244 mlx5_core_warn(mdev
, "ioremap() failed, %d\n", err
);
252 mlx5_cmd_free_uar(mdev
, uar
->index
);
256 EXPORT_SYMBOL(mlx5_alloc_map_uar
);
258 void mlx5_unmap_free_uar(struct mlx5_core_dev
*mdev
, struct mlx5_uar
*uar
)
261 mlx5_cmd_free_uar(mdev
, uar
->index
);
263 EXPORT_SYMBOL(mlx5_unmap_free_uar
);