/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
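
/*
 * Firmware page management: the device firmware does not allocate host
 * memory on its own; it requests pages from the driver at boot, at init
 * and, via events, at run time.  Every page handed to firmware is DMA
 * mapped and tracked in an rbtree keyed by DMA address, so it can be
 * unmapped and freed once firmware gives it back.
 */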

enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

enum {
	MLX5_BOOT_PAGES		= 1,
	MLX5_INIT_PAGES		= 2,
	MLX5_POST_INIT_PAGES	= 3
};

struct mlx5_pages_req {
	struct mlx5_core_dev	*dev;
	u32			func_id;
	s32			npages;
	struct work_struct	work;
};

struct fw_page {
	struct rb_node	rb_node;
	u64		addr;
	struct page	*page;
	u16		func_id;
};

struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_pages;
};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;
	__be64			pas[0];
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			rsvd;
	__be32			num_entries;
	__be64			pas[0];
};
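
/* Track a page given to firmware in the rbtree, keyed by its DMA address. */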
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page,
		       u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);

	return 0;
}
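
/*
 * Look up a tracked page by DMA address; if found, unlink it from the
 * rbtree and return its struct page, or NULL if the address is unknown.
 */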
static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			rb_erase(&tfp->rb_node, root);
			result = tfp->page;
			kfree(tfp);
			break;
		}
	}

	return result;
}
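
/*
 * Ask firmware how many pages it wants for the given stage (boot or init)
 * and for which function ID.
 */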
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	struct mlx5_query_pages_inbox	in;
	struct mlx5_query_pages_outbox	out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) :
			      cpu_to_be16(MLX5_INIT_PAGES);

	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	*npages = be32_to_cpu(out.num_pages);
	*func_id = be16_to_cpu(out.func_id);

	return 0;
}
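
/*
 * Allocate npages pages, DMA map them and post them to firmware with the
 * MANAGE_PAGES/GIVE command.  On failure all pages are unwound, and if
 * notify_fail is set firmware is told we cannot supply pages right now.
 */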
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct page *page;
	int inlen;
	u64 addr;
	int err;
	int i;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		return -ENOMEM;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page) {
			err = -ENOMEM;
			mlx5_core_warn(dev, "failed to allocate page\n");
			goto out_alloc;
		}
		addr = dma_map_page(&dev->pdev->dev, page, 0,
				    PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&dev->pdev->dev, addr)) {
			mlx5_core_warn(dev, "failed dma mapping page\n");
			__free_page(page);
			err = -ENOMEM;
			goto out_alloc;
		}
		err = insert_page(dev, addr, page, func_id);
		if (err) {
			mlx5_core_err(dev, "failed to track allocated page\n");
			dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(page);
			goto out_alloc;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be32(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	mlx5_core_dbg(dev, "err %d\n", err);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		if (err) {
			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
				       func_id, npages, out.hdr.status);
			goto out_alloc;
		}
	}

	mlx5_core_dbg(dev, "err %d\n", err);
	goto out_free;

out_alloc:
	if (notify_fail) {
		memset(in, 0, inlen);
		memset(&out, 0, sizeof(out));
		in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
		in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
		if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)))
			mlx5_core_warn(dev, "page notify failed\n");
	}
	/* Unwind every page already allocated, mapped and tracked. */
	for (i--; i >= 0; i--) {
		addr = be64_to_cpu(in->pas[i]);
		page = remove_page(dev, addr);
		if (!page) {
			mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
				      addr);
			continue;
		}
		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

out_free:
	mlx5_vfree(in);
	return err;
}
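
/*
 * Ask firmware to return up to npages pages with the MANAGE_PAGES/TAKE
 * command, then unmap and free every page it actually handed back.
 */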
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox	 in;
	struct mlx5_manage_pages_outbox *out;
	struct page *page;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be32(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}
	dev->priv.fw_pages -= npages;

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	num_claimed = be32_to_cpu(out->num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		page = remove_page(dev, addr);
		if (!page) {
			mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n",
				       addr);
		} else {
			dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(page);
		}
	}

out_free:
	mlx5_vfree(out);
	return err;
}
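
/*
 * Deferred work for a firmware page request: a negative page count is a
 * reclaim request, a positive one a request for more pages.
 */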
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
			       "reclaim" : "give", err);

	kfree(req);
}
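
/*
 * Entry point for firmware page-request events.  This runs in atomic
 * context (hence GFP_ATOMIC), so the real work is deferred to the
 * page-allocator workqueue.
 */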
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
}
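
/* Satisfy the boot/init page requirements firmware reports via QUERY_PAGES. */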
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}
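
/*
 * How many 8-byte page addresses fit in one reclaim command: the inline
 * command input plus one protection-block payload, minus the fixed
 * MANAGE_PAGES output header.
 */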
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->in) + sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) / 8;

	return ret;
}
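
/*
 * On teardown, reclaim all pages firmware still holds, one command's worth
 * at a time, giving up after five seconds if firmware does not return
 * everything.
 */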
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(5000);
	struct fw_page *fwp;
	struct rb_node *p;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(), NULL);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing to do here */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}