/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

enum {
	MLX5_BOOT_PAGES		= 1,
	MLX5_INIT_PAGES		= 2,
	MLX5_POST_INIT_PAGES	= 3
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_pages;
};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;
	__be64			pas[0];
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			num_entries;
	u8			rsvd[4];
	__be64			pas[0];
};

enum {
	MAX_RECLAIM_TIME_MSECS		 = 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

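/*
 * Pages given to firmware are tracked in an rb-tree keyed by DMA address.
 * Each node covers one host page of PAGE_SIZE and carries a bitmask of its
 * free MLX5_ADAPTER_PAGE_SIZE (4K) chunks, so a single host page can back
 * several firmware pages when PAGE_SIZE > 4K.
 */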
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

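/* Find the tracking node whose base DMA address matches @addr exactly. */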
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

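/*
 * Query how many pages firmware wants for this function at this stage
 * (boot or init).  The count is signed; the caller satisfies a positive
 * request with give_pages().
 */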
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	struct mlx5_query_pages_inbox	in;
	struct mlx5_query_pages_outbox	out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);

	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	*npages = be32_to_cpu(out.num_pages);
	*func_id = be16_to_cpu(out.func_id);

	return err;
}

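/*
 * Carve one 4K firmware page out of the first host page on the free list.
 * Fails with -ENOMEM when no tracked page has a free chunk left, in which
 * case the caller allocates a fresh host page.
 */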
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

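/*
 * Return one 4K chunk to its tracking node.  When every chunk of the host
 * page is free again, the page is unmapped, freed and dropped from the
 * rb-tree.
 */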
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		/* free_count == 1 means the page went from fully used to
		 * fully free in this call and never sat on the free list.
		 */
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}

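/*
 * Allocate a fresh host page near the device's NUMA node, DMA-map it and
 * start tracking it.  On any failure the page is unmapped and freed again
 * so nothing leaks.
 */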
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 addr;
	int err;
	int nid = dev_to_node(&dev->pdev->dev);

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto out_alloc;
	}
	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		goto out_mapping;
	}

	return 0;

out_mapping:
	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
	__free_page(page);

	return err;
}

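/*
 * Tell firmware we cannot give it the pages it asked for, using the
 * MANAGE_PAGES "can't give" opmod, so it does not wait forever.
 */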
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
	in->func_id = cpu_to_be16(func_id);
	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (!err)
		err = mlx5_cmd_status_to_err(&out.hdr);

	if (err)
		mlx5_core_warn(dev, "page notify failed\n");

	kfree(in);
}

static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	int inlen;
	u64 addr;
	int err;
	int i;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be32(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	err = mlx5_cmd_status_to_err(&out.hdr);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
			       func_id, npages, out.hdr.status);
		goto out_4k;
	}

	/* Account the pages once, only after firmware accepted them. */
	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;

	mlx5_core_dbg(dev, "err %d\n", err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id);
	return err;
}

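/*
 * Ask firmware to return up to @npages pages for @func_id and free each
 * returned address.  The number actually handed back is reported through
 * @nclaimed when the caller asks for it.
 */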
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox   in;
	struct mlx5_manage_pages_outbox *out;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be32(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	num_claimed = be32_to_cpu(out->num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}
	if (nclaimed)
		*nclaimed = num_claimed;

	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		free_4k(dev, addr);
	}

	/* Account only what firmware actually returned. */
	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

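/*
 * Worker for firmware page requests: a negative count asks the driver to
 * reclaim pages, a positive one to give pages.
 */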
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

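/*
 * Entry point for firmware page-request events; runs in atomic context
 * (GFP_ATOMIC), so the real work is deferred to the page workqueue.
 */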
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
}

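/* Query the startup (boot or init) page requirement and satisfy it. */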
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

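/*
 * How many page addresses fit in one reclaim command reply: the inline
 * output area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks, minus the
 * fixed outbox header, divided by the size of one address entry.
 */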
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) /
	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

	return ret;
}

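/*
 * Pull back every page firmware still holds, batch by batch, during
 * teardown.  While the device is in internal error state firmware will not
 * answer commands, so the pages are simply freed locally instead.
 */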
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err = 0;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
				free_4k(dev, fwp->addr);
				nclaimed = 1;
			} else {
				err = reclaim_pages(dev, fwp->func_id,
						    optimal_reclaimed_pages(),
						    &nclaimed);
			}
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			/* Progress was made; reset the timeout. */
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}

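/*
 * Wait for all VF pages to be returned before SRIOV teardown completes.
 * The timeout is restarted whenever the outstanding count drops, so a slow
 * but progressing reclaim is not aborted.
 */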
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_vfs_pages = dev->priv.vfs_pages;

	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
		      dev->priv.name);
	while (dev->priv.vfs_pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
			return -ETIMEDOUT;
		}
		if (dev->priv.vfs_pages < prev_vfs_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_vfs_pages = dev->priv.vfs_pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);

	return 0;
}