/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "mlx5_ib.h"

struct workqueue_struct *mlx5_ib_page_fault_wq;
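/*
 * Copy a single ODP capability bit: read the (big-endian) device
 * capability field and, if the bit is supported by the hardware, set
 * the matching bit in the IB-layer capability struct.
 */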
#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do {	\
	if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name)	\
		ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name;	\
} while (0)
int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
{
	int err;
	struct mlx5_odp_caps hw_caps;
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
		return 0;

	err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
	if (err)
		goto out;

	/* At this point we would copy the capability bits that the driver
	 * supports from the hw_caps struct to the caps struct. However, no
	 * such capabilities are supported so far. */
out:
	return err;
}
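/*
 * Illustrative sketch only: once a capability is reported, the copy
 * above would use COPY_ODP_BIT_MLX_TO_IB. The field and bit names
 * below are hypothetical placeholders, not capabilities this code
 * supports:
 *
 *	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps,
 *			       per_transport_caps.ud_odp_caps, SEND);
 */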
static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
						   u32 key)
{
	u32 base_key = mlx5_base_mkey(key);
	struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);

	if (!mmr || mmr->key != key)
		return NULL;

	return container_of(mmr, struct mlx5_ib_mr, mmr);
}
static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
				      struct mlx5_ib_pfault *pfault,
				      int error)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
					      pfault->mpfault.flags,
					      error);
	if (ret)
		pr_err("Failed to resolve the page fault on QP 0x%x\n",
		       qp->mqp.qpn);
}
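/*
 * Dispatch a page fault by its event subtype. Only the fallback case
 * exists so far: an unrecognized subtype is logged and the fault is
 * resumed with the error flag set, so the QP is not left waiting on a
 * fault that will never be handled.
 */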
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault)
{
	u8 event_subtype = pfault->mpfault.event_subtype;

	switch (event_subtype) {
	default:
		pr_warn("Invalid page fault event subtype: 0x%x\n",
			event_subtype);
		mlx5_ib_page_fault_resume(qp, pfault, 1);
		break;
	}
}
static void mlx5_ib_qp_pfault_action(struct work_struct *work)
{
	struct mlx5_ib_pfault *pfault = container_of(work,
						     struct mlx5_ib_pfault,
						     work);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(&pfault->mpfault);
	struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
					     pagefaults[context]);

	mlx5_ib_mr_pfault_handler(qp, pfault);
}
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
	qp->disable_page_faults = 1;
	spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);

	/*
	 * Note that at this point, we are guaranteed that no more
	 * work queue elements will be posted to the work queue with
	 * the QP we are closing.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
}
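/*
 * A minimal usage sketch (hypothetical caller), assuming fault
 * processing must be quiesced around a QP state change:
 *
 *	mlx5_ib_qp_disable_pagefaults(qp);
 *	... modify or destroy the hardware QP ...
 *	mlx5_ib_qp_enable_pagefaults(qp);
 *
 * Once disable returns, no fault work for this QP is queued or
 * running: the flag stops new queue_work() calls, and the flush
 * drains any element that was already posted.
 */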
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
	qp->disable_page_faults = 0;
	spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
}
static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
				   struct mlx5_pagefault *pfault)
{
	/*
	 * Note that we will only get one fault event per QP per context
	 * (responder/initiator, read/write), until we resolve the page fault
	 * with the mlx5_ib_page_fault_resume command. Since this function is
	 * called from within the work element, there is no risk of missing
	 * events.
	 */
	struct mlx5_ib_qp *mibqp = to_mibqp(qp);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(pfault);
	struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];

	qp_pfault->mpfault = *pfault;

	/* No need to disable interrupts here since we are in an interrupt */
	spin_lock(&mibqp->disable_page_faults_lock);
	if (!mibqp->disable_page_faults)
		queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
	spin_unlock(&mibqp->disable_page_faults_lock);
}
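/*
 * Page faults start out disabled for a new QP; they are expected to
 * be switched on with mlx5_ib_qp_enable_pagefaults() once the QP is
 * ready to service them.
 */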
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
{
	int i;

	qp->disable_page_faults = 1;
	spin_lock_init(&qp->disable_page_faults_lock);

	qp->mqp.pfault_handler = mlx5_ib_pfault_handler;

	for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
		INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
}
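/*
 * Per-device setup. The SRCU instance initialized here is presumably
 * what the fault paths will use to synchronize MR lookups against
 * deregistration; only its initialization exists so far.
 */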
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
{
	int ret;

	ret = init_srcu_struct(&ibdev->mr_srcu);
	if (ret)
		return ret;

	return 0;
}
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
{
	cleanup_srcu_struct(&ibdev->mr_srcu);
}
int __init mlx5_ib_odp_init(void)
{
	mlx5_ib_page_fault_wq =
		create_singlethread_workqueue("mlx5_ib_page_faults");
	if (!mlx5_ib_page_fault_wq)
		return -ENOMEM;

	return 0;
}
void mlx5_ib_odp_cleanup(void)
{
	destroy_workqueue(mlx5_ib_page_fault_wq);
}