Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
5e0b537c | 3 | * Copyright (c) 2005 Cisco Systems. All rights reserved. |
2a1d9b7f | 4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. |
1da177e4 LT |
5 | * |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | * | |
34 | * $Id: mthca_provider.h 1349 2004-12-16 21:09:43Z roland $ | |
35 | */ | |
36 | ||
37 | #ifndef MTHCA_PROVIDER_H | |
38 | #define MTHCA_PROVIDER_H | |
39 | ||
a4d61e84 RD |
40 | #include <rdma/ib_verbs.h> |
41 | #include <rdma/ib_pack.h> | |
1da177e4 LT |
42 | |
/*
 * Access-permission flag bits written into a memory protection table
 * (MPT) entry.  Bit positions are fixed by the HCA hardware interface.
 */
#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
48 | ||
/*
 * One chunk of a DMA buffer: the kernel virtual address plus the PCI
 * unmap address needed to release the mapping later.
 */
struct mthca_buf_list {
	void *buf;			/* kernel virtual address of the chunk */
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* DMA address for pci_unmap */
};
53 | ||
/*
 * A queue buffer is either one physically contiguous allocation
 * ("direct") or an array of separately mapped chunks ("page_list").
 * Which member is valid is tracked by the owner's is_direct flag.
 */
union mthca_buf {
	struct mthca_buf_list direct;
	struct mthca_buf_list *page_list;
};
58 | ||
/* User Access Region (UAR): one doorbell page of the device. */
struct mthca_uar {
	unsigned long pfn;	/* page frame number of the UAR page */
	int           index;	/* UAR index on the device */
};
63 | ||
struct mthca_user_db_table;

/*
 * Per-process userspace context: embeds the core ib_ucontext and adds
 * the UAR assigned to the process plus its doorbell-record table.
 */
struct mthca_ucontext {
	struct ib_ucontext          ibucontext;
	struct mthca_uar            uar;
	struct mthca_user_db_table *db_tab;	/* userspace doorbell records */
};
71 | ||
struct mthca_mtt;

/*
 * Memory region: the core ib_mr plus the memory translation table
 * (MTT) entries backing it.
 */
struct mthca_mr {
	struct ib_mr      ibmr;
	struct mthca_mtt *mtt;
};
78 | ||
/*
 * Fast memory region.  Keeps direct pointers to the hardware MPT/MTT
 * entries so remap operations can avoid firmware commands; the layout
 * of those entries differs between Tavor and Arbel (mem-free) HCAs.
 */
struct mthca_fmr {
	struct ib_fmr      ibmr;
	struct ib_fmr_attr attr;
	struct mthca_mtt  *mtt;
	int                maps;	/* number of mappings currently applied */
	union {
		/* Tavor: entries are accessed through MMIO (__iomem). */
		struct {
			struct mthca_mpt_entry __iomem *mpt;
			u64 __iomem *mtts;
		} tavor;
		/* Arbel (mem-free mode): entries live in host memory. */
		struct {
			struct mthca_mpt_entry *mpt;
			__be64 *mtts;
		} arbel;
	} mem;
};
95 | ||
/* Protection domain. */
struct mthca_pd {
	struct ib_pd    ibpd;
	u32             pd_num;		/* PD number on the device */
	atomic_t        sqp_count;	/* special QPs attached to this PD */
	struct mthca_mr ntmr;
	int             privileged;	/* NOTE(review): presumably marks a
					 * kernel/privileged PD — confirm at
					 * the allocation site */
};
103 | ||
/* Event queue state. */
struct mthca_eq {
	struct mthca_dev      *dev;		/* owning device */
	int                    eqn;		/* EQ number */
	u32                    eqn_mask;
	u32                    cons_index;	/* consumer index */
	u16                    msi_x_vector;
	u16                    msi_x_entry;
	int                    have_irq;	/* nonzero once an IRQ is requested */
	int                    nent;		/* number of EQ entries */
	struct mthca_buf_list *page_list;	/* pages backing the EQ ring */
	struct mthca_mr        mr;
};
116 | ||
struct mthca_av;

/* How the address vector (AV) of an address handle was allocated. */
enum mthca_ah_type {
	MTHCA_AH_ON_HCA,	/* AV lives in on-device memory */
	MTHCA_AH_PCI_POOL,	/* AV from a PCI DMA pool */
	MTHCA_AH_KMALLOC	/* AV kmalloc()ed in host memory */
};
124 | ||
/* Address handle: ib_ah plus the backing address vector and its DMA address. */
struct mthca_ah {
	struct ib_ah     ibah;
	enum mthca_ah_type type;	/* how av was allocated (see above enum) */
	u32              key;
	struct mthca_av *av;
	dma_addr_t       avdma;		/* DMA address of *av */
};
132 | ||
133 | /* | |
134 | * Quick description of our CQ/QP locking scheme: | |
135 | * | |
136 | * We have one global lock that protects dev->cq/qp_table. Each | |
137 | * struct mthca_cq/qp also has its own lock. An individual qp lock | |
138 | * may be taken inside of an individual cq lock. Both cqs attached to | |
139 | * a qp may be locked, with the send cq locked first. No other | |
140 | * nesting should be done. | |
141 | * | |
142 | * Each struct mthca_cq/qp also has an atomic_t ref count. The | |
143 | * pointer from the cq/qp_table to the struct counts as one reference. | |
144 | * This reference also is good for access through the consumer API, so | |
145 | * modifying the CQ/QP etc doesn't need to take another reference. | |
146 | * Access because of a completion being polled does need a reference. | |
147 | * | |
148 | * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the | |
149 | * destroy function to sleep on. | |
150 | * | |
151 | * This means that access from the consumer API requires nothing but | |
152 | * taking the struct's lock. | |
153 | * | |
154 | * Access because of a completion event should go as follows: | |
155 | * - lock cq/qp_table and look up struct | |
156 | * - increment ref count in struct | |
157 | * - drop cq/qp_table lock | |
158 | * - lock struct, do your thing, and unlock struct | |
159 | * - decrement ref count; if zero, wake up waiters | |
160 | * | |
161 | * To destroy a CQ/QP, we can do the following: | |
162 | * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock | |
163 | * - decrement ref count | |
164 | * - wait_event until ref count is zero | |
165 | * | |
166 | * It is the consumer's responsibility to make sure that no QP | |
167 | * operations (WQE posting or state modification) are pending when the | |
168 | * QP is destroyed. Also, the consumer must make sure that calls to | |
169 | * qp_modify are serialized. | |
170 | * | |
171 | * Possible optimizations (wait for profile data to see if/where we | |
172 | * have locks bouncing between CPUs): | |
173 | * - split cq/qp table lock into n separate (cache-aligned) locks, | |
174 | * indexed (say) by the page in the table | |
175 | * - split QP struct lock into three (one for common info, one for the | |
176 | * send queue and one for the receive queue) | |
177 | */ | |
178 | ||
/*
 * Completion queue.  Locking and refcounting follow the CQ/QP locking
 * scheme described in the comment earlier in this header.
 */
struct mthca_cq {
	struct ib_cq       ibcq;
	spinlock_t         lock;
	atomic_t           refcount;	/* see locking comment: table pointer
					 * counts as one reference */
	int                cqn;		/* CQ number on the device */
	u32                cons_index;	/* consumer index */
	int                is_direct;	/* queue.direct vs queue.page_list */
	int                is_kernel;	/* kernel CQ (vs userspace-owned) */

	/* Next fields are Arbel only */
	int                set_ci_db_index;	/* consumer-index doorbell */
	__be32            *set_ci_db;
	int                arm_db_index;	/* arm doorbell */
	__be32            *arm_db;
	int                arm_sn;

	union mthca_buf    queue;
	struct mthca_mr    mr;
	wait_queue_head_t  wait;	/* destroy sleeps here until refcount==0 */
};
199 | ||
/* Shared receive queue. */
struct mthca_srq {
	struct ib_srq		ibsrq;
	spinlock_t		lock;
	atomic_t		refcount;
	int			srqn;		/* SRQ number on the device */
	int			max;		/* max number of WQEs */
	int			max_gs;		/* max scatter entries per WQE */
	int			wqe_shift;	/* log2 of WQE stride */
	int			first_free;	/* head of the free-WQE list */
	int			last_free;	/* tail of the free-WQE list */
	u16			counter;	/* Arbel only */
	int			db_index;	/* Arbel only */
	__be32		       *db;		/* Arbel only */
	void		       *last;		/* last WQE posted */

	int			is_direct;	/* queue.direct vs queue.page_list */
	u64		       *wrid;		/* work request IDs, one per WQE */
	union mthca_buf		queue;
	struct mthca_mr		mr;

	wait_queue_head_t	wait;		/* destroy waits for refcount==0 */
};
222 | ||
/*
 * One work queue of a QP — used for both the send queue and the
 * receive queue (see struct mthca_qp below).
 */
struct mthca_wq {
	spinlock_t lock;
	int        max;		/* max number of WQEs */
	unsigned   next_ind;	/* next WQE index to use */
	unsigned   last_comp;	/* last WQE index completed */
	unsigned   head;
	unsigned   tail;
	void      *last;	/* last WQE posted */
	int        max_gs;	/* max scatter/gather entries per WQE */
	int        wqe_shift;	/* log2 of WQE stride */

	int        db_index;	/* Arbel only */
	__be32    *db;
};
237 | ||
/*
 * Queue pair.  Locking and refcounting follow the CQ/QP locking
 * scheme described in the comment earlier in this header.
 */
struct mthca_qp {
	struct ib_qp           ibqp;
	atomic_t               refcount;
	u32                    qpn;		/* QP number */
	int                    is_direct;	/* queue.direct vs queue.page_list */
	u8                     transport;
	u8                     state;		/* cached QP state */
	u8                     atomic_rd_en;	/* remote atomic/read enable bits */
	u8                     resp_depth;

	struct mthca_mr        mr;

	struct mthca_wq        rq;		/* receive work queue */
	struct mthca_wq        sq;		/* send work queue */
	enum ib_sig_type       sq_policy;
	int                    send_wqe_offset;	/* SQ offset within queue buffer */
	int                    max_inline_data;

	u64                   *wrid;		/* work request IDs for both queues */
	union mthca_buf        queue;

	wait_queue_head_t      wait;		/* destroy waits for refcount==0 */
};
261 | ||
/*
 * Special QP: a QP plus the extra state needed to build UD headers in
 * software (header template and a DMA-able staging buffer).
 */
struct mthca_sqp {
	struct mthca_qp      qp;
	int                  port;
	int                  pkey_index;
	u32                  qkey;
	u32                  send_psn;
	struct ib_ud_header  ud_header;
	int                  header_buf_size;
	void                *header_buf;	/* staging buffer for UD headers */
	dma_addr_t           header_dma;	/* DMA address of header_buf */
};
273 | ||
5e0b537c RD |
274 | static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext) |
275 | { | |
276 | return container_of(ibucontext, struct mthca_ucontext, ibucontext); | |
277 | } | |
278 | ||
e0f5fdca MT |
279 | static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr) |
280 | { | |
281 | return container_of(ibmr, struct mthca_fmr, ibmr); | |
282 | } | |
283 | ||
1da177e4 LT |
284 | static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr) |
285 | { | |
286 | return container_of(ibmr, struct mthca_mr, ibmr); | |
287 | } | |
288 | ||
289 | static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd) | |
290 | { | |
291 | return container_of(ibpd, struct mthca_pd, ibpd); | |
292 | } | |
293 | ||
294 | static inline struct mthca_ah *to_mah(struct ib_ah *ibah) | |
295 | { | |
296 | return container_of(ibah, struct mthca_ah, ibah); | |
297 | } | |
298 | ||
299 | static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) | |
300 | { | |
301 | return container_of(ibcq, struct mthca_cq, ibcq); | |
302 | } | |
303 | ||
ec34a922 RD |
304 | static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq) |
305 | { | |
306 | return container_of(ibsrq, struct mthca_srq, ibsrq); | |
307 | } | |
308 | ||
1da177e4 LT |
309 | static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) |
310 | { | |
311 | return container_of(ibqp, struct mthca_qp, ibqp); | |
312 | } | |
313 | ||
314 | static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp) | |
315 | { | |
316 | return container_of(qp, struct mthca_sqp, qp); | |
317 | } | |
318 | ||
319 | #endif /* MTHCA_PROVIDER_H */ |