Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
5e0b537c | 3 | * Copyright (c) 2005 Cisco Systems. All rights reserved. |
2a1d9b7f | 4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. |
1da177e4 LT |
5 | * |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | * | |
34 | * $Id: mthca_provider.h 1349 2004-12-16 21:09:43Z roland $ | |
35 | */ | |
36 | ||
37 | #ifndef MTHCA_PROVIDER_H | |
38 | #define MTHCA_PROVIDER_H | |
39 | ||
40 | #include <ib_verbs.h> | |
41 | #include <ib_pack.h> | |
42 | ||
/*
 * MPT (memory protection table) entry access-flag bits.
 * NOTE(review): bit positions presumably mirror the hardware MPT
 * layout -- confirm against the Mellanox programmer's reference.
 */
#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
48 | ||
/*
 * One chunk of a driver buffer: the kernel virtual address plus,
 * on platforms that need it, the DMA address kept for unmapping.
 */
struct mthca_buf_list {
	void *buf;				/* kernel virtual address of the chunk */
	DECLARE_PCI_UNMAP_ADDR(mapping)		/* saved DMA address (may compile away) */
};
53 | ||
/* User Access Region used to ring device doorbells. */
struct mthca_uar {
	unsigned long pfn;	/* presumably the page frame number the UAR is mapped at -- confirm */
	int           index;	/* UAR index within the device's UAR space */
};
58 | ||
struct mthca_user_db_table;	/* opaque here; defined elsewhere in the driver */

/* Per-process (userspace) device context: its UAR and doorbell table. */
struct mthca_ucontext {
	struct ib_ucontext          ibucontext;	/* must stay first for container_of() */
	struct mthca_uar            uar;	/* UAR assigned to this process */
	struct mthca_user_db_table *db_tab;	/* userspace doorbell records */
};
66 | ||
struct mthca_mtt;	/* opaque here; defined elsewhere in the driver */

/* Memory region: the core ib_mr plus its MTT (address translations). */
struct mthca_mr {
	struct ib_mr      ibmr;	/* must stay first for container_of() */
	struct mthca_mtt *mtt;	/* presumably the translation entries backing this MR -- confirm */
};
73 | ||
/*
 * Fast memory region.  The mem union gives direct pointers to the
 * MPT/MTT entries for quick remapping: on Tavor they are __iomem
 * (accessed through device BAR space), on mem-free Arbel they live
 * in host memory.
 */
struct mthca_fmr {
	struct ib_fmr      ibmr;	/* must stay first for container_of() */
	struct ib_fmr_attr attr;	/* attributes the FMR was created with */
	struct mthca_mtt  *mtt;
	int                maps;	/* presumably count of active mappings -- confirm */
	union {
		struct {
			struct mthca_mpt_entry __iomem *mpt;	/* MPT entry in device memory */
			u64 __iomem *mtts;			/* MTT entries in device memory */
		} tavor;
		struct {
			struct mthca_mpt_entry *mpt;	/* MPT entry in host memory */
			__be64 *mtts;			/* MTT entries in host memory */
		} arbel;
	} mem;
};
90 | ||
/* Protection domain. */
struct mthca_pd {
	struct ib_pd    ibpd;		/* must stay first for container_of() */
	u32             pd_num;		/* device PD number */
	atomic_t        sqp_count;	/* presumably special QPs using this PD -- confirm */
	struct mthca_mr ntmr;		/* MR owned by this PD; exact role not visible in this header */
	int             privileged;	/* nonzero for a privileged (kernel) PD -- confirm */
};
98 | ||
/* Event queue state. */
struct mthca_eq {
	struct mthca_dev      *dev;		/* owning device */
	int                    eqn;		/* EQ number */
	u32                    eqn_mask;
	u32                    cons_index;	/* consumer index */
	u16                    msi_x_vector;	/* MSI-X vector, when MSI-X is in use */
	u16                    msi_x_entry;
	int                    have_irq;	/* presumably set once an IRQ was requested -- confirm */
	int                    nent;		/* number of EQ entries */
	struct mthca_buf_list *page_list;	/* pages backing the EQ ring */
	struct mthca_mr        mr;		/* MR covering the EQ buffer */
};
111 | ||
struct mthca_av;	/* address vector; opaque in this header */

/*
 * Where an address handle's AV was allocated from (meanings inferred
 * from the names -- confirm in mthca_av.c).
 */
enum mthca_ah_type {
	MTHCA_AH_ON_HCA,	/* HCA-resident memory */
	MTHCA_AH_PCI_POOL,	/* PCI DMA pool */
	MTHCA_AH_KMALLOC	/* plain kmalloc'ed host memory */
};
119 | ||
/* Address handle. */
struct mthca_ah {
	struct ib_ah        ibah;	/* must stay first for container_of() */
	enum mthca_ah_type  type;	/* how/where av was allocated */
	u32                 key;
	struct mthca_av    *av;		/* the address vector itself */
	dma_addr_t          avdma;	/* presumably the DMA address of av -- confirm */
};
127 | ||
128 | /* | |
129 | * Quick description of our CQ/QP locking scheme: | |
130 | * | |
131 | * We have one global lock that protects dev->cq/qp_table. Each | |
132 | * struct mthca_cq/qp also has its own lock. An individual qp lock | |
133 | * may be taken inside of an individual cq lock. Both cqs attached to | |
134 | * a qp may be locked, with the send cq locked first. No other | |
135 | * nesting should be done. | |
136 | * | |
137 | * Each struct mthca_cq/qp also has an atomic_t ref count. The | |
138 | * pointer from the cq/qp_table to the struct counts as one reference. | |
139 | * This reference also is good for access through the consumer API, so | |
140 | * modifying the CQ/QP etc doesn't need to take another reference. | |
141 | * Access because of a completion being polled does need a reference. | |
142 | * | |
143 | * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the | |
144 | * destroy function to sleep on. | |
145 | * | |
146 | * This means that access from the consumer API requires nothing but | |
147 | * taking the struct's lock. | |
148 | * | |
149 | * Access because of a completion event should go as follows: | |
150 | * - lock cq/qp_table and look up struct | |
151 | * - increment ref count in struct | |
152 | * - drop cq/qp_table lock | |
153 | * - lock struct, do your thing, and unlock struct | |
154 | * - decrement ref count; if zero, wake up waiters | |
155 | * | |
156 | * To destroy a CQ/QP, we can do the following: | |
157 | * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock | |
158 | * - decrement ref count | |
159 | * - wait_event until ref count is zero | |
160 | * | |
161 | * It is the consumer's responsibility to make sure that no QP |
162 | * operations (WQE posting or state modification) are pending when the | |
163 | * QP is destroyed. Also, the consumer must make sure that calls to | |
164 | * qp_modify are serialized. | |
165 | * | |
166 | * Possible optimizations (wait for profile data to see if/where we | |
167 | * have locks bouncing between CPUs): | |
168 | * - split cq/qp table lock into n separate (cache-aligned) locks, | |
169 | * indexed (say) by the page in the table | |
170 | * - split QP struct lock into three (one for common info, one for the | |
171 | * send queue and one for the receive queue) | |
172 | */ | |
173 | ||
/* Completion queue; locking/refcounting follows the scheme described above. */
struct mthca_cq {
	struct ib_cq           ibcq;		/* must stay first for container_of() */
	spinlock_t             lock;
	atomic_t               refcount;	/* table pointer counts as one reference */
	int                    cqn;		/* CQ number */
	u32                    cons_index;	/* consumer index */
	int                    is_direct;	/* queue in one direct buffer vs. page list */
	int                    is_kernel;	/* presumably kernel- vs. user-owned CQ -- confirm */

	/* Next fields are Arbel only */
	int                    set_ci_db_index;	/* set-CI doorbell record index */
	__be32                *set_ci_db;	/* set-CI doorbell record */
	int                    arm_db_index;	/* arm doorbell record index */
	__be32                *arm_db;		/* arm doorbell record */
	int                    arm_sn;		/* arm sequence number */

	union {
		struct mthca_buf_list direct;		/* used when is_direct */
		struct mthca_buf_list *page_list;	/* used otherwise */
	} queue;
	struct mthca_mr        mr;	/* MR covering the CQ buffer */
	wait_queue_head_t      wait;	/* destroy sleeps here until refcount drops */
};
197 | ||
/* One work queue (send or receive) of a QP. */
struct mthca_wq {
	spinlock_t lock;
	int        max;		/* capacity in WQEs */
	unsigned   next_ind;
	unsigned   last_comp;
	unsigned   head;
	unsigned   tail;
	void      *last;	/* presumably the last WQE posted -- confirm */
	int        max_gs;	/* presumably max scatter/gather entries per WQE -- confirm */
	int        wqe_shift;	/* presumably log2 of the WQE stride -- confirm */

	int        db_index;	/* Arbel only */
	__be32    *db;		/* Arbel only: doorbell record */
};
212 | ||
/* Queue pair; locking/refcounting follows the scheme described above. */
struct mthca_qp {
	struct ib_qp           ibqp;		/* must stay first for container_of() */
	atomic_t               refcount;	/* table pointer counts as one reference */
	u32                    qpn;		/* QP number */
	int                    is_direct;	/* queue in one direct buffer vs. page list */
	u8                     transport;
	u8                     state;		/* cached QP state */
	u8                     atomic_rd_en;
	u8                     resp_depth;

	struct mthca_mr        mr;		/* MR covering the WQE buffer */

	struct mthca_wq        rq;		/* receive work queue */
	struct mthca_wq        sq;		/* send work queue */
	enum ib_sig_type       sq_policy;	/* signal-all vs. selective send completions */
	int                    send_wqe_offset;	/* offset of the SQ within the buffer -- confirm */

	u64                   *wrid;		/* posted work-request IDs; layout not visible here */
	union {
		struct mthca_buf_list direct;		/* used when is_direct */
		struct mthca_buf_list *page_list;	/* used otherwise */
	} queue;

	wait_queue_head_t      wait;	/* destroy sleeps here until refcount drops */
};
238 | ||
/* Special QP (QP0/QP1): a QP plus state for building UD headers. */
struct mthca_sqp {
	struct mthca_qp     qp;			/* must stay first for container_of() */
	int                 port;		/* physical port this special QP serves */
	int                 pkey_index;
	u32                 qkey;
	u32                 send_psn;
	struct ib_ud_header ud_header;		/* scratch UD header being constructed */
	int                 header_buf_size;	/* size of header_buf in bytes */
	void               *header_buf;		/* buffer for built headers */
	dma_addr_t          header_dma;		/* presumably the DMA address of header_buf -- confirm */
};
250 | ||
5e0b537c RD |
251 | static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext) |
252 | { | |
253 | return container_of(ibucontext, struct mthca_ucontext, ibucontext); | |
254 | } | |
255 | ||
e0f5fdca MT |
256 | static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr) |
257 | { | |
258 | return container_of(ibmr, struct mthca_fmr, ibmr); | |
259 | } | |
260 | ||
1da177e4 LT |
261 | static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr) |
262 | { | |
263 | return container_of(ibmr, struct mthca_mr, ibmr); | |
264 | } | |
265 | ||
266 | static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd) | |
267 | { | |
268 | return container_of(ibpd, struct mthca_pd, ibpd); | |
269 | } | |
270 | ||
271 | static inline struct mthca_ah *to_mah(struct ib_ah *ibah) | |
272 | { | |
273 | return container_of(ibah, struct mthca_ah, ibah); | |
274 | } | |
275 | ||
276 | static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) | |
277 | { | |
278 | return container_of(ibcq, struct mthca_cq, ibcq); | |
279 | } | |
280 | ||
281 | static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) | |
282 | { | |
283 | return container_of(ibqp, struct mthca_qp, ibqp); | |
284 | } | |
285 | ||
286 | static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp) | |
287 | { | |
288 | return container_of(qp, struct mthca_sqp, qp); | |
289 | } | |
290 | ||
291 | #endif /* MTHCA_PROVIDER_H */ |