Commit | Line | Data |
---|---|---|
fe2caefc PP |
1 | /******************************************************************* |
2 | * This file is part of the Emulex RoCE Device Driver for * | |
3 | * RoCE (RDMA over Converged Ethernet) adapters. * | |
4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | |
5 | * EMULEX and SLI are trademarks of Emulex. * | |
6 | * www.emulex.com * | |
7 | * * | |
8 | * This program is free software; you can redistribute it and/or * | |
9 | * modify it under the terms of version 2 of the GNU General * | |
10 | * Public License as published by the Free Software Foundation. * | |
11 | * This program is distributed in the hope that it will be useful. * | |
12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | |
13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | |
14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | |
15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | |
16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | |
17 | * more details, a copy of which can be found in the file COPYING * | |
18 | * included with this package. * | |
19 | * | |
20 | * Contact Information: | |
21 | * linux-drivers@emulex.com | |
22 | * | |
23 | * Emulex | |
24 | * 3333 Susan Street | |
25 | * Costa Mesa, CA 92626 | |
26 | *******************************************************************/ | |
27 | ||
28 | #ifndef __OCRDMA_H__ | |
29 | #define __OCRDMA_H__ | |
30 | ||
31 | #include <linux/mutex.h> | |
32 | #include <linux/list.h> | |
33 | #include <linux/spinlock.h> | |
34 | #include <linux/pci.h> | |
35 | ||
36 | #include <rdma/ib_verbs.h> | |
37 | #include <rdma/ib_user_verbs.h> | |
38 | ||
39 | #include <be_roce.h> | |
40 | #include "ocrdma_sli.h" | |
41 | ||
#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

/* Driver-wide error logging helper; forwards to printk(KERN_ERR). */
#define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg)

/* Upper bound on address handles per device (sizes av_tbl below). */
#define OCRDMA_MAX_AH 512

/* Build the ib_uverbs command-support bitmask bit for a command name. */
#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
50 | ||
/* Snapshot of per-device attributes and resource limits.  The max_*
 * fields bound what the rest of the driver may allocate (PDs, CQs,
 * QPs, SGEs, MRs, ...) — presumably queried from the adapter at probe
 * time; confirm against the mailbox query code.
 */
struct ocrdma_dev_attr {
	u8 fw_ver[32];		/* firmware version string */
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;		/* max protection domains */
	u16 max_cq;
	u16 max_cqe;		/* max CQ entries per CQ */
	u16 max_qp;
	u16 max_wqe;		/* max work queue entries per SQ */
	u16 max_rqe;		/* max receive queue entries per RQ */
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_srq_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;	/* max physical-buffer-list pages per MR */
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;	/* outbound RDMA-read depth per QP */
	u16 max_ird_per_qp;	/* inbound RDMA-read depth per QP */

	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;

	u32 wqe_size;		/* hardware WQE stride */
	u32 rqe_size;		/* hardware RQE stride */
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
};
85 | ||
/* One PBL (physical buffer list) page: its kernel virtual address and
 * the DMA address programmed into the hardware.
 */
struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};
90 | ||
/* Generic descriptor for a DMA-coherent hardware queue (used for EQs,
 * the mailbox SQ/CQ, etc.).
 */
struct ocrdma_queue_info {
	void *va;		/* kernel virtual address of the ring */
	dma_addr_t dma;		/* DMA address of the ring */
	u32 size;		/* total ring size in bytes */
	u16 len;		/* number of entries in the ring */
	u16 entry_size;		/* Size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell. */
	u16 head, tail;		/* producer/consumer indices */
	bool created;		/* true once created on the adapter */
	atomic_t used;		/* Number of valid elements in the queue */
};
102 | ||
/* Event queue state: the backing hardware queue plus its interrupt
 * binding.
 */
struct ocrdma_eq {
	struct ocrdma_queue_info q;	/* backing hardware queue */
	u32 vector;			/* interrupt vector for this EQ */
	int cq_cnt;			/* CQs attached to this EQ (per name; confirm) */
	struct ocrdma_dev *dev;		/* owning device */
	char irq_name[32];		/* name passed when requesting the IRQ */
};
110 | ||
/* Mailbox queue pair: a send queue for commands and a completion queue
 * for their responses.
 */
struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	bool rearm_cq;		/* whether the MQ CQ needs re-arming */
};
116 | ||
/* Context for the single outstanding mailbox command; the issuer
 * sleeps on cmd_wait until the completion handler sets cmd_done and
 * fills in the status fields.
 */
struct mqe_ctx {
	struct mutex lock;		/* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;	/* issuer waits here for completion */
	u32 tag;			/* tag matching response to request */
	u16 cqe_status;			/* completion status from the MQ CQE */
	u16 ext_status;			/* extended/additional status */
	bool cmd_done;			/* set when the response has arrived */
};
125 | ||
/* Per-adapter driver state; embeds the ib_device registered with the
 * RDMA core (see get_ocrdma_dev()).
 */
struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;	/* device limits/capabilities */

	struct mutex dev_lock;	/* provides synchronised access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	/* lookup tables — presumably indexed by hardware CQ/QP id; confirm */
	struct ocrdma_cq **cq_tbl;
	struct ocrdma_qp **qp_tbl;

	struct ocrdma_eq meq;		/* main/management EQ (per name; confirm) */
	struct ocrdma_eq *qp_eq_tbl;	/* EQs servicing QP completion CQs */
	int eq_cnt;
	u16 base_eqid;
	u16 max_eq;

	union ib_gid *sgid_tbl;
	/* provided synchronization to sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;		/* nonzero once the GSI (QP1) QP exists */
	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	/* pool of hardware address-handle (AV) entries */
	struct {
		struct ocrdma_av *va;	/* virtual address of the AV table */
		dma_addr_t pa;		/* DMA address of the AV table */
		u32 size;
		u32 num_ah;
		/* provide synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;			/* scratch buffer for mailbox commands */
	struct ocrdma_mq mq;		/* mailbox queue pair */
	struct mqe_ctx mqe_ctx;		/* outstanding mailbox command context */

	struct be_dev_info nic_info;	/* info shared with the be2net NIC driver */

	struct list_head entry;		/* linkage on the global device list */
	struct rcu_head rcu;		/* for RCU-deferred free of this struct */
	int id;				/* driver-assigned device index */
};
175 | ||
/* Completion queue state; embeds the ib_cq handed to the RDMA core
 * (see get_ocrdma_cq()).
 */
struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_dev *dev;
	struct ocrdma_cqe *va;		/* CQE ring, kernel virtual address */
	u32 phase;			/* current valid-phase bit of the ring */
	u32 getp;	/* pointer to pending wrs to
			 * return to stack, wrap arounds
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool armed, solicited;
	bool arm_needed;

	spinlock_t cq_lock ____cacheline_aligned;	/* provide synchronization
							 * to cq polling
							 */
	/* synchronizes cq completion handler invoked from multiple contexts */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;				/* hardware CQ id */
	u16 eqn;			/* EQ this CQ reports to */

	struct ocrdma_ucontext *ucontext;	/* non-NULL for user-space CQs */
	dma_addr_t pa;			/* DMA address of the CQE ring */
	u32 len;			/* ring length in bytes */
	atomic_t use_cnt;

	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};
208 | ||
/* Protection domain; embeds the ib_pd handed to the RDMA core
 * (see get_ocrdma_pd()).
 */
struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;	/* non-NULL for user-space PDs */
	atomic_t use_cnt;
	u32 id;				/* hardware PD id */
	int num_dpp_qp;			/* DPP (direct packet push) QPs on this PD */
	u32 dpp_page;
	bool dpp_enabled;
};
219 | ||
/* Address handle; embeds the ib_ah handed to the RDMA core
 * (see get_ocrdma_ah()).
 */
struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_dev *dev;
	struct ocrdma_av *av;		/* hardware AV entry from dev->av_tbl */
	u16 sgid_index;			/* source GID index for this AH */
	u32 id;
};
227 | ||
/* Bookkeeping for one hardware work queue (an SQ, RQ or SRQ ring). */
struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u32 max_sges;		/* max scatter/gather elements per WQE */
	u32 head, tail;		/* producer/consumer indices */
	u32 entry_size;		/* size of one WQE/RQE in bytes */
	u32 max_cnt;		/* number of entries in the ring */
	u32 max_wqe_idx;
	u16 dbid;		/* qid, where to ring the doorbell. */
	u32 len;		/* total ring size in bytes */
	dma_addr_t pa;		/* DMA address of the ring */
};
239 | ||
/* Shared receive queue; embeds the ib_srq handed to the RDMA core
 * (see get_ocrdma_srq()).
 */
struct ocrdma_srq {
	struct ib_srq ibsrq;
	struct ocrdma_dev *dev;
	u8 __iomem *db;			/* mapped doorbell register */
	/* provide synchronization to multiple context(s) posting rqe */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_qp_hwq_info rq;	/* the shared receive ring */
	struct ocrdma_pd *pd;
	atomic_t use_cnt;
	u32 id;				/* hardware SRQ id */
	u64 *rqe_wr_id_tbl;		/* wr_id per posted RQE, returned in CQEs */
	u32 *idx_bit_fields;		/* bitmap of in-use RQE slots (per name; confirm) */
	u32 bit_fields_len;
};
255 | ||
/* Queue pair; embeds the ib_qp handed to the RDMA core
 * (see get_ocrdma_qp()).
 */
struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	u8 __iomem *sq_db;		/* mapped SQ doorbell register */
	/* provide synchronization to multiple context(s) posting wqe, rqe */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_qp_hwq_info sq;
	/* per-SQ-slot bookkeeping for completion reporting */
	struct {
		uint64_t wrid;		/* caller's wr_id, echoed in the CQE */
		uint16_t dpp_wqe_idx;
		uint16_t dpp_wqe;
		uint8_t signaled;	/* whether this WQE requested a CQE */
		uint8_t rsvd[3];
	} *wqe_wr_id_tbl;
	u32 max_inline_data;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;		/* mapped RQ doorbell register */
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;		/* wr_id per posted RQE */
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;		/* non-NULL when attached to an SRQ */
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	int cap_flags;
	u32 max_ord, max_ird;		/* outbound/inbound RDMA-read depths */

	u32 id;				/* hardware QP id */
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;			/* IRD queue buffer (virtual address) */
};
298 | ||
/* Shift for the "number posted" doorbell field: 24 on GEN2-family
 * devices for QP ids below 64, otherwise 16 (per the macro's users;
 * confirm against the doorbell layout in ocrdma_sli.h).
 *
 * Every use of the qp argument is parenthesized so the macro stays
 * correct when invoked with a non-trivial expression (e.g. &qps[i]).
 */
#define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \
	((((qp)->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \
	  ((qp)->id < 64)) ? 24 : 16)
302 | ||
/* Hardware-facing memory-region state shared by the MR variants. */
struct ocrdma_hw_mr {
	struct ocrdma_dev *dev;
	u32 lkey;			/* local key assigned by hardware */
	u8 fr_mr;			/* fast-register MR flag */
	u8 remote_atomic;		/* access flags requested for this MR */
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;			/* length of the registered region */
	struct ocrdma_pbl *pbl_table;	/* physical buffer list pages */
	u32 num_pbls;
	u32 num_pbes;			/* page-buffer entries across all PBLs */
	u32 pbl_size;
	u32 pbe_size;
	u64 fbo;			/* first-byte offset into the first page */
	u64 va;				/* registered virtual address */
};
323 | ||
/* Memory region; embeds the ib_mr handed to the RDMA core
 * (see get_ocrdma_mr()).
 */
struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;		/* pinned user pages (user MRs only) */
	struct ocrdma_hw_mr hwmr;	/* hardware-facing MR state */
	struct ocrdma_pd *pd;
};
330 | ||
/* Per-process user context; embeds the ib_ucontext handed to the RDMA
 * core (see get_ocrdma_ucontext()).
 */
struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct ocrdma_dev *dev;

	struct list_head mm_head;	/* mmap-able regions for this context */
	struct mutex mm_list_lock;	/* protects list entries of mm type */
	/* per-context address-handle id table shared with user space */
	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};
343 | ||
/* One region a user context is allowed to mmap, keyed by its physical
 * address and length; linked on ocrdma_ucontext.mm_head.
 */
struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};
351 | ||
352 | static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev) | |
353 | { | |
354 | return container_of(ibdev, struct ocrdma_dev, ibdev); | |
355 | } | |
356 | ||
357 | static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext | |
358 | *ibucontext) | |
359 | { | |
360 | return container_of(ibucontext, struct ocrdma_ucontext, ibucontext); | |
361 | } | |
362 | ||
363 | static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd) | |
364 | { | |
365 | return container_of(ibpd, struct ocrdma_pd, ibpd); | |
366 | } | |
367 | ||
368 | static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) | |
369 | { | |
370 | return container_of(ibcq, struct ocrdma_cq, ibcq); | |
371 | } | |
372 | ||
373 | static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp) | |
374 | { | |
375 | return container_of(ibqp, struct ocrdma_qp, ibqp); | |
376 | } | |
377 | ||
378 | static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr) | |
379 | { | |
380 | return container_of(ibmr, struct ocrdma_mr, ibmr); | |
381 | } | |
382 | ||
383 | static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah) | |
384 | { | |
385 | return container_of(ibah, struct ocrdma_ah, ibah); | |
386 | } | |
387 | ||
388 | static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) | |
389 | { | |
390 | return container_of(ibsrq, struct ocrdma_srq, ibsrq); | |
391 | } | |
392 | ||
393 | #endif |