/*
 *  linux/drivers/net/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT          12
#define EHEA_PAGESIZE           (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE           (1UL << 24)
#define EHEA_PAGES_PER_SECTION  (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT      34
#define EHEA_HUGEPAGE_SIZE      (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK  ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)

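/*
 * Illustrative sketch (not part of the original file): a wr_id is packed
 * and unpacked with the EHEA_BMASK_SET()/EHEA_BMASK_GET() helpers from
 * ehea.h, e.g.
 *
 *      wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *      index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
 *
 * "wr_id", "index" and "cqe" are placeholders for illustration only.
 */
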
struct ehea_vsgentry {
        u64 vaddr;
        u32 l_key;
        u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES  252
#define SWQE2_MAX_IMM            (0xD0 - 0x30)
#define SWQE3_MAX_IMM            224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE 32

struct ehea_swqe {
        u64 wr_id;
        u16 tx_control;
        u16 vlan_tag;
        u8 reserved1;
        u8 ip_start;
        u8 ip_end;
        u8 immediate_data_length;
        u8 tcp_offset;
        u8 reserved2;
        u16 tcp_end;
        u8 wrap_tag;
        u8 descriptors;         /* number of valid descriptors in WQE */
        u16 reserved3;
        u16 reserved4;
        u16 mss;
        u32 reserved5;
        union {
                /* Send WQE Format 1 */
                struct {
                        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
                } no_immediate_data;

                /* Send WQE Format 2 */
                struct {
                        struct ehea_vsgentry sg_entry;
                        /* 0x30 */
                        u8 immediate_data[SWQE2_MAX_IMM];
                        /* 0xd0 */
                        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
                } immdata_desc __attribute__ ((packed));

                /* Send WQE Format 3 */
                struct {
                        u8 immediate_data[SWQE3_MAX_IMM];
                } immdata_nodesc;
        } u;
};

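/*
 * Illustrative sketch (assumptions, not original driver code): a small
 * frame can go out as Send WQE Format 3 by placing it entirely in the
 * immediate-data area, roughly
 *
 *      memcpy(swqe->u.immdata_nodesc.immediate_data, skb->data, skb->len);
 *      swqe->immediate_data_length = skb->len;
 *      swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
 *
 * "swqe" and "skb" are placeholders; the real WQE construction lives in
 * ehea_main.c.
 */
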
struct ehea_rwqe {
        u64 wr_id;              /* work request ID */
        u8 reserved1[5];
        u8 data_segments;
        u16 reserved2;
        u64 reserved3;
        u64 reserved4;
        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT    0x0400

#define EHEA_CQE_TYPE_RQ            0x60
#define EHEA_CQE_STAT_ERR_MASK      0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK  0xF
#define EHEA_CQE_STAT_ERR_TCP       0x4000
#define EHEA_CQE_STAT_ERR_IP        0x2000
#define EHEA_CQE_STAT_ERR_CRC       0x1000

struct ehea_cqe {
        u64 wr_id;              /* work request ID from WQE */
        u8 type;
        u8 valid;
        u16 status;
        u16 reserved1;
        u16 num_bytes_transfered;
        u16 vlan_tag;
        u16 inet_checksum_value;
        u8 reserved2;
        u8 header_length;
        u16 reserved3;
        u16 page_offset;
        u16 wqe_count;
        u32 qp_token;
        u32 timestamp;
        u32 reserved4;
        u64 reserved5[3];
};

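/*
 * Illustrative sketch (not original driver code): completion status is
 * checked against the masks above, roughly
 *
 *      if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *              if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 *                      ... count a CRC error ...
 *              if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK)
 *                      ... treat as fatal ...
 *      }
 *
 * The actual error-handling policy lives in ehea_main.c; this only shows
 * how the masks are meant to be applied.
 */
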
#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)

struct ehea_eqe {
        u64 entry;
};
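
/*
 * Illustrative sketch (not original driver code): an event queue entry is
 * a single 64-bit word decoded with the bitmasks above, e.g.
 *
 *      struct ehea_eqe *eqe = ehea_poll_eq(eq);
 *      if (eqe && EHEA_BMASK_GET(EHEA_EQE_IS_CQE, eqe->entry))
 *              token = EHEA_BMASK_GET(EHEA_EQE_CQ_TOKEN, eqe->entry);
 *
 * "eq" and "token" are placeholders for illustration only.
 */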

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
        struct ehea_page *current_page;

        if (q_offset >= queue->queue_length)
                q_offset -= queue->queue_length;
        current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
        return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
        return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset >= queue->queue_length) {
                queue->current_q_offset = 0;
                /* toggle the valid flag */
                queue->toggle_state = (~queue->toggle_state) & 1;
        }
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);
        hw_qeit_inc(queue);
        return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
        struct ehea_cqe *retvalue = hw_qeit_get(queue);
        u8 valid = retvalue->valid;
        void *pref;

        if ((valid >> 7) == (queue->toggle_state & 1)) {
                /* this is a good one */
                hw_qeit_inc(queue);
                pref = hw_qeit_calc(queue, queue->current_q_offset);
                prefetch(pref);
                prefetch(pref + 128);
        } else
                retvalue = NULL;
        return retvalue;
}

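/*
 * Illustrative note (not original driver code): an entry is "new" when the
 * high bit of its valid byte matches the consumer's toggle_state, which
 * flips on every wrap in hw_qeit_inc().  A consumer can therefore drain
 * pending entries with
 *
 *      while ((cqe = hw_qeit_get_inc_valid(queue)))
 *              ... process cqe ...
 *
 * where "cqe" and the processing step are placeholders.
 */
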
static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
        struct ehea_cqe *retvalue = hw_qeit_get(queue);
        void *pref;
        u8 valid;

        pref = hw_qeit_calc(queue, queue->current_q_offset);
        prefetch(pref);
        prefetch(pref + 128);
        prefetch(pref + 256);
        valid = retvalue->valid;
        if (!((valid >> 7) == (queue->toggle_state & 1)))
                retvalue = NULL;
        return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
        queue->current_q_offset = 0;
        return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
        u64 last_entry_in_q = queue->queue_length - queue->qe_size;
        void *retvalue;

        retvalue = hw_qeit_get(queue);
        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset > last_entry_in_q) {
                queue->current_q_offset = 0;
                queue->toggle_state = (~queue->toggle_state) & 1;
        }
        return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);
        u32 qe = *(u8 *)retvalue;
        if ((qe >> 7) == (queue->toggle_state & 1))
                hw_qeit_eq_get_inc(queue);
        else
                retvalue = NULL;
        return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
                                                   int rq_nr)
{
        struct hw_queue *queue;

        if (rq_nr == 1)
                queue = &qp->hw_rqueue1;
        else if (rq_nr == 2)
                queue = &qp->hw_rqueue2;
        else
                queue = &qp->hw_rqueue3;

        return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
                                              int *wqe_index)
{
        struct hw_queue *queue = &my_qp->hw_squeue;
        struct ehea_swqe *wqe_p;

        *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
        wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

        return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
        iosync();
        ehea_update_sqa(my_qp, 1);
}

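/*
 * Illustrative sketch (not original driver code): the transmit path in
 * ehea_main.c roughly does
 *
 *      swqe = ehea_get_swqe(qp, &wqe_index);
 *      ... fill wr_id, tx_control and payload/descriptors ...
 *      ehea_post_swqe(qp, swqe);
 *
 * i.e. the next send WQE is fetched, filled in, and then handed to the
 * hardware; ehea_post_swqe() orders the stores with iosync() before
 * notifying the send queue via ehea_update_sqa().
 */
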
static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
        struct hw_queue *queue = &qp->hw_rqueue1;

        *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
        return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
        hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
        hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
        return hw_qeit_get_valid(&my_cq->hw_queue);
}
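
/*
 * Illustrative sketch (not original driver code): completions are
 * typically drained by peeking and then advancing, e.g.
 *
 *      cqe = ehea_poll_cq(cq);
 *      while (cqe) {
 *              ehea_inc_cq(cq);
 *              ... process cqe, check cqe->status for errors ...
 *              cqe = ehea_poll_cq(cq);
 *      }
 *
 * "cq" and the processing step are placeholders for illustration only.
 */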

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
        EHEA_EQ = 0,            /* event queue */
        EHEA_NEQ                /* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               enum ehea_eq_type type,
                               const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
                               u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
                               struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif  /* __EHEA_QMR_H__ */