#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "verbs.h"

/* increased for AHG */
#define NUM_DESC 6
/* Hardware limit */
#define MAX_DESC 64
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

#define SDMA_TXREQ_S_OK        0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED   2
#define SDMA_TXREQ_S_SHUTDOWN  3

/* flags bits */
#define SDMA_TXREQ_F_URGENT       0x0001
#define SDMA_TXREQ_F_AHG_COPY     0x0002
#define SDMA_TXREQ_F_USE_AHG      0x0004

#define SDMA_MAP_NONE          0
#define SDMA_MAP_SINGLE        1
#define SDMA_MAP_PAGE          2

#define SDMA_AHG_VALUE_MASK          0xffff
#define SDMA_AHG_VALUE_SHIFT         0
#define SDMA_AHG_INDEX_MASK          0xf
#define SDMA_AHG_INDEX_SHIFT         16
#define SDMA_AHG_FIELD_LEN_MASK      0xf
#define SDMA_AHG_FIELD_LEN_SHIFT     20
#define SDMA_AHG_FIELD_START_MASK    0x1f
#define SDMA_AHG_FIELD_START_SHIFT   24
#define SDMA_AHG_UPDATE_ENABLE_MASK  0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
/* AHG modes */

/*
 * Be aware that the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed when generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG      (1ULL << 63)
#define SDMA_DESC0_LAST_DESC_FLAG       (1ULL << 62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT     48
#define SDMA_DESC0_BYTE_COUNT_WIDTH     14
#define SDMA_DESC0_BYTE_COUNT_MASK \
	((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
	(SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT       0
#define SDMA_DESC0_PHY_ADDR_WIDTH       48
#define SDMA_DESC0_PHY_ADDR_MASK \
	((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
	(SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT    13
#define SDMA_DESC1_HEADER_MODE_WIDTH    3
#define SDMA_DESC1_HEADER_MODE_MASK \
	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT   8
#define SDMA_DESC1_HEADER_INDEX_WIDTH   5
#define SDMA_DESC1_HEADER_INDEX_MASK \
	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT     4
#define SDMA_DESC1_HEADER_DWS_WIDTH     4
#define SDMA_DESC1_HEADER_DWS_MASK \
	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT     2
#define SDMA_DESC1_GENERATION_WIDTH     2
#define SDMA_DESC1_GENERATION_MASK \
	((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
	(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG         (1ULL << 1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG    (1ULL << 0)
enum sdma_states {
	sdma_state_s00_hw_down,
	sdma_state_s10_hw_start_up_halt_wait,
	sdma_state_s15_hw_start_up_clean_wait,
	sdma_state_s20_idle,
	sdma_state_s30_sw_clean_up_wait,
	sdma_state_s40_hw_clean_up_wait,
	sdma_state_s50_hw_halt_wait,
	sdma_state_s60_idle_halt_wait,
	sdma_state_s80_hw_freeze,
	sdma_state_s82_freeze_sw_clean,
	sdma_state_s99_running,
};

enum sdma_events {
	sdma_event_e00_go_hw_down,
	sdma_event_e10_go_hw_start,
	sdma_event_e15_hw_halt_done,
	sdma_event_e25_hw_clean_up_done,
	sdma_event_e30_go_running,
	sdma_event_e40_sw_cleaned,
	sdma_event_e50_hw_cleaned,
	sdma_event_e60_hw_halted,
	sdma_event_e70_go_idle,
	sdma_event_e80_hw_freeze,
	sdma_event_e81_hw_frozen,
	sdma_event_e82_hw_unfreeze,
	sdma_event_e85_link_down,
	sdma_event_e90_sw_halted,
};

struct sdma_set_state_action {
	unsigned op_enable:1;
	unsigned op_intenable:1;
	unsigned op_halt:1;
	unsigned op_cleanup:1;
	unsigned go_s99_running_tofalse:1;
	unsigned go_s99_running_totrue:1;
};

struct sdma_state {
	struct kref kref;
	struct completion comp;
	enum sdma_states current_state;
	unsigned current_op;
	unsigned go_s99_running;
	/* debugging/development */
	enum sdma_states previous_state;
	unsigned previous_op;
	enum sdma_events last_event;
};
/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 *
 * - Initialization and teardown routines to build up
 *   and tear down SDMA
 *
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */
/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows (a sketch follows this comment):
 *
 * Embed a struct iowait in the QP or
 * PQ. The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq. Slabs, pre-allocated lists,
 * and dma pools can be used. Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The txreq must be declared with the sdma_txreq first.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location. It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls. The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx. Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
 * tx. An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent(). For these memory locations, it
 * is the responsibility of the user to handle the unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* calls have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added. An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 */
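/*
 * Illustrative sketch of the txreq life cycle described above; this is
 * not driver code. The names user_txreq, user_done, utx, hdr, hdrlen,
 * page, offset, datalen, and pkt_len are hypothetical stand-ins for a
 * PSM/verbs user of the API:
 *
 *	struct user_txreq {
 *		struct sdma_txreq txreq;   (must be first)
 *		struct iowait wait;        (initialized with iowait_init())
 *	};
 *
 *	static void user_done(struct sdma_txreq *tx, int status, int drained)
 *	{
 *		... may run from ISR/tasklet/thread context - no sleeping ...
 *	}
 *
 *	ret = sdma_txinit(&utx->txreq, 0, pkt_len, user_done);
 *	if (!ret)
 *		ret = sdma_txadd_kvaddr(dd, &utx->txreq, hdr, hdrlen);
 *	if (!ret)
 *		ret = sdma_txadd_page(dd, &utx->txreq, page, offset, datalen);
 *	if (!ret)
 *		ret = sdma_send_txreq(sde, &utx->wait, &utx->txreq);
 */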
/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init(). Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 */
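/*
 * A minimal bring-up sketch of the calls above (not driver code;
 * error handling and the surrounding probe logic are elided, and dd,
 * port, num_vls and vl_engines are assumed to come from the caller):
 *
 *	ret = sdma_init(dd, port);
 *	if (!ret)
 *		sdma_start(dd);        interrupts must already be enabled
 *	...
 *	ret = sdma_map_init(dd, port, num_vls, vl_engines);
 *	...
 *	sdma_exit(dd);                 teardown at unload
 */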
/*
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
	/* private: don't use directly */
	__le64 qw[2];
};

/*
 * struct sdma_desc - canonical fragment descriptor
 *
 * This is the descriptor carried in the tx request
 * corresponding to each fragment.
 */
struct sdma_desc {
	/* private: don't use directly */
	u64 qw[2];
};

struct sdma_txreq;
typedef void (*callback_t)(struct sdma_txreq *, int, int);

/**
 * struct sdma_txreq - the sdma_txreq structure (one per packet)
 * @list: for use by user and by queuing for wait
 *
 * This is the representation of a packet which consists of some
 * number of fragments. Storage is provided within the structure
 * for all fragments.
 *
 * The storage for the descriptors is automatically extended as needed
 * when the current allocation is exceeded.
 *
 * The user (verbs or PSM) may overload this structure with fields
 * specific to their use by putting this struct first in their struct.
 * The method of allocation of the overloaded structure is user
 * dependent.
 *
 * The list is the only public field in the structure.
 */
struct sdma_txreq {
	struct list_head list;
	/* private: */
	struct sdma_desc *descp;
	/* private: */
	void *coalesce_buf;
	/* private: */
	struct iowait *wait;
	/* private: */
	callback_t complete;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	u64 sn;
#endif
	/* private: - used in coalesce/pad processing */
	u16 packet_len;
	/* private: - down-counted to trigger last */
	u16 tlen;
	/* private: flags */
	u16 flags;
	/* private: */
	u16 num_desc;
	/* private: */
	u16 desc_limit;
	/* private: */
	u16 next_descq_idx;
	/* private: */
	struct sdma_desc descs[NUM_DESC];
};

struct verbs_txreq {
	struct hfi1_pio_header phdr;
	struct sdma_txreq txreq;
	struct hfi1_qp *qp;
	struct hfi1_swqe *wqe;
	struct hfi1_mregion *mr;
	struct hfi1_sge_state *ss;
	struct sdma_engine *sde;
	u16 hdr_dwords;
	u16 hdr_inx;
};
/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Access to non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
	/* read mostly */
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	/* private: */
	void __iomem *tail_csr;
	u64 imask;			/* clear interrupt mask */
	u64 idle_mask;
	u64 progress_mask;
	/* private: */
	struct workqueue_struct *wq;
	/* private: */
	volatile __le64 *head_dma;	/* DMA'ed by chip */
	/* private: */
	dma_addr_t head_phys;
	/* private: */
	struct hw_sdma_desc *descq;
	/* private: */
	unsigned descq_full_count;
	struct sdma_txreq **tx_ring;
	/* private: */
	dma_addr_t descq_phys;
	/* private: */
	u32 sdma_mask;
	/* private: */
	struct sdma_state state;
	/* private: */
	u8 sdma_shift;
	/* private: */
	u8 this_idx;			/* zero relative engine */
	/* protect changes to senddmactrl shadow */
	spinlock_t senddmactrl_lock;
	/* private: */
	u64 p_senddmactrl;		/* shadow per-engine SendDmaCtrl */

	/* read/write using tail_lock */
	spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* private: */
	u64 tail_sn;
#endif
	/* private: */
	u32 descq_tail;
	/* private: */
	unsigned long ahg_bits;
	/* private: */
	u16 desc_avail;
	/* private: */
	u16 tx_tail;
	/* private: */
	u16 descq_cnt;

	/* read/write using head_lock */
	/* private: */
	seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* private: */
	u64 head_sn;
#endif
	/* private: */
	u32 descq_head;
	/* private: */
	u16 tx_head;
	/* private: */
	u64 last_status;

	/* private: */
	struct list_head dmawait;

	/* CONFIG SDMA for now, just blindly duplicate */
	/* private: */
	struct tasklet_struct sdma_hw_clean_up_task
		____cacheline_aligned_in_smp;

	/* private: */
	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;
	/* private: */
	struct work_struct err_halt_worker;
	/* private: */
	struct timer_list err_progress_check_timer;
	u32 progress_check_head;
	/* private: */
	struct work_struct flush_worker;
	spinlock_t flushlist_lock;
	/* private: */
	struct list_head flushlist;
};
int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);
/**
 * sdma_empty() - idle engine test
 * @sde: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
	return sde->descq_tail == sde->descq_head;
}

static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
	return sde->descq_cnt -
		(sde->descq_tail -
		 ACCESS_ONCE(sde->descq_head)) - 1;
}
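/*
 * Worked example of the free-count math above (assuming, as the u32
 * head/tail fields suggest, free-running counters that are masked
 * down to ring indexes when used): with descq_cnt = 1024,
 * descq_tail = 10 and descq_head = 5,
 * freecnt = 1024 - (10 - 5) - 1 = 1018. One slot is always left
 * unused so that a completely full ring can be distinguished from an
 * empty one.
 */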
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
	return sde->descq_cnt - sdma_descq_freecnt(sde);
}

/*
 * Either head_lock or tail_lock is required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
	return engine->state.current_state == sdma_state_s99_running;
}

/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 */
static inline int sdma_running(struct sdma_engine *engine)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->tail_lock, flags);
	ret = __sdma_running(engine);
	spin_unlock_irqrestore(&engine->tail_lock, flags);
	return ret;
}
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen);

/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: number of AHG descriptors to apply (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from the ASIC entry to use
 * @cb: callback
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the
 * user-independent fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency-sensitive situations where
 * the completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry. SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg(). The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used. Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted. The callback will be provided this tx, a status, and
 * a flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait structure had been used, indicates that the
 * iowait sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise. The sdma_txadd_*
 * entrances will pad with a descriptor referencing 1 - 3 bytes when the
 * number of bytes specified in tlen has been supplied to the sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header. This is for cases where the stored header is
 * larger than the header to be used in a packet. This is typical
 * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
 * an RDMA_WRITE_MIDDLE.
 */
static inline int sdma_txinit_ahg(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	u8 ahg_entry,
	u8 num_ahg,
	u32 *ahg,
	u8 ahg_hlen,
	void (*cb)(struct sdma_txreq *, int, int))
{
	if (tlen == 0)
		return -ENODATA;
	if (tlen > MAX_SDMA_PKT_SIZE)
		return -EMSGSIZE;
	tx->desc_limit = ARRAY_SIZE(tx->descs);
	tx->descp = &tx->descs[0];
	INIT_LIST_HEAD(&tx->list);
	tx->num_desc = 0;
	tx->flags = flags;
	tx->complete = cb;
	tx->coalesce_buf = NULL;
	tx->wait = NULL;
	tx->tlen = tx->packet_len = tlen;
	tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
	tx->descs[0].qw[1] = 0;
	if (flags & SDMA_TXREQ_F_AHG_COPY)
		tx->descs[0].qw[1] |=
			(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
				<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
			(((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
				<< SDMA_DESC1_HEADER_MODE_SHIFT);
	else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
		_sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
	return 0;
}
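/*
 * Hypothetical sketch of AHG use (not driver code). An AHG entry is
 * reserved on the engine, seeded once with a full header copy, and
 * later packets patch only the changing header fields; tx, tlen,
 * num_ahg, ahg, ahg_hlen and cb stand in for caller state:
 *
 *	int ahg_idx = sdma_ahg_alloc(sde);
 *
 *	if (ahg_idx >= 0) {
 *		first packet: copy the full header to the chip entry
 *		sdma_txinit_ahg(tx, SDMA_TXREQ_F_AHG_COPY, tlen,
 *				ahg_idx, 0, NULL, 0, cb);
 *		...
 *		later packets: apply up to 9 AHG field updates
 *		sdma_txinit_ahg(tx, SDMA_TXREQ_F_USE_AHG, tlen,
 *				ahg_idx, num_ahg, ahg, ahg_hlen, cb);
 *		...
 *		sdma_ahg_free(sde, ahg_idx);
 *	}
 */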
/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the
 * user-independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency-sensitive situations where
 * the completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg(). The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used. Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status. The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 */
static inline int sdma_txinit(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	void (*cb)(struct sdma_txreq *, int, int))
{
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
/* helpers - don't use */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
	return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
		>> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
		>> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
		>> SDMA_DESC0_PHY_ADDR_SHIFT;
}

static inline void make_tx_sdma_desc(
	struct sdma_txreq *tx,
	int type,
	dma_addr_t addr,
	size_t len)
{
	struct sdma_desc *desc = &tx->descp[tx->num_desc];

	if (!tx->num_desc) {
		/* first descriptor: qw[0]/qw[1] were already seeded,
		 * including any AHG mode, by sdma_txinit*() */
		desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
				<< SDMA_DESC1_GENERATION_SHIFT;
	} else {
		desc->qw[0] = 0;
		desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
				<< SDMA_DESC1_GENERATION_SHIFT;
	}
	desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
			<< SDMA_DESC0_PHY_ADDR_SHIFT) |
			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
			<< SDMA_DESC0_BYTE_COUNT_SHIFT);
}

/* helpers to extend the txreq */
int _extend_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);

/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
				  struct sdma_txreq *tx)
{
	tx->descp[tx->num_desc].qw[0] |=
		SDMA_DESC0_LAST_DESC_FLAG;
	tx->descp[tx->num_desc].qw[1] |=
		dd->default_desc1;
	if (tx->flags & SDMA_TXREQ_F_URGENT)
		tx->descp[tx->num_desc].qw[1] |=
			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
			 SDMA_DESC1_INT_REQ_FLAG);
}
static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval = 0;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval)
			return rval;
	}
	make_tx_sdma_desc(
		tx,
		type,
		addr, len);
	WARN_ON(len > tx->tlen);
	tx->tlen -= len;
	/* special cases for last */
	if (!tx->tlen) {
		if (tx->packet_len & (sizeof(u32) - 1))
			rval = _pad_sdma_tx_descs(dd, tx);
		else
			_sdma_close_tx(dd, tx);
	}
	tx->num_desc++;
	return rval;
}
/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend descriptor array or couldn't allocate coalesce
 * buffer.
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len)
{
	dma_addr_t addr =
		dma_map_page(
			&dd->pcidev->dev,
			page,
			offset,
			len,
			DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		sdma_txclean(dd, tx);
		return -ENOSPC;
	}
	return _sdma_txadd_daddr(
		dd, SDMA_MAP_PAGE, tx, addr, len);
}
/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the address is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing
 * for this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */
static inline int sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}
/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the buffer is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr
 * and len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	void *kvaddr,
	u16 len)
{
	dma_addr_t addr =
		dma_map_single(
			&dd->pcidev->dev,
			kvaddr,
			len,
			DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		sdma_txclean(dd, tx);
		return -ENOSPC;
	}
	return _sdma_txadd_daddr(
		dd, SDMA_MAP_SINGLE, tx, addr, len);
}
struct iowait;

int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx);
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list);

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: 16 bit value to place in the header field
 * @dwindex: dword index of the header field to update
 * @startbit: starting bit of the field within the dword
 * @bits: width of the field in bits
 *
 * Build and return a 32 bit AHG descriptor.
 */
static inline u32 sdma_build_ahg_descriptor(
	u16 data,
	u8 dwindex,
	u8 startbit,
	u8 bits)
{
	return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
		((startbit & SDMA_AHG_FIELD_START_MASK) <<
		SDMA_AHG_FIELD_START_SHIFT) |
		((bits & SDMA_AHG_FIELD_LEN_MASK) <<
		SDMA_AHG_FIELD_LEN_SHIFT) |
		((dwindex & SDMA_AHG_INDEX_MASK) <<
		SDMA_AHG_INDEX_SHIFT) |
		((data & SDMA_AHG_VALUE_MASK) <<
		SDMA_AHG_VALUE_SHIFT));
}
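/*
 * Example with hypothetical field values: build an AHG descriptor
 * that patches the 8-bit field starting at bit 16 of header dword 2
 * with the value 0x5a. Each such 32 bit word becomes one entry of
 * the ahg array passed to sdma_txinit_ahg().
 *
 *	u32 desc = sdma_build_ahg_descriptor(0x5a, 2, 16, 8);
 */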
/**
 * sdma_progress() - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress. This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
				     struct sdma_txreq *tx)
{
	if (read_seqretry(&sde->head_lock, seq)) {
		sde->desc_avail = sdma_descq_freecnt(sde);
		if (tx->num_desc > sde->desc_avail)
			return 0;
		return 1;
	}
	return 0;
}
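/*
 * A sketch of the intended call pattern (hypothetical sleep routine,
 * not driver code): sample the head seqcount before deciding to queue,
 * then let sdma_progress() report whether the ring moved enough for
 * this txreq in the meantime:
 *
 *	seq = read_seqbegin(&sde->head_lock);
 *	if (sdma_descq_freecnt(sde) < tx->num_desc) {
 *		if (sdma_progress(sde, seq, tx))
 *			goto retry;        ring advanced; try again
 *		... queue the iowait and sleep ...
 *	}
 */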
/**
 * sdma_iowait_schedule() - schedule a wait structure for progress
 * @sde: sdma_engine to schedule
 * @wait: wait struct to schedule
 *
 * This function schedules the iowait structure
 * embedded in the QP or PQ on the engine's workqueue.
 */
static inline void sdma_iowait_schedule(
	struct sdma_engine *sde,
	struct iowait *wait)
{
	iowait_schedule(wait, sde->wq);
}

/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
/*
 * The diagram below details the relationship of the mapping structures.
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * For example:
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 *  dd->sdma_map
 *       |                                 sdma_map_elem[0]
 *       |                                +--------------------+
 *       v                                |       mask         |
 *  sdma_vl_map                           |--------------------|
 * +--------------------------+           | sde[0] -> eng 1    |
 * |    list (RCU)            |           |--------------------|
 * |--------------------------|         ->| sde[1] -> eng 2    |
 * |    mask                  |      --/  |--------------------|
 * |--------------------------|    -/     |        *           |
 * |    actual_vls (max 8)    |  -/       |--------------------|
 * |--------------------------| -/        | sde[n] -> eng n    |
 * |    vls (max 8)           |-/         +--------------------+
 * |--------------------------|
 * |    map[0]                |
 * |--------------------------|           +--------------------+
 * |    map[1]                |---        |       mask         |
 * |--------------------------|   \----   |--------------------|
 * |       *                  |        \--| sde[0] -> eng 1+n  |
 * |       *                  |           |--------------------|
 * |       *                  |         ->| sde[1] -> eng 2+n  |
 * |--------------------------|           |--------------------|
 * |   map[vls - 1]           |-          |        *           |
 * +--------------------------+ \-        |--------------------|
 *                                \-      | sde[m] -> eng m+n  |
 *                                  \     +--------------------+
 *                                   \-
 *                                     \
 *                                      \- +--------------------+
 *                                        \|       mask         |
 *                                         |--------------------|
 *                                         | sde[0] -> eng 1+m+n|
 *                                         |--------------------|
 *                                        >| sde[1] -> eng 2+m+n|
 *                                         |--------------------|
 *                                         |         *          |
 *                                         |--------------------|
 *                                         | sde[o] -> eng o+m+n|
 *                                         +--------------------+
 */
/**
 * struct sdma_map_elem - mapping for a vl
 * @mask - selector mask
 * @sde - array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce an index into the trailing
 * array of sdes.
 */
struct sdma_map_elem {
	u32 mask;
	struct sdma_engine *sde[0];
};

/**
 * struct sdma_vl_map - mapping for all vls
 * @list - rcu head for free callback
 * @mask - vl mask to "mod" the vl to produce an index into the map array
 * @actual_vls - number of vls
 * @vls - number of vls rounded to next power of 2
 * @map - array of sdma_map_elem entries
 *
 * This is the parent mapping structure. The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct sdma_map_elem *map[0];
};
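/*
 * Illustrative walk of the mapping structures above (the real lookup
 * lives in sdma_select_engine_vl(); this sketch elides the RCU read
 * lock and validity checks):
 *
 *	struct sdma_vl_map *m = rcu_dereference(dd->sdma_map);
 *	struct sdma_map_elem *e = m->map[vl & m->mask];
 *	struct sdma_engine *sde = e->sde[selector & e->mask];
 *
 * Because the per-vl engine counts are rounded up to a power of 2,
 * the "mod" by the mask reduces to a bitwise AND.
 */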
int sdma_map_init(
	struct hfi1_devdata *dd,
	u8 port,
	u8 num_vls,
	u8 *vl_engines);

/* slow path */
void _sdma_engine_progress_schedule(struct sdma_engine *sde);

/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * This is the fast path.
 */
static inline void sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
		return;
	_sdma_engine_progress_schedule(sde);
}

struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl);

void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);

#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *);
#endif

static inline char *slashstrip(char *s)
{
	char *r = s;

	while (*s)
		if (*s++ == '/')
			r = s;
	return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

#endif /* _HFI1_SDMA_H */