/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

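/* SPQ_HIGH_PRI_RESERVE_DEFAULT - number of SPQ ring elements that
 * qed_spq_pend_post() keeps free so a high-priority ramrod can always be
 * posted. SPQ_BLOCK_SLEEP_LENGTH - number of 5-10ms polling iterations
 * qed_spq_block() performs before declaring a ramrod stuck.
 */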
#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
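/* In BLOCK/EBLOCK mode the poster waits for the ramrod to complete instead
 * of being notified asynchronously: the callback below merely raises a
 * 'done' flag in the completion cookie, which qed_spq_block() polls.
 */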
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data,
                                u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        smp_wmb();
}

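/* Poll for the completion flag set by qed_spq_blocking_cb(). Each pass makes
 * up to SPQ_BLOCK_SLEEP_LENGTH iterations with a 5-10ms sleep between polls;
 * if the first pass times out, ask the MCP to drain outstanding work and
 * retry once before giving up with -EBUSY.
 */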
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
        int rc;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                /* make sure we see the completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != 0)
                DP_NOTICE(p_hwfn, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* make sure we see the completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }

        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

        return -EBUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
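/* Prepare an entry for posting: the blocking modes get the internal blocking
 * completion callback wired in; QED_SPQ_MODE_CB leaves the caller-provided
 * callback untouched.
 */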
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                   struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
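/* Program the core connection context for the SPQ: enable the relevant
 * XSTORM aggregation flags, select the QM physical queue for LB_TC, and
 * point firmware at the SPQ chain and ConsQ chain base addresses.
 */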
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        u16 pq;
        struct qed_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        union qed_qm_pq_params pq_params;
        int rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

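/* Copy a prepared entry into the next free SPQ ring element and ring the
 * XCM doorbell so firmware picks it up. The chain producer index doubles as
 * the 'echo' value that the eventual completion EQE carries back.
 */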
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq,
                           struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* make sure the producer index is up to date */
        rmb();

        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* do not reorder */
        barrier();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure doorbell is rung */
        mmiowb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        DP_NOTICE(p_hwfn,
                  "Unknown Async completion for protocol: %d\n",
                  p_eqe->protocol_id);
        return -EINVAL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
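/* Publish the driver's event ring position by writing the given index into
 * the ustorm EQE consumer slot for this PF; the offset name suggests this is
 * how firmware learns how far the driver has processed the event ring.
 */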
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
                        u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}

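/* EQ interrupt handler: walk the event ring from the local consumer up to a
 * snapshot of the firmware consumer, dispatching each EQE either to the
 * async handler or to qed_spq_completion(), then publish the new position.
 */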
int qed_eq_completion(struct qed_hwfn *p_hwfn,
                      void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
                            u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
                return NULL;
        }

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn,
                            qed_eq_completion,
                            p_eq,
                            &p_eq->eq_sb_index,
                            &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
                  struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
                 struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static int qed_cqe_completion(
        struct qed_hwfn *p_hwfn,
        struct eth_slow_path_rx_cqe *cqe,
        enum protocol_type protocol)
{
        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
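/* (Re)initialize SPQ state: reset the pending lists, link each entry in the
 * DMA-coherent array to the physical address of its own ramrod data, rebuild
 * the free pool, and reprogram the connection context.
 */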
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        unsigned int i = 0;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        for (i = 0; i < p_spq->chain.capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        struct qed_spq_entry *p_virt = NULL;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
                return -ENOMEM;
        }

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            0, /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    p_spq->chain.capacity *
                                    sizeof(struct qed_spq_entry),
                                    &p_phys,
                                    GFP_KERNEL);

        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (!p_spq)
                return;

        if (p_spq->p_virt)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_spq->chain.capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt,
                                  p_spq->p_phys);

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}

int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry,
                                         list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                          struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry *p_ent,
                  enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list,
                                      &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry,
                                                 list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire
                         * ring entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
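/* Move entries from the given list onto completion_pending and post them to
 * hardware, as long as more than keep_reserve elements remain free in the
 * SPQ chain; the reserve leaves room for high-priority ramrods.
 */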
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head,
                             u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);

                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

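/* Drain unlimited_pending into the regular pending list while free-pool
 * entries are available, then post the pending list, keeping
 * SPQ_HIGH_PRI_RESERVE_DEFAULT chain elements in reserve.
 */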
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry,
                                         list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

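/* Post a single ramrod: fill in the completion callback, queue the entry
 * according to its priority and kick the pending list. In EBLOCK mode, also
 * wait for the completion and return the entry to the free pool here, since
 * the completion path leaves EBLOCK entries to their poster.
 */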
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent,
                 u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

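/* Handle a slow-path completion EQE: find the completion_pending entry whose
 * 'echo' matches, release ring elements (tracking out-of-order completions
 * in a bitmap), invoke the entry's callback and re-kick the pending queue.
 */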
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq *p_spq;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_spq_entry *tmp;
        struct qed_spq_entry *found = NULL;
        int rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
                                 list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        bitmap_set(p_spq->p_comp_bitmap, pos, 1);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                bitmap_clear(p_spq->p_comp_bitmap,
                                             p_spq->comp_bitmap_idx, 1);
                                p_spq->comp_bitmap_idx =
                                        (p_spq->comp_bitmap_idx + 1) %
                                        SPQ_RING_SIZE;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE completes\n");
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
                   found->comp_cb.function, found->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
                /* EBLOCK is responsible for freeing its own entry */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

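/* The ConsQ (consolidation queue) chain is never produced or consumed in
 * this file; the driver only allocates it and points the SPQ connection
 * context at its base address (see qed_spq_hw_initialize()). Elements are
 * 0x80 bytes each.
 */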
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
                return NULL;
        }

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80,
                            &p_consq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
                     struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
                    struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}