/* drivers/staging/crystalhd/crystalhd_hw.c */
1 /***************************************************************************
2 * Copyright (c) 2005-2009, Broadcom Corporation.
3 *
4 * Name: crystalhd_hw . c
5 *
6 * Description:
7 * BCM70010 Linux driver HW layer.
8 *
9 **********************************************************************
10 * This file is part of the crystalhd device driver.
11 *
12 * This driver is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation, version 2 of the License.
15 *
16 * This driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
23 **********************************************************************/
24
25 #include "crystalhd.h"
26
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30
31 /* Functions internal to this file */
32
/* Select the sources routed to the two on-chip UARTs. */
static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
{
	/* UART A carries the stream output, UART B the outer decoder. */
	bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
	bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
}
38
39
/*
 * Program the decoder's SDRAM timing parameters (all values derived
 * from nanosecond timings at a 5 ns clock period), then run the init
 * sequence of precharge / mode-register / refresh writes and finally
 * set the refresh rate.  The write ORDER is part of the protocol.
 */
static void crystalhd_start_dram(struct crystalhd_adp *adp)
{
	bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) << 0) |
	/* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */
		((15 / 5 - 1) << 7) |	/* trp */
		((10 / 5 - 1) << 10) |	/* trrd */
		((15 / 5 + 1) << 12) |	/* twr */
		((2 + 1) << 16) |	/* twtr */
		((70 / 5 - 2) << 19) |	/* trfc */
		(0 << 23));

	bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
	bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
	bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
	bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
	bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
	bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
	bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
	/* setting the refresh rate here */
	bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
}
61
62
/*
 * Take the link and decoder out of reset.
 *
 * Sequence: power up and select the core PLL clock, raise the bus
 * arbiter timeout for the full-speed clock, release the BCM7412
 * decoder from reset, disable OTP secure modes, clear a PCIe
 * transaction-config bit, and bump the 2.5V regulator setting.
 * The register ordering and the sleeps between steps are part of the
 * hardware protocol.  Always returns true.
 */
static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
{
	union link_misc_perst_deco_ctrl rst_deco_cntrl;
	union link_misc_perst_clk_ctrl rst_clk_cntrl;
	uint32_t temp;

	/*
	 * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
	 * delay to allow PLL to lock Clear alternate clock, stop clock bits
	 */
	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.pll_pwr_dn = 0;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.stop_core_clk = 0;
	rst_clk_cntrl.sel_alt_clk = 0;

	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	/*
	 * Bus Arbiter Timeout: GISB_ARBITER_TIMER
	 * Set internal bus arbiter timeout to 40us based on core clock speed
	 * (63MHz * 40us = 0x9D8)
	 */
	crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);

	/*
	 * Decoder clocks: MISC_PERST_DECODER_CTRL
	 * Enable clocks while 7412 reset is asserted, delay
	 * De-assert 7412 reset
	 */
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
					 MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.stop_bcm_7412_clk = 0;
	rst_deco_cntrl.bcm7412_rst = 1;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
					 rst_deco_cntrl.whole_reg);
	msleep_interruptible(10);

	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
					 MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.bcm7412_rst = 0;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
					 rst_deco_cntrl.whole_reg);
	msleep_interruptible(50);

	/* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
	crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);

	/* Clear bit 29 of 0x404 */
	temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
	temp &= ~BC_BIT(29);
	crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);

	/* 2.5V regulator must be set to 2.6 volts (+6%) */
	/* FIXME: jarod: what's the point of this reg read? */
	/* NOTE(review): the read result is discarded; possibly a required
	 * read-before-write on this register — confirm with HW docs. */
	temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
	crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);

	return true;
}
127
/*
 * Put the device back into reset.
 *
 * Sequence: stop the BCM7412 decoder clock, lower the bus-arbiter
 * timeout to match the slow alternate clock, switch the link to the
 * alternate clock and power down the PLL, then soft-reset the link
 * core while preserving the PCIe transaction-configuration register
 * across the reset.  Always returns true.
 */
static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
{
	union link_misc_perst_deco_ctrl rst_deco_cntrl;
	union link_misc_perst_clk_ctrl  rst_clk_cntrl;
	uint32_t temp;

	/*
	 * Decoder clocks: MISC_PERST_DECODER_CTRL
	 * Assert 7412 reset, delay
	 * Assert 7412 stop clock
	 */
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
					 MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.stop_bcm_7412_clk = 1;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
					 rst_deco_cntrl.whole_reg);
	msleep_interruptible(50);

	/* Bus Arbiter Timeout: GISB_ARBITER_TIMER
	 * Set internal bus arbiter timeout to 40us based on core clock speed
	 * (6.75MHZ * 40us = 0x10E)
	 */
	crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);

	/* Link clocks: MISC_PERST_CLOCK_CTRL
	 * Stop core clk, delay
	 * Set alternate clk, delay, set PLL power down
	 */
	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.stop_core_clk = 1;
	rst_clk_cntrl.sel_alt_clk = 1;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.pll_pwr_dn = 1;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);

	/*
	 * Read and restore the Transaction Configuration Register
	 * after core reset
	 */
	temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);

	/*
	 * Link core soft reset: MISC3_RESET_CTRL
	 * - Write BIT[0]=1 and read it back for core reset to take place
	 */
	crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
	msleep_interruptible(50);

	/* restore the transaction configuration register */
	crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);

	return true;
}
185
186 static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
187 {
188 union intr_mask_reg intr_mask;
189 intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
190 intr_mask.mask_pcie_err = 1;
191 intr_mask.mask_pcie_rbusmast_err = 1;
192 intr_mask.mask_pcie_rgr_bridge = 1;
193 intr_mask.mask_rx_done = 1;
194 intr_mask.mask_rx_err = 1;
195 intr_mask.mask_tx_done = 1;
196 intr_mask.mask_tx_err = 1;
197 crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
198
199 return;
200 }
201
202 static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
203 {
204 union intr_mask_reg intr_mask;
205 intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
206 intr_mask.mask_pcie_err = 1;
207 intr_mask.mask_pcie_rbusmast_err = 1;
208 intr_mask.mask_pcie_rgr_bridge = 1;
209 intr_mask.mask_rx_done = 1;
210 intr_mask.mask_rx_err = 1;
211 intr_mask.mask_tx_done = 1;
212 intr_mask.mask_tx_err = 1;
213 crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
214
215 return;
216 }
217
218 static void crystalhd_clear_errors(struct crystalhd_adp *adp)
219 {
220 uint32_t reg;
221
222 /* FIXME: jarod: wouldn't we want to write a 0 to the reg?
223 Or does the write clear the bits specified? */
224 reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
225 if (reg)
226 crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
227
228 reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
229 if (reg)
230 crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);
231
232 reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
233 if (reg)
234 crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
235 }
236
237 static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
238 {
239 uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
240
241 if (intr_sts) {
242 crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
243
244 /* Write End Of Interrupt for PCIE */
245 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
246 }
247 }
248
/*
 * Pulse the decoder host software reset (assert, wait 50 ms,
 * de-assert), then disable stuffing via MISC2_GLOBAL_CTRL bit 8.
 */
static void crystalhd_soft_rst(struct crystalhd_adp *adp)
{
	uint32_t val;

	/* Assert c011 soft reset*/
	bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
	msleep_interruptible(50);

	/* Release c011 soft reset*/
	bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);

	/* Disable Stuffing..*/
	val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
	val |= BC_BIT(8);
	crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
}
265
266 static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
267 {
268 uint32_t i = 0, reg;
269
270 crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
271
272 crystalhd_reg_wr(adp, AES_CMD, 0);
273 crystalhd_reg_wr(adp, AES_CONFIG_INFO,
274 (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
275 crystalhd_reg_wr(adp, AES_CMD, 0x1);
276
277 /* FIXME: jarod: I've seen this fail,
278 and introducing extra delays helps... */
279 for (i = 0; i < 100; ++i) {
280 reg = crystalhd_reg_rd(adp, AES_STATUS);
281 if (reg & 0x1)
282 return true;
283 msleep_interruptible(10);
284 }
285
286 return false;
287 }
288
289
/*
 * Bring the BCM70012 to an operational state: disable ASPM L1, take
 * the link out of reset, reset interrupt/error state, enable DMA
 * debug and global-control options, soft-reset the decoder and start
 * DRAM and the UARTs.  Returns false only when the link fails to come
 * out of reset.
 */
static bool crystalhd_start_device(struct crystalhd_adp *adp)
{
	uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;

	BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");

	/* Keep the PCIe link out of ASPM L1 while the device is active. */
	reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
	reg_pwrmgmt &= ~ASPM_L1_ENABLE;

	crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);

	if (!crystalhd_bring_out_of_rst(adp)) {
		BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
		return false;
	}

	crystalhd_disable_interrupts(adp);

	crystalhd_clear_errors(adp);

	crystalhd_clear_interrupts(adp);

	crystalhd_enable_interrupts(adp);

	/* Enable the option for getting the total no. of DWORDS
	 * that have been transferred by the RXDMA engine
	 */
	dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
	dbg_options |= 0x10;
	crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);

	/* Enable PCI Global Control options */
	glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
	glb_cntrl |= 0x100;
	glb_cntrl |= 0x8000;
	crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);

	/* NOTE(review): interrupts were already enabled above; this second
	 * call looks redundant — confirm before removing. */
	crystalhd_enable_interrupts(adp);

	crystalhd_soft_rst(adp);
	crystalhd_start_dram(adp);
	crystalhd_enable_uarts(adp);

	return true;
}
335
/*
 * Stop the BCM70012: mask and clear all interrupts and errors, put
 * the link into reset, then re-enable ASPM L1 and assert the PCI
 * clock request for low-power idle.  Always returns true (a reset
 * failure is only logged).
 */
static bool crystalhd_stop_device(struct crystalhd_adp *adp)
{
	uint32_t reg;

	BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
	/* Clear and disable interrupts */
	crystalhd_disable_interrupts(adp);
	crystalhd_clear_errors(adp);
	crystalhd_clear_interrupts(adp);

	if (!crystalhd_put_in_reset(adp))
		BCMLOG_ERR("Failed to Put Link To Reset State\n");

	reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
	reg |= ASPM_L1_ENABLE;
	crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);

	/* Set PCI Clk Req */
	reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
	reg |= PCI_CLK_REQ_ENABLE;
	crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);

	return true;
}
360
361 static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(
362 struct crystalhd_hw *hw)
363 {
364 unsigned long flags = 0;
365 struct crystalhd_rx_dma_pkt *temp = NULL;
366
367 if (!hw)
368 return NULL;
369
370 spin_lock_irqsave(&hw->lock, flags);
371 temp = hw->rx_pkt_pool_head;
372 if (temp) {
373 hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
374 temp->dio_req = NULL;
375 temp->pkt_tag = 0;
376 temp->flags = 0;
377 }
378 spin_unlock_irqrestore(&hw->lock, flags);
379
380 return temp;
381 }
382
383 static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
384 struct crystalhd_rx_dma_pkt *pkt)
385 {
386 unsigned long flags = 0;
387
388 if (!hw || !pkt)
389 return;
390
391 spin_lock_irqsave(&hw->lock, flags);
392 pkt->next = hw->rx_pkt_pool_head;
393 hw->rx_pkt_pool_head = pkt;
394 spin_unlock_irqrestore(&hw->lock, flags);
395 }
396
397 /*
398 * Call back from TX - IOQ deletion.
399 *
400 * This routine will release the TX DMA rings allocated
401 * druing setup_dma rings interface.
402 *
403 * Memory is allocated per DMA ring basis. This is just
404 * a place holder to be able to create the dio queues.
405 */
static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
{
	/* Intentionally empty: TX descriptor memory is allocated and
	 * freed per DMA ring, so there is no per-element cleanup. */
}
409
410 /*
411 * Rx Packet release callback..
412 *
413 * Release All user mapped capture buffers and Our DMA packets
414 * back to our free pool. The actual cleanup of the DMA
415 * ring descriptors happen during dma ring release.
416 */
417 static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
418 {
419 struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
420 struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;
421
422 if (!pkt || !hw) {
423 BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
424 return;
425 }
426
427 if (pkt->dio_req)
428 crystalhd_unmap_dio(hw->adp, pkt->dio_req);
429 else
430 BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);
431
432 crystalhd_hw_free_rx_pkt(hw, pkt);
433 }
434
/*
 * Delete a dioq (if non-NULL) and NULL the pointer.  Wrapped in
 * do { } while (0) so the macro expands as a single statement and is
 * safe inside unbraced if/else bodies.
 */
#define crystalhd_hw_delete_ioq(adp, q)			\
do {							\
	if (q) {					\
		crystalhd_delete_dioq(adp, q);		\
		q = NULL;				\
	}						\
} while (0)
440
/* Tear down every TX/RX ioq owned by the HW layer and NULL them. */
static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
{
	if (!hw)
		return;

	BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
	crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
	crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
}
453
/*
 * Create one dioq with release callback @cb; on failure branch to the
 * caller's hw_create_ioq_err label.  NOTE: contains a hidden goto, so
 * it is only usable inside functions that define that label.
 */
#define crystalhd_hw_create_ioq(sts, hw, q, cb)		\
do {							\
	sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);	\
	if (sts != BC_STS_SUCCESS)			\
		goto hw_create_ioq_err;			\
} while (0)
460
461 /*
462 * Create IOQs..
463 *
464 * TX - Active & Free
465 * RX - Active, Ready and Free.
466 */
/*
 * Create all IOQs used by the HW layer: TX free/active with the TX
 * release callback, RX free/ready/active with the RX release
 * callback.  On any failure the hidden goto inside the
 * crystalhd_hw_create_ioq macro lands at hw_create_ioq_err, which
 * deletes whatever was created and returns the failing status.
 */
static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
{
	enum BC_STATUS   sts = BC_STS_SUCCESS;

	if (!hw) {
		BCMLOG_ERR("Invalid Arg!!\n");
		return BC_STS_INV_ARG;
	}

	crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
				crystalhd_tx_desc_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
				crystalhd_tx_desc_rel_call_back);

	crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
				crystalhd_rx_pkt_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
				crystalhd_rx_pkt_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
				crystalhd_rx_pkt_rel_call_back);

	return sts;

hw_create_ioq_err:
	crystalhd_hw_delete_ioqs(hw);

	return sts;
}
495
496
497 static bool crystalhd_code_in_full(struct crystalhd_adp *adp,
498 uint32_t needed_sz, bool b_188_byte_pkts, uint8_t flags)
499 {
500 uint32_t base, end, writep, readp;
501 uint32_t cpbSize, cpbFullness, fifoSize;
502
503 if (flags & 0x02) { /* ASF Bit is set */
504 base = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
505 end = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
506 writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
507 readp = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
508 } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
509 base = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
510 end = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
511 writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
512 readp = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
513 } else {
514 base = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
515 end = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
516 writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
517 readp = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
518 }
519
520 cpbSize = end - base;
521 if (writep >= readp)
522 cpbFullness = writep - readp;
523 else
524 cpbFullness = (end - base) - (readp - writep);
525
526 fifoSize = cpbSize - cpbFullness;
527
528 if (fifoSize < BC_INFIFO_THRESHOLD)
529 return true;
530
531 if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
532 return true;
533
534 return false;
535 }
536
/*
 * Complete the TX transfer identified by @list_id: pull the matching
 * request off the TX active queue, invoke its completion callback
 * with status @cs, and return the request to the TX free queue.
 */
static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
					 uint32_t list_id, enum BC_STATUS cs)
{
	struct tx_dma_pkt *tx_req;

	if (!hw || !list_id) {
		BCMLOG_ERR("Invalid Arg..\n");
		return BC_STS_INV_ARG;
	}

	/* One fewer transfer in flight.  NOTE(review): despite the name
	 * this is used as a counter here — confirm intended semantics. */
	hw->pwr_lock--;

	tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(
					hw->tx_actq, list_id);
	if (!tx_req) {
		/* A user abort already removed the request; stay quiet. */
		if (cs != BC_STS_IO_USER_ABORT)
			BCMLOG_ERR("Find and Fetch Did not find req\n");
		return BC_STS_NO_DATA;
	}

	if (tx_req->call_back) {
		tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
		tx_req->dio_req   = NULL;
		tx_req->cb_event  = NULL;
		tx_req->call_back = NULL;
	} else {
		BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
		       tx_req->list_tag);
	}

	/* Now put back the tx_list back in FreeQ */
	tx_req->list_tag = 0;

	return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
}
572
573 static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw,
574 uint32_t err_sts)
575 {
576 uint32_t err_mask, tmp;
577 unsigned long flags = 0;
578
579 err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
580 MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
581 MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
582
583 if (!(err_sts & err_mask))
584 return false;
585
586 BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);
587
588 tmp = err_mask;
589
590 if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
591 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
592
593 if (tmp) {
594 spin_lock_irqsave(&hw->lock, flags);
595 /* reset list index.*/
596 hw->tx_list_post_index = 0;
597 spin_unlock_irqrestore(&hw->lock, flags);
598 }
599
600 tmp = err_sts & err_mask;
601 crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
602
603 return true;
604 }
605
606 static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw,
607 uint32_t err_sts)
608 {
609 uint32_t err_mask, tmp;
610 unsigned long flags = 0;
611
612 err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
613 MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
614 MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
615
616 if (!(err_sts & err_mask))
617 return false;
618
619 BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);
620
621 tmp = err_mask;
622
623 if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
624 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
625
626 if (tmp) {
627 spin_lock_irqsave(&hw->lock, flags);
628 /* reset list index.*/
629 hw->tx_list_post_index = 0;
630 spin_unlock_irqrestore(&hw->lock, flags);
631 }
632
633 tmp = err_sts & err_mask;
634 crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
635
636 return true;
637 }
638
/*
 * TX interrupt worker: complete list 0/1 transfers that finished
 * successfully, then, if either TX error interrupt is set, decode the
 * error status and fail the corresponding pending request(s).
 */
static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
{
	uint32_t err_sts;

	if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
					     BC_STS_SUCCESS);

	if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
					     BC_STS_SUCCESS);

	if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
			INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
			/* No error mask set.. */
			return;
	}

	/* Handle Tx errors. */
	err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);

	if (crystalhd_tx_list0_handler(hw, err_sts))
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
					     BC_STS_ERROR);

	if (crystalhd_tx_list1_handler(hw, err_sts))
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
					     BC_STS_ERROR);

	hw->stats.tx_errors++;
}
670
671 static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
672 uint32_t ul_desc_index, uint32_t cnt)
673 {
674 uint32_t ix, ll = 0;
675
676 if (!p_dma_desc || !cnt)
677 return;
678
679 /* FIXME: jarod: perhaps a modparam desc_debug to enable this,
680 rather than setting ll (log level, I presume) to non-zero? */
681 if (!ll)
682 return;
683
684 for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
685 BCMLOG(ll,
686 "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
687 ((p_dma_desc[ul_desc_index].dma_dir) ? "TDesc" : "RDesc"),
688 ul_desc_index,
689 p_dma_desc[ul_desc_index].buff_addr_high,
690 p_dma_desc[ul_desc_index].buff_addr_low,
691 p_dma_desc[ul_desc_index].next_desc_addr_high,
692 p_dma_desc[ul_desc_index].next_desc_addr_low,
693 p_dma_desc[ul_desc_index].xfer_size,
694 p_dma_desc[ul_desc_index].intr_enable,
695 p_dma_desc[ul_desc_index].last_rec_indicator);
696 }
697
698 }
699
/*
 * Build a chained hardware DMA descriptor list for @ioreq.
 *
 * Fills @desc (whose device/bus base address is @desc_paddr_base)
 * from @sg_cnt scatter-gather entries starting at SG index @sg_st_ix,
 * with the first entry offset by @sg_st_off bytes.  The chain must
 * transfer exactly @xfr_sz bytes in total and every SG element length
 * must be a multiple of 4 (the hardware xfer_size is in dwords).  If
 * the request has residual fill bytes (ioreq->fb_size) an extra
 * one-dword descriptor is appended.  The final descriptor gets the
 * last-record and interrupt-enable flags.
 *
 * Returns BC_STS_SUCCESS, BC_STS_INV_ARG on bad parameters,
 * BC_STS_NOT_IMPL for unaligned SG lengths, or BC_STS_ERROR on
 * internal length inconsistencies.
 */
static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
				      struct dma_descriptor *desc,
				      dma_addr_t desc_paddr_base,
				      uint32_t sg_cnt, uint32_t sg_st_ix,
				      uint32_t sg_st_off, uint32_t xfr_sz)
{
	uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
	dma_addr_t desc_phy_addr = desc_paddr_base;
	union addr_64 addr_temp;

	if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
	    (!sg_cnt && !ioreq->uinfo.dir_tx)) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

	for (ix = 0; ix < sg_cnt; ix++) {

		/* Setup SGLE index. */
		sg_ix = ix + sg_st_ix;

		/* Get SGLE length */
		len = crystalhd_get_sgle_len(ioreq, sg_ix);
		if (len % 4) {
			BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix,
				   sg_cnt);
			return BC_STS_NOT_IMPL;
		}
		/* Setup DMA desc with Phy addr & Length at current index. */
		addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
		if (sg_ix == sg_st_ix) {
			/* First element: skip the starting byte offset. */
			addr_temp.full_addr += sg_st_off;
			len -= sg_st_off;
		}
		memset(&desc[ix], 0, sizeof(desc[ix]));
		desc[ix].buff_addr_low  = addr_temp.low_part;
		desc[ix].buff_addr_high = addr_temp.high_part;
		desc[ix].dma_dir        = ioreq->uinfo.dir_tx;

		/* Chain DMA descriptor.  Points at the NEXT slot; the
		 * final descriptor's link is cleared below. */
		addr_temp.full_addr = desc_phy_addr +
					 sizeof(struct dma_descriptor);
		desc[ix].next_desc_addr_low = addr_temp.low_part;
		desc[ix].next_desc_addr_high = addr_temp.high_part;

		/* Clamp the last element to the requested total size. */
		if ((count + len) > xfr_sz)
			len = xfr_sz - count;

		/* Debug.. */
		if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
			BCMLOG_ERR(
			 "inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
				   len, ix, count, xfr_sz, sg_cnt);
			return BC_STS_ERROR;
		}
		/* Length expects Multiple of 4 */
		desc[ix].xfer_size = (len / 4);

		crystalhd_hw_dump_desc(desc, ix, 1);

		count += len;
		desc_phy_addr += sizeof(struct dma_descriptor);
	}

	last_desc_ix = ix - 1;

	/* Residual bytes that don't fill a dword get one extra
	 * descriptor transferring a single padded dword. */
	if (ioreq->fb_size) {
		memset(&desc[ix], 0, sizeof(desc[ix]));
		addr_temp.full_addr     = ioreq->fb_pa;
		desc[ix].buff_addr_low  = addr_temp.low_part;
		desc[ix].buff_addr_high = addr_temp.high_part;
		desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
		desc[ix].xfer_size	= 1;
		desc[ix].fill_bytes	= 4 - ioreq->fb_size;
		count += ioreq->fb_size;
		last_desc_ix++;
	}

	/* setup last descriptor..*/
	desc[last_desc_ix].last_rec_indicator  = 1;
	desc[last_desc_ix].next_desc_addr_low  = 0;
	desc[last_desc_ix].next_desc_addr_high = 0;
	desc[last_desc_ix].intr_enable = 1;

	crystalhd_hw_dump_desc(desc, last_desc_ix, 1);

	if (count != xfr_sz) {
		BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
		return BC_STS_ERROR;
	}

	return BC_STS_SUCCESS;
}
793
/*
 * Translate a request's scatter-gather list into hardware DMA
 * descriptors.
 *
 * For TX, or RX with no UV plane (uinfo.uv_offset == 0), a single
 * chain covering the whole transfer is built.  Otherwise two chains
 * are built: the Y plane (SG entries up to and including uv_sg_ix,
 * uv_offset bytes) followed by the UV plane (from uv_sg_ix with byte
 * offset uv_sg_off, the remaining bytes); *uv_desc_index receives the
 * descriptor index where the UV chain starts.
 */
static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(
				      struct crystalhd_dio_req *ioreq,
				      struct dma_desc_mem *pdesc_mem,
				      uint32_t *uv_desc_index)
{
	struct dma_descriptor *desc = NULL;
	dma_addr_t desc_paddr_base = 0;
	uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
	uint32_t xfr_sz = 0;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	/* Check params.. */
	if (!ioreq || !pdesc_mem || !uv_desc_index) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

	if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
	    !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

	if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
		BCMLOG_ERR("UV offset for TX??\n");
		return BC_STS_INV_ARG;

	}

	desc = pdesc_mem->pdma_desc_start;
	desc_paddr_base = pdesc_mem->phy_addr;

	if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
		/* Single chain: whole transfer. */
		sg_cnt = ioreq->sg_cnt;
		xfr_sz = ioreq->uinfo.xfr_len;
	} else {
		/* Y plane only; the UV chain is built below. */
		sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
		xfr_sz = ioreq->uinfo.uv_offset;
	}

	sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
				     sg_st_ix, sg_st_off, xfr_sz);

	if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
		return sts;

	/* Prepare for UV mapping.. */
	desc = &pdesc_mem->pdma_desc_start[sg_cnt];
	desc_paddr_base = pdesc_mem->phy_addr +
			  (sg_cnt * sizeof(struct dma_descriptor));

	/* Done with desc addr.. now update sg stuff.*/
	sg_cnt    = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
	xfr_sz    = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
	sg_st_ix  = ioreq->uinfo.uv_sg_ix;
	sg_st_off = ioreq->uinfo.uv_sg_off;

	sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
				     sg_st_ix, sg_st_off, xfr_sz);
	if (sts != BC_STS_SUCCESS)
		return sts;

	*uv_desc_index = sg_st_ix;

	return sts;
}
860
861 static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
862 {
863 uint32_t dma_cntrl;
864
865 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
866 if (!(dma_cntrl & DMA_START_BIT)) {
867 dma_cntrl |= DMA_START_BIT;
868 crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
869 dma_cntrl);
870 }
871
872 return;
873 }
874
875 /* _CHECK_THIS_
876 *
877 * Verify if the Stop generates a completion interrupt or not.
878 * if it does not generate an interrupt, then add polling here.
879 */
/*
 * Stop the TX DMA engine: clear the start bit, then poll the start
 * bit of both descriptor lists for up to ~3 s (30 x 100 ms) until
 * they drop.  Interrupts are masked for the duration and re-enabled
 * on every exit path.  On success the TX posting index is reset.
 * Returns BC_STS_ERROR if the engine fails to stop in time.
 */
static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl, cnt = 30;
	uint32_t l1 = 1, l2 = 1;
	unsigned long flags = 0;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);

	BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");

	if (!(dma_cntrl & DMA_START_BIT)) {
		BCMLOG(BCMLOG_DBG, "Already Stopped\n");
		return BC_STS_SUCCESS;
	}

	crystalhd_disable_interrupts(hw->adp);

	/* Issue stop to HW */
	/* This bit when set gave problems. Please check*/
	dma_cntrl &= ~DMA_START_BIT;
	crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);

	BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");

	/* Poll for 3seconds (30 * 100ms) on both the lists..*/
	while ((l1 || l2) && cnt) {

		if (l1) {
			l1 = crystalhd_reg_rd(hw->adp,
				 MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
			l1 &= DMA_START_BIT;
		}

		if (l2) {
			l2 = crystalhd_reg_rd(hw->adp,
				 MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
			l2 &= DMA_START_BIT;
		}

		msleep_interruptible(100);

		cnt--;
	}

	if (!cnt) {
		BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
		crystalhd_enable_interrupts(hw->adp);
		return BC_STS_ERROR;
	}

	spin_lock_irqsave(&hw->lock, flags);
	hw->tx_list_post_index = 0;
	spin_unlock_irqrestore(&hw->lock, flags);
	BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
	crystalhd_enable_interrupts(hw->adp);

	return BC_STS_SUCCESS;
}
938
/*
 * Return the number of PIB entries pending in the delivery queue in
 * device memory.  Word 0 of the queue holds the read offset, word 1
 * the write offset; entries live in [MIN_PIB_Q_DEPTH,
 * MAX_PIB_Q_DEPTH).  Returns 0 when empty or when the computed count
 * is implausible.
 */
static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
{
	/*
	 * Position of the PIB Entries can be found at
	 * 0th and the 1st location of the Circular list.
	 */
	uint32_t Q_addr;
	uint32_t pib_cnt, r_offset, w_offset;

	Q_addr = hw->pib_del_Q_addr;

	/* Get the Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	if (r_offset == w_offset)
		return 0;	/* Queue is empty */

	if (w_offset > r_offset)
		pib_cnt = w_offset - r_offset;
	else
		/* Wrapped: algebraically (w - MIN) + (MAX - r), i.e. the
		 * occupancy of a ring spanning [MIN, MAX). */
		pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
			  (r_offset + MIN_PIB_Q_DEPTH);

	if (pib_cnt > MAX_PIB_Q_DEPTH) {
		BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
		return 0;
	}

	return pib_cnt;
}
972
/*
 * Pop one PIB DRAM address from the delivery queue.  Reads the entry
 * at the current read offset, advances the read offset (wrapping from
 * MAX_PIB_Q_DEPTH back to MIN_PIB_Q_DEPTH) and writes it back to
 * device memory.  Returns 0 when the queue is empty or the stored
 * read offset is out of range.
 */
static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
{
	uint32_t Q_addr;
	uint32_t addr_entry, r_offset, w_offset;

	Q_addr = hw->pib_del_Q_addr;

	/* Get the Read Pointer 0Th Location is Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer 1st Location is Write pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	/* Queue is empty */
	if (r_offset == w_offset)
		return 0;

	if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
		return 0;

	/* Get the Actual Address of the PIB */
	crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
			 1, &addr_entry);

	/* Increment the Read Pointer */
	r_offset++;

	if (MAX_PIB_Q_DEPTH == r_offset)
		r_offset = MIN_PIB_Q_DEPTH;

	/* Write back the read pointer to It's Location */
	crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);

	return addr_entry;
}
1008
/*
 * Push a PIB DRAM address onto the release queue so the firmware can
 * reuse the buffer.  Stores the address at the current write offset,
 * then advances and stores the write offset (wrapping from
 * MAX_PIB_Q_DEPTH back to MIN_PIB_Q_DEPTH).  Returns false when the
 * stored read offset is out of range or the queue would overflow.
 *
 * NOTE(review): w_offset is used as an index without a range check of
 * its own — presumably guaranteed valid by the firmware; confirm.
 */
static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw,
					uint32_t addr_to_rel)
{
	uint32_t Q_addr;
	uint32_t r_offset, w_offset, n_offset;

	Q_addr = hw->pib_rel_Q_addr;

	/* Get the Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	if ((r_offset < MIN_PIB_Q_DEPTH) ||
	    (r_offset >= MAX_PIB_Q_DEPTH))
		return false;

	n_offset = w_offset + 1;

	if (MAX_PIB_Q_DEPTH == n_offset)
		n_offset = MIN_PIB_Q_DEPTH;

	if (r_offset == n_offset)
		return false;	/* should never happen */

	/* Write the DRAM ADDR to the Queue at Next Offset */
	crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
			 1, &addr_to_rel);

	/* Put the New value of the write pointer in Queue */
	crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);

	return true;
}
1044
1045 static void cpy_pib_to_app(struct c011_pib *src_pib,
1046 struct BC_PIC_INFO_BLOCK *dst_pib)
1047 {
1048 if (!src_pib || !dst_pib) {
1049 BCMLOG_ERR("Invalid Arguments\n");
1050 return;
1051 }
1052
1053 dst_pib->timeStamp = 0;
1054 dst_pib->picture_number = src_pib->ppb.picture_number;
1055 dst_pib->width = src_pib->ppb.width;
1056 dst_pib->height = src_pib->ppb.height;
1057 dst_pib->chroma_format = src_pib->ppb.chroma_format;
1058 dst_pib->pulldown = src_pib->ppb.pulldown;
1059 dst_pib->flags = src_pib->ppb.flags;
1060 dst_pib->sess_num = src_pib->ptsStcOffset;
1061 dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio;
1062 dst_pib->colour_primaries = src_pib->ppb.colour_primaries;
1063 dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
1064 dst_pib->frame_rate = src_pib->resolution;
1065 return;
1066 }
1067
/*
 * Drain the firmware PIB delivery queue.  For each pending PIB, read
 * it from device DRAM; when it signals a format change, take a packet
 * from the RX free queue, copy the PIB into it and post it on the RX
 * ready queue with PIB-valid/format-change flags.  Every PIB address
 * is released back to the firmware via the release queue.
 */
static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
{
	unsigned int cnt;
	struct c011_pib src_pib;
	uint32_t pib_addr, pib_cnt;
	struct BC_PIC_INFO_BLOCK *AppPib;
	struct crystalhd_rx_dma_pkt *rx_pkt = NULL;

	pib_cnt = crystalhd_get_pib_avail_cnt(hw);

	if (!pib_cnt)
		return;

	for (cnt = 0; cnt < pib_cnt; cnt++) {

		pib_addr = crystalhd_get_addr_from_pib_Q(hw);
		crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
				 (uint32_t *)&src_pib);

		if (src_pib.bFormatChange) {
			rx_pkt = (struct crystalhd_rx_dma_pkt *)
					crystalhd_dioq_fetch(hw->rx_freeq);
			/* No free packet: bail; the PIB address is NOT
			 * released in this case. */
			if (!rx_pkt)
				return;
			rx_pkt->flags = 0;
			rx_pkt->flags |= COMP_FLAG_PIB_VALID |
					 COMP_FLAG_FMT_CHANGE;
			AppPib = &rx_pkt->pib;
			cpy_pib_to_app(&src_pib, AppPib);

			/* NOTE(review): 'height' is logged twice below;
			 * one was likely meant to be 'width'. */
			BCMLOG(BCMLOG_DBG,
			       "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
			       rx_pkt->pib.picture_number,
			       rx_pkt->pib.aspect_ratio,
			       rx_pkt->pib.chroma_format,
			       rx_pkt->pib.colour_primaries,
			       rx_pkt->pib.frame_rate,
			       rx_pkt->pib.height,
			       rx_pkt->pib.height,
			       rx_pkt->pib.n_drop,
			       rx_pkt->pib.pulldown,
			       rx_pkt->pib.ycom);

			crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true,
					   rx_pkt->pkt_tag);

		}

		crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
	}
}
1119
1120 static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
1121 {
1122 uint32_t dma_cntrl;
1123
1124 dma_cntrl = crystalhd_reg_rd(hw->adp,
1125 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1126 if (!(dma_cntrl & DMA_START_BIT)) {
1127 dma_cntrl |= DMA_START_BIT;
1128 crystalhd_reg_wr(hw->adp,
1129 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1130 }
1131
1132 dma_cntrl = crystalhd_reg_rd(hw->adp,
1133 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1134 if (!(dma_cntrl & DMA_START_BIT)) {
1135 dma_cntrl |= DMA_START_BIT;
1136 crystalhd_reg_wr(hw->adp,
1137 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1138 }
1139
1140 return;
1141 }
1142
/*
 * Stop both RX DMA engines and wait for them to quiesce.
 *
 * Clears the start bit on the Y and UV control registers, then polls
 * the four first-descriptor registers (list0/list1 x Y/UV) for up to
 * 3 seconds (30 x 100ms), clearing the corresponding waiting-intr bits
 * in hw->rx_list_sts as each engine reports idle.  Finally resets the
 * list posting index.
 */
static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl = 0, count = 30;
	/* One busy flag per engine/list; cleared as each goes idle. */
	uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;

	dma_cntrl = crystalhd_reg_rd(hw->adp,
			MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
	if ((dma_cntrl & DMA_START_BIT)) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp,
				MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	dma_cntrl = crystalhd_reg_rd(hw->adp,
			MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
	if ((dma_cntrl & DMA_START_BIT)) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp,
				MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	/* Poll for 3seconds (30 * 100ms) on both the lists..*/
	while ((l0y || l0uv || l1y || l1uv) && count) {

		if (l0y) {
			l0y = crystalhd_reg_rd(hw->adp,
					MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
			l0y &= DMA_START_BIT;
			if (!l0y)
				hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
		}

		if (l1y) {
			l1y = crystalhd_reg_rd(hw->adp,
					MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
			l1y &= DMA_START_BIT;
			if (!l1y)
				hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
		}

		if (l0uv) {
			l0uv = crystalhd_reg_rd(hw->adp,
					MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
			l0uv &= DMA_START_BIT;
			if (!l0uv)
				hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
		}

		if (l1uv) {
			l1uv = crystalhd_reg_rd(hw->adp,
					MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
			l1uv &= DMA_START_BIT;
			if (!l1uv)
				hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
		}
		msleep_interruptible(100);
		count--;
	}

	hw->rx_list_post_index = 0;

	BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
	       count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
}
1207
/*
 * Post a capture buffer to the next free RX DMA list.
 *
 * Under rx_lock: verifies the next list is free, claims it by setting
 * the waiting-intr bits, tags the packet with the list's tag and
 * advances the post index.  Outside the lock the packet is queued on
 * the active queue, the engines are started, and the Y (and optionally
 * UV) first-descriptor registers are programmed -- the low-address
 * write carries the valid bit (|0x01), which arms the transfer.
 *
 * Returns BC_STS_BUSY when the target list is in use, BC_STS_INV_ARG
 * on bad arguments, BC_STS_SUCCESS otherwise.
 */
static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw,
					 struct crystalhd_rx_dma_pkt *rx_pkt)
{
	uint32_t y_low_addr_reg, y_high_addr_reg;
	uint32_t uv_low_addr_reg, uv_high_addr_reg;
	union addr_64 desc_addr;
	unsigned long flags;

	if (!hw || !rx_pkt) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
		BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
		return BC_STS_INV_ARG;
	}

	spin_lock_irqsave(&hw->rx_lock, flags);
	/* FIXME: jarod: sts_free is an enum for 0,
	 in crystalhd_hw.h... yuk... */
	if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
		spin_unlock_irqrestore(&hw->rx_lock, flags);
		return BC_STS_BUSY;
	}

	/* Select the register set for list 0 or list 1. */
	if (!hw->rx_list_post_index) {
		y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
		y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
		uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
		uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
	} else {
		y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
		y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
		uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
		uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
	}
	rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
	hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
	if (rx_pkt->uv_phy_addr)
		hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
	hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
	spin_unlock_irqrestore(&hw->rx_lock, flags);

	crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false,
			   rx_pkt->pkt_tag);

	crystalhd_start_rx_dma_engine(hw);
	/* Program the Y descriptor */
	desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
	crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
	crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);

	if (rx_pkt->uv_phy_addr) {
		/* Program the UV descriptor */
		desc_addr.full_addr = rx_pkt->uv_phy_addr;
		crystalhd_reg_wr(hw->adp, uv_high_addr_reg,
				 desc_addr.high_part);
		crystalhd_reg_wr(hw->adp, uv_low_addr_reg,
				 desc_addr.low_part | 0x01);
	}

	return BC_STS_SUCCESS;
}
1272
1273 static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
1274 struct crystalhd_rx_dma_pkt *rx_pkt)
1275 {
1276 enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
1277
1278 if (sts == BC_STS_BUSY)
1279 crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
1280 false, rx_pkt->pkt_tag);
1281
1282 return sts;
1283 }
1284
1285 static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
1286 uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
1287 {
1288 uint32_t y_dn_sz_reg, uv_dn_sz_reg;
1289
1290 if (!list_index) {
1291 y_dn_sz_reg = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
1292 uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
1293 } else {
1294 y_dn_sz_reg = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
1295 uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
1296 }
1297
1298 *y_dw_dnsz = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
1299 *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
1300 }
1301
1302 /*
1303 * This function should be called only after making sure that the two DMA
1304 * lists are free. This function does not check if DMA's are active, before
1305 * turning off the DMA.
1306 */
static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl, aspm;

	/* Caller verified both lists are free; clear the pending stop. */
	hw->stop_pending = 0;

	/* Clear the start bit on the Y RX engine if still set. */
	dma_cntrl = crystalhd_reg_rd(hw->adp,
			MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
	if (dma_cntrl & DMA_START_BIT) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp,
				MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	/* Same for the UV RX engine. */
	dma_cntrl = crystalhd_reg_rd(hw->adp,
			MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
	if (dma_cntrl & DMA_START_BIT) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp,
				MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}
	hw->rx_list_post_index = 0;

	/* Re-enable PCIe ASPM L1 power saving while capture is paused. */
	aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
	aspm |= ASPM_L1_ENABLE;
	/* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
	crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
}
1335
/*
 * Complete the RX packet attached to the given DMA list.
 *
 * Looks the packet up on the active queue by its list tag.  On success
 * the Y/UV transfer sizes are recorded and the packet is posted to the
 * ready queue for user space; on any other completion status the DIO
 * buffer is re-posted to the hardware via crystalhd_hw_post_cap_buff().
 */
static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw,
					 uint32_t list_index, enum BC_STATUS comp_sts)
{
	struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
	uint32_t y_dw_dnsz, uv_dw_dnsz;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	if (!hw || list_index >= DMA_ENGINE_CNT) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
					 hw->rx_pkt_tag_seed + list_index);
	if (!rx_pkt) {
		BCMLOG_ERR(
			"Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
			hw->rx_list_post_index, hw->rx_list_sts[0],
			hw->rx_list_sts[1], list_index,
			hw->rx_pkt_tag_seed + list_index, comp_sts);
		return BC_STS_INV_ARG;
	}

	if (comp_sts == BC_STS_SUCCESS) {
		crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
		rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
		rx_pkt->flags = COMP_FLAG_DATA_VALID;
		if (rx_pkt->uv_phy_addr)
			rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
		crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
				hw->rx_pkt_tag_seed + list_index);
		return sts;
	}

	/* Check if we can post this DIO again. */
	return crystalhd_hw_post_cap_buff(hw, rx_pkt);
}
1373
/*
 * Update hw->rx_list_sts[0] from the list-0 interrupt/error status.
 *
 * For each of Y and UV: a DMA-done interrupt clears the waiting bit, an
 * underrun is treated as benign completion, a FIFO-full error marks the
 * list errored, and any remaining unrecognized error bits also mark the
 * list errored and reset the posting index.  Handled error bits are
 * acknowledged back to the MISC1_*_RX_ERROR_STATUS registers.
 *
 * Returns true when the list-0 state changed (caller must act on it).
 */
static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw,
		uint32_t int_sts, uint32_t y_err_sts, uint32_t uv_err_sts)
{
	uint32_t tmp;
	enum list_sts tmp_lsts;

	if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
		return false;

	/* Snapshot the state so we can report whether it changed. */
	tmp_lsts = hw->rx_list_sts[0];

	/* Y0 - DMA */
	tmp = y_err_sts & GET_Y0_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[0] &= ~rx_waiting_y_intr;

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
	}

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
		hw->rx_list_sts[0] &= ~rx_y_mask;
		hw->rx_list_sts[0] |= rx_y_error;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
	}

	/* Any error bits we did not consume above: flag Y errored. */
	if (tmp) {
		hw->rx_list_sts[0] &= ~rx_y_mask;
		hw->rx_list_sts[0] |= rx_y_error;
		hw->rx_list_post_index = 0;
	}

	/* UV0 - DMA */
	tmp = uv_err_sts & GET_UV0_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
	}

	if (uv_err_sts &
		MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
		hw->rx_list_sts[0] &= ~rx_uv_mask;
		hw->rx_list_sts[0] |= rx_uv_error;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
	}

	/* Any error bits we did not consume above: flag UV errored. */
	if (tmp) {
		hw->rx_list_sts[0] &= ~rx_uv_mask;
		hw->rx_list_sts[0] |= rx_uv_error;
		hw->rx_list_post_index = 0;
	}

	/* Acknowledge (write-1-to-clear) the handled error bits. */
	if (y_err_sts & GET_Y0_ERR_MSK) {
		tmp = y_err_sts & GET_Y0_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
	}

	if (uv_err_sts & GET_UV0_ERR_MSK) {
		tmp = uv_err_sts & GET_UV0_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
	}

	return (tmp_lsts != hw->rx_list_sts[0]);
}
1442
/*
 * Update hw->rx_list_sts[1] from the list-1 interrupt/error status.
 * Mirror of crystalhd_rx_list0_handler() using the L1 masks.
 * Returns true when the list-1 state changed.
 */
static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw,
		uint32_t int_sts, uint32_t y_err_sts, uint32_t uv_err_sts)
{
	uint32_t tmp;
	enum list_sts tmp_lsts;

	if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
		return false;

	/* Snapshot the state so we can report whether it changed. */
	tmp_lsts = hw->rx_list_sts[1];

	/* Y1 - DMA */
	tmp = y_err_sts & GET_Y1_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[1] &= ~rx_waiting_y_intr;

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
	}

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
		/* Add retry-support..*/
		hw->rx_list_sts[1] &= ~rx_y_mask;
		hw->rx_list_sts[1] |= rx_y_error;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
	}

	/* Any error bits we did not consume above: flag Y errored. */
	if (tmp) {
		hw->rx_list_sts[1] &= ~rx_y_mask;
		hw->rx_list_sts[1] |= rx_y_error;
		hw->rx_list_post_index = 0;
	}

	/* UV1 - DMA */
	tmp = uv_err_sts & GET_UV1_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
	}

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
		/* Add retry-support*/
		hw->rx_list_sts[1] &= ~rx_uv_mask;
		hw->rx_list_sts[1] |= rx_uv_error;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
	}

	/* Any error bits we did not consume above: flag UV errored. */
	if (tmp) {
		hw->rx_list_sts[1] &= ~rx_uv_mask;
		hw->rx_list_sts[1] |= rx_uv_error;
		hw->rx_list_post_index = 0;
	}

	/* Acknowledge (write-1-to-clear) the handled error bits. */
	if (y_err_sts & GET_Y1_ERR_MSK) {
		tmp = y_err_sts & GET_Y1_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
	}

	if (uv_err_sts & GET_UV1_ERR_MSK) {
		tmp = uv_err_sts & GET_UV1_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
	}

	return (tmp_lsts != hw->rx_list_sts[1]);
}
1512
1513
/*
 * RX portion of the interrupt service routine.
 *
 * Reads the Y/UV error status registers once, then, for each DMA list
 * (under rx_lock), runs the list handler and maps the resulting state
 * to a completion status: sts_free -> success, any error state ->
 * BC_STS_ERROR (list reset to free), otherwise keep waiting.  Packet
 * completion runs outside the lock.  When a list frees up, either the
 * pending pause is finalized (if both lists are free) or capture is
 * restarted.
 */
static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
{
	unsigned long flags;
	uint32_t i, list_avail = 0;
	enum BC_STATUS comp_sts = BC_STS_NO_DATA;
	uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
	bool ret = false;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return;
	}

	if (!(intr_sts & GET_RX_INTR_MASK))
		return;

	y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
	uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);

	for (i = 0; i < DMA_ENGINE_CNT; i++) {
		/* Update States..*/
		spin_lock_irqsave(&hw->rx_lock, flags);
		if (i == 0)
			ret = crystalhd_rx_list0_handler(hw, intr_sts,
					y_err_sts, uv_err_sts);
		else
			ret = crystalhd_rx_list1_handler(hw, intr_sts,
					y_err_sts, uv_err_sts);
		if (ret) {
			switch (hw->rx_list_sts[i]) {
			case sts_free:
				comp_sts = BC_STS_SUCCESS;
				list_avail = 1;
				break;
			case rx_y_error:
			case rx_uv_error:
			case rx_sts_error:
				/* We got error on both or Y or uv. */
				hw->stats.rx_errors++;
				crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
				/* FIXME: jarod: this is where
				 my mini pci-e card is tripping up */
				BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
				       i, hw->stats.rx_errors, y_err_sts,
				       uv_err_sts, intr_sts, y_dn_sz,
				       uv_dn_sz);
				hw->rx_list_sts[i] = sts_free;
				comp_sts = BC_STS_ERROR;
				break;
			default:
				/* Wait for completion..*/
				comp_sts = BC_STS_NO_DATA;
				break;
			}
		}
		spin_unlock_irqrestore(&hw->rx_lock, flags);

		/* handle completion...*/
		if (comp_sts != BC_STS_NO_DATA) {
			crystalhd_rx_pkt_done(hw, i, comp_sts);
			comp_sts = BC_STS_NO_DATA;
		}
	}

	if (list_avail) {
		if (hw->stop_pending) {
			if ((hw->rx_list_sts[0] == sts_free) &&
			    (hw->rx_list_sts[1] == sts_free))
				crystalhd_hw_finalize_pause(hw);
		} else {
			crystalhd_hw_start_capture(hw);
		}
	}
}
1588
1589 static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
1590 struct BC_FW_CMD *fw_cmd)
1591 {
1592 enum BC_STATUS sts = BC_STS_SUCCESS;
1593 struct dec_rsp_channel_start_video *st_rsp = NULL;
1594
1595 switch (fw_cmd->cmd[0]) {
1596 case eCMD_C011_DEC_CHAN_START_VIDEO:
1597 st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
1598 hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
1599 hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
1600 BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
1601 hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
1602 break;
1603 case eCMD_C011_INIT:
1604 if (!(crystalhd_load_firmware_config(hw->adp))) {
1605 BCMLOG_ERR("Invalid Params.\n");
1606 sts = BC_STS_FW_AUTH_FAILED;
1607 }
1608 break;
1609 default:
1610 break;
1611 }
1612 return sts;
1613 }
1614
/*
 * Power-down sequence: pulse the 7412 reset, idle the DDR, then power
 * down the audio block and the decoder PLLs (bit 15 of each PllCtl
 * register).  Always returns BC_STS_SUCCESS.
 */
static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
{
	uint32_t reg;
	union link_misc_perst_decoder_ctrl rst_cntrl_reg;

	/* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
	rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp,
			MISC_PERST_DECODER_CTRL);

	rst_cntrl_reg.bcm_7412_rst = 1;
	crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL,
			rst_cntrl_reg.whole_reg);
	msleep_interruptible(50);

	rst_cntrl_reg.bcm_7412_rst = 0;
	crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL,
			rst_cntrl_reg.whole_reg);

	/* Close all banks, put DDR in idle */
	bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);

	/* Set bit 25 (drop CKE pin of DDR) */
	reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
	reg |= 0x02000000;
	bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);

	/* Reset the audio block */
	bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);

	/* Power down Raptor PLL */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);

	/* Power down all Audio PLL */
	bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);

	/* Power down video clock (75MHz) -- PLL E */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);

	/* Power down video clock (75MHz) -- PLL D */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);

	/* Power down core clock (200MHz) -- PLL A */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);

	/* Power down core clock (200MHz) -- PLL B */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);

	return BC_STS_SUCCESS;
}
1674
1675 /************************************************
1676 **
1677 *************************************************/
1678
/*
 * Download the firmware image to device DRAM and authenticate it.
 *
 * Flow: verify OTP is programmed, reset and wait for the DCI ready bit,
 * stream the image body (sz - 36 bytes) a word at a time through the
 * DCI data port, write the trailing 8-word signature (byte-swapped,
 * into DCI_SIGNATURE_DATA_7 downward), then trigger authentication and
 * poll for completion.
 *
 * The 36-byte tail accounting: the body loop stops 36 bytes short; one
 * extra temp_buff++ skips a 4-byte pad, then 8 x 4 = 32 signature bytes
 * are written (4 + 32 = fw_sig_len).
 *
 * Returns BC_STS_SUCCESS, or INV_ARG / ERROR / TIMEOUT /
 * FW_AUTH_FAILED on the respective failures.
 */
enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer,
				 uint32_t sz)
{
	uint32_t reg_data, cnt, *temp_buff;
	uint32_t fw_sig_len = 36;
	uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;


	if (!adp || !buffer || !sz) {
		BCMLOG_ERR("Invalid Params.\n");
		return BC_STS_INV_ARG;
	}

	/* OTP bit 1 must be set or the part cannot run firmware. */
	reg_data = crystalhd_reg_rd(adp, OTP_CMD);
	if (!(reg_data & 0x02)) {
		BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
		return BC_STS_ERROR;
	}

	/* Reset the DCI, then request download mode (bit 0). */
	reg_data = 0;
	crystalhd_reg_wr(adp, DCI_CMD, 0);
	reg_data |= BC_BIT(0);
	crystalhd_reg_wr(adp, DCI_CMD, reg_data);

	reg_data = 0;
	cnt = 1000;
	msleep_interruptible(10);

	/* Wait (up to 1000 polls) for the download-ready bit (bit 4). */
	while (reg_data != BC_BIT(4)) {
		reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
		reg_data &= BC_BIT(4);
		if (--cnt == 0) {
			BCMLOG_ERR("Firmware Download RDY Timeout.\n");
			return BC_STS_TIMEOUT;
		}
	}

	msleep_interruptible(10);
	/* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
	crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
	temp_buff = (uint32_t *)buffer;
	for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
		crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
		crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
		dram_offset += 4;
		temp_buff++;
	}
	msleep_interruptible(10);

	/* Skip the 4-byte pad between image body and signature. */
	temp_buff++;

	/* Write the 8-word signature, byte-swapped, high register first. */
	sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
	for (cnt = 0; cnt < 8; cnt++) {
		uint32_t swapped_data = *temp_buff;
		swapped_data = bswap_32_1(swapped_data);
		crystalhd_reg_wr(adp, sig_reg, swapped_data);
		sig_reg -= 4;
		temp_buff++;
	}
	msleep_interruptible(10);

	/* Start authentication (DCI_CMD bit 1). */
	reg_data = 0;
	reg_data |= BC_BIT(1);
	crystalhd_reg_wr(adp, DCI_CMD, reg_data);
	msleep_interruptible(10);

	reg_data = 0;
	reg_data = crystalhd_reg_rd(adp, DCI_STATUS);

	/* Bit 9: signature accepted; then poll bit 0 for completion. */
	if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
		cnt = 1000;
		while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
			reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
			reg_data &= BC_BIT(0);
			if (!(--cnt))
				break;
			msleep_interruptible(10);
		}
		/* Release the processor to run the firmware (bit 4). */
		reg_data = 0;
		reg_data = crystalhd_reg_rd(adp, DCI_CMD);
		reg_data |= BC_BIT(4);
		crystalhd_reg_wr(adp, DCI_CMD, reg_data);

	} else {
		BCMLOG_ERR("F/w Signature mismatch\n");
		return BC_STS_FW_AUTH_FAILED;
	}

	BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
	return BC_STS_SUCCESS;
}
1770
/*
 * Issue a command to the firmware and wait for its response.
 *
 * The command buffer is written to device memory at TS_Host2CpuSnd,
 * its address is posted to the mailbox, and the caller sleeps (up to
 * 20s) on fw_cmd_event, which the ISR signals when the firmware raises
 * the response interrupt.  The response is then read back from the
 * address in Cpu2HstMbx1 and post-processed.
 *
 * pwr_lock is raised around the transaction to hold off power
 * management while a command is in flight.
 *
 * NOTE(review): the wait-queue is initialized before the argument
 * checks; harmless as written, but validation-first would be tidier.
 */
enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
				struct BC_FW_CMD *fw_cmd)
{
	uint32_t cnt = 0, cmd_res_addr;
	uint32_t *cmd_buff, *res_buff;
	wait_queue_head_t fw_cmd_event;
	int rc = 0;
	enum BC_STATUS sts;

	crystalhd_create_event(&fw_cmd_event);

	if (!hw || !fw_cmd) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	cmd_buff = fw_cmd->cmd;
	res_buff = fw_cmd->rsp;

	if (!cmd_buff || !res_buff) {
		BCMLOG_ERR("Invalid Parameters for F/W Command\n");
		return BC_STS_INV_ARG;
	}

	hw->pwr_lock++;

	hw->fwcmd_evt_sts = 0;
	hw->pfw_cmd_event = &fw_cmd_event;

	/*Write the command to the memory*/
	crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);

	/*Memory Read for memory arbitrator flush*/
	crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);

	/* Write the command address to mailbox */
	bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
	msleep_interruptible(50);

	/* Sleep until the ISR sets fwcmd_evt_sts (20s timeout). */
	crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);

	if (!rc) {
		sts = BC_STS_SUCCESS;
	} else if (rc == -EBUSY) {
		BCMLOG_ERR("Firmware command T/O\n");
		sts = BC_STS_TIMEOUT;
	} else if (rc == -EINTR) {
		BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
		sts = BC_STS_IO_USER_ABORT;
	} else {
		BCMLOG_ERR("FwCmd IO Error.\n");
		sts = BC_STS_IO_ERROR;
	}

	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("FwCmd Failed.\n");
		hw->pwr_lock--;
		return sts;
	}

	/*Get the Response Address*/
	cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);

	/*Read the Response*/
	crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);

	hw->pwr_lock--;

	if (res_buff[2] != C011_RET_SUCCESS) {
		BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
		return BC_STS_FW_CMD_ERR;
	}

	sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
	if (sts != BC_STS_SUCCESS)
		BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");

	return sts;
}
1850
/*
 * Top-level interrupt handler.
 *
 * Reads the decoder (Stream2Host) and link (INTR_INTR_STATUS) interrupt
 * sources.  Decoder bit 31 signals a firmware-command response (wakes
 * the waiter in crystalhd_do_fw_cmd); bit 1 signals PIB delivery.  RX
 * and TX ISRs then run on the link status, and handled interrupts are
 * acknowledged/EOI'd.  Returns true when an interrupt was serviced
 * (i.e. it was ours).
 */
bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
{
	uint32_t intr_sts = 0;
	uint32_t deco_intr = 0;
	bool rc = false;

	if (!adp || !hw->dev_started)
		return rc;

	hw->stats.num_interrupts++;
	hw->pwr_lock++;

	deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
	intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);

	if (intr_sts) {
		/* let system know we processed interrupt..*/
		rc = true;
		hw->stats.dev_interrupts++;
	}

	/* 0xdeaddead reads back when the decoder is unreachable. */
	if (deco_intr && (deco_intr != 0xdeaddead)) {

		if (deco_intr & 0x80000000) {
			/*Set the Event and the status flag*/
			if (hw->pfw_cmd_event) {
				hw->fwcmd_evt_sts = 1;
				crystalhd_set_event(hw->pfw_cmd_event);
			}
		}

		if (deco_intr & BC_BIT(1))
			crystalhd_hw_proc_pib(hw);

		bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
		/* FIXME: jarod: No udelay? might this be
		 the real reason mini pci-e cards were stalling out? */
		bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
		rc = true;
	}

	/* Rx interrupts */
	crystalhd_rx_isr(hw, intr_sts);

	/* Tx interrupts*/
	crystalhd_tx_isr(hw, intr_sts);

	/* Clear interrupts */
	if (rc) {
		if (intr_sts)
			crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);

		crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
	}

	hw->pwr_lock--;

	return rc;
}
1910
/*
 * Initialize the hw context and start the device.
 *
 * Idempotent: returns success immediately if already started.  The
 * whole context is zeroed first, so all fields must be (re)initialized
 * after the memset.  Sets the tag seeds, starts the device, and
 * programs the initial core clock.
 */
enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw,
			 struct crystalhd_adp *adp)
{
	if (!hw || !adp) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	if (hw->dev_started)
		return BC_STS_SUCCESS;

	memset(hw, 0, sizeof(struct crystalhd_hw));

	hw->adp = adp;
	spin_lock_init(&hw->lock);
	spin_lock_init(&hw->rx_lock);
	/* FIXME: jarod: what are these magic numbers?!? */
	hw->tx_ioq_tag_seed = 0x70023070;
	hw->rx_pkt_tag_seed = 0x70029070;

	hw->stop_pending = 0;
	crystalhd_start_device(hw->adp);
	hw->dev_started = true;

	/* set initial core clock */
	hw->core_clock_mhz = CLOCK_PRESET;
	hw->prev_n = 0;
	hw->pwr_lock = 0;
	crystalhd_hw_set_core_clock(hw);

	return BC_STS_SUCCESS;
}
1943
1944 enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
1945 {
1946 if (!hw) {
1947 BCMLOG_ERR("Invalid Arguments\n");
1948 return BC_STS_INV_ARG;
1949 }
1950
1951 if (!hw->dev_started)
1952 return BC_STS_SUCCESS;
1953
1954 /* Stop and DDR sleep will happen in here */
1955 crystalhd_hw_suspend(hw);
1956 hw->dev_started = false;
1957
1958 return BC_STS_SUCCESS;
1959 }
1960
1961 enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
1962 {
1963 unsigned int i;
1964 void *mem;
1965 size_t mem_len;
1966 dma_addr_t phy_addr;
1967 enum BC_STATUS sts = BC_STS_SUCCESS;
1968 struct crystalhd_rx_dma_pkt *rpkt;
1969
1970 if (!hw || !hw->adp) {
1971 BCMLOG_ERR("Invalid Arguments\n");
1972 return BC_STS_INV_ARG;
1973 }
1974
1975 sts = crystalhd_hw_create_ioqs(hw);
1976 if (sts != BC_STS_SUCCESS) {
1977 BCMLOG_ERR("Failed to create IOQs..\n");
1978 return sts;
1979 }
1980
1981 mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
1982
1983 for (i = 0; i < BC_TX_LIST_CNT; i++) {
1984 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1985 if (mem) {
1986 memset(mem, 0, mem_len);
1987 } else {
1988 BCMLOG_ERR("Insufficient Memory For TX\n");
1989 crystalhd_hw_free_dma_rings(hw);
1990 return BC_STS_INSUFF_RES;
1991 }
1992 /* rx_pkt_pool -- static memory allocation */
1993 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
1994 hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
1995 hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
1996 sizeof(struct dma_descriptor);
1997 hw->tx_pkt_pool[i].list_tag = 0;
1998
1999 /* Add TX dma requests to Free Queue..*/
2000 sts = crystalhd_dioq_add(hw->tx_freeq,
2001 &hw->tx_pkt_pool[i], false, 0);
2002 if (sts != BC_STS_SUCCESS) {
2003 crystalhd_hw_free_dma_rings(hw);
2004 return sts;
2005 }
2006 }
2007
2008 for (i = 0; i < BC_RX_LIST_CNT; i++) {
2009 rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
2010 if (!rpkt) {
2011 BCMLOG_ERR("Insufficient Memory For RX\n");
2012 crystalhd_hw_free_dma_rings(hw);
2013 return BC_STS_INSUFF_RES;
2014 }
2015
2016 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
2017 if (mem) {
2018 memset(mem, 0, mem_len);
2019 } else {
2020 BCMLOG_ERR("Insufficient Memory For RX\n");
2021 crystalhd_hw_free_dma_rings(hw);
2022 kfree(rpkt);
2023 return BC_STS_INSUFF_RES;
2024 }
2025 rpkt->desc_mem.pdma_desc_start = mem;
2026 rpkt->desc_mem.phy_addr = phy_addr;
2027 rpkt->desc_mem.sz = BC_LINK_MAX_SGLS *
2028 sizeof(struct dma_descriptor);
2029 rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
2030 crystalhd_hw_free_rx_pkt(hw, rpkt);
2031 }
2032
2033 return BC_STS_SUCCESS;
2034 }
2035
2036 enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
2037 {
2038 unsigned int i;
2039 struct crystalhd_rx_dma_pkt *rpkt = NULL;
2040
2041 if (!hw || !hw->adp) {
2042 BCMLOG_ERR("Invalid Arguments\n");
2043 return BC_STS_INV_ARG;
2044 }
2045
2046 /* Delete all IOQs.. */
2047 crystalhd_hw_delete_ioqs(hw);
2048
2049 for (i = 0; i < BC_TX_LIST_CNT; i++) {
2050 if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
2051 bc_kern_dma_free(hw->adp,
2052 hw->tx_pkt_pool[i].desc_mem.sz,
2053 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
2054 hw->tx_pkt_pool[i].desc_mem.phy_addr);
2055
2056 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
2057 }
2058 }
2059
2060 BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
2061 do {
2062 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2063 if (!rpkt)
2064 break;
2065 bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
2066 rpkt->desc_mem.pdma_desc_start,
2067 rpkt->desc_mem.phy_addr);
2068 kfree(rpkt);
2069 } while (rpkt);
2070
2071 return BC_STS_SUCCESS;
2072 }
2073
/*
 * Queue a TX (code-in) DMA transfer.
 *
 * Fast-fails with BC_STS_BUSY when the code-in FIFO cannot take the
 * transfer, avoiding a queue fetch/add on the common busy path.  Then:
 * fetch a free TX packet, translate the caller's SGL into hardware
 * descriptors, assign a list tag (returned via *list_id) under hw->lock,
 * move the packet to the active queue, and arm the transfer.  Writing
 * the low descriptor address with the valid bit set can raise the
 * completion interrupt immediately, so all bookkeeping happens first.
 *
 * call_back/cb_event are invoked/signalled by the TX ISR on completion.
 */
enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw,
			 struct crystalhd_dio_req *ioreq,
			 hw_comp_callback call_back,
			 wait_queue_head_t *cb_event, uint32_t *list_id,
			 uint8_t data_flags)
{
	struct tx_dma_pkt *tx_dma_packet = NULL;
	uint32_t first_desc_u_addr, first_desc_l_addr;
	uint32_t low_addr, high_addr;
	union addr_64 desc_addr;
	enum BC_STATUS sts, add_sts;
	uint32_t dummy_index = 0;
	unsigned long flags;
	bool rc;

	if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/*
	 * Since we hit code in busy condition very frequently,
	 * we will check the code in status first before
	 * checking the availability of free elem.
	 *
	 * This will avoid the Q fetch/add in normal condition.
	 */
	rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
				    false, data_flags);
	if (rc) {
		hw->stats.cin_busy++;
		return BC_STS_BUSY;
	}

	/* Get a list from TxFreeQ */
	tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(
				hw->tx_freeq);
	if (!tx_dma_packet) {
		BCMLOG_ERR("No empty elements..\n");
		return BC_STS_ERR_USAGE;
	}

	sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
					     &tx_dma_packet->desc_mem,
					     &dummy_index);
	if (sts != BC_STS_SUCCESS) {
		/* Return the unused packet to the free queue. */
		add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
					     false, 0);
		if (add_sts != BC_STS_SUCCESS)
			BCMLOG_ERR("double fault..\n");

		return sts;
	}

	/* Block power management while the transfer is in flight. */
	hw->pwr_lock++;

	desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
	low_addr = desc_addr.low_part;
	high_addr = desc_addr.high_part;

	tx_dma_packet->call_back = call_back;
	tx_dma_packet->cb_event = cb_event;
	tx_dma_packet->dio_req = ioreq;

	spin_lock_irqsave(&hw->lock, flags);

	if (hw->tx_list_post_index == 0) {
		first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
		first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
	} else {
		first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
		first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
	}

	*list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
					     hw->tx_list_post_index;

	hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;

	spin_unlock_irqrestore(&hw->lock, flags);


	/* Insert in Active Q..*/
	crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
			   tx_dma_packet->list_tag);

	/*
	 * Interrupt will come as soon as you write
	 * the valid bit. So be ready for that. All
	 * the initialization should happen before that.
	 */
	crystalhd_start_tx_dma_engine(hw);
	crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);

	crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part |
			 0x01);
	/* Be sure we set the valid bit ^^^^ */

	return BC_STS_SUCCESS;
}
2174
2175 /*
2176 * This is a force cancel and we are racing with ISR.
2177 *
2178 * Will try to remove the req from ActQ before ISR gets it.
2179 * If ISR gets it first then the completion happens in the
2180 * normal path and we will return _STS_NO_DATA from here.
2181 *
2182 * FIX_ME: Not Tested the actual condition..
2183 */
2184 enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw,
2185 uint32_t list_id)
2186 {
2187 if (!hw || !list_id) {
2188 BCMLOG_ERR("Invalid Arguments\n");
2189 return BC_STS_INV_ARG;
2190 }
2191
2192 crystalhd_stop_tx_dma_engine(hw);
2193 crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);
2194
2195 return BC_STS_SUCCESS;
2196 }
2197
2198 enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
2199 struct crystalhd_dio_req *ioreq, bool en_post)
2200 {
2201 struct crystalhd_rx_dma_pkt *rpkt;
2202 uint32_t tag, uv_desc_ix = 0;
2203 enum BC_STATUS sts;
2204
2205 if (!hw || !ioreq) {
2206 BCMLOG_ERR("Invalid Arguments\n");
2207 return BC_STS_INV_ARG;
2208 }
2209
2210 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2211 if (!rpkt) {
2212 BCMLOG_ERR("Insufficient resources\n");
2213 return BC_STS_INSUFF_RES;
2214 }
2215
2216 rpkt->dio_req = ioreq;
2217 tag = rpkt->pkt_tag;
2218
2219 sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem,
2220 &uv_desc_ix);
2221 if (sts != BC_STS_SUCCESS)
2222 return sts;
2223
2224 rpkt->uv_phy_addr = 0;
2225
2226 /* Store the address of UV in the rx packet for post*/
2227 if (uv_desc_ix)
2228 rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
2229 (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));
2230
2231 if (en_post)
2232 sts = crystalhd_hw_post_cap_buff(hw, rpkt);
2233 else
2234 sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);
2235
2236 return sts;
2237 }
2238
2239 enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
2240 struct BC_PIC_INFO_BLOCK *pib,
2241 struct crystalhd_dio_req **ioreq)
2242 {
2243 struct crystalhd_rx_dma_pkt *rpkt;
2244 uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
2245 uint32_t sig_pending = 0;
2246
2247
2248 if (!hw || !ioreq || !pib) {
2249 BCMLOG_ERR("Invalid Arguments\n");
2250 return BC_STS_INV_ARG;
2251 }
2252
2253 rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
2254 if (!rpkt) {
2255 if (sig_pending) {
2256 BCMLOG(BCMLOG_INFO, "wait on frame time out %d\n",
2257 sig_pending);
2258 return BC_STS_IO_USER_ABORT;
2259 } else {
2260 return BC_STS_TIMEOUT;
2261 }
2262 }
2263
2264 rpkt->dio_req->uinfo.comp_flags = rpkt->flags;
2265
2266 if (rpkt->flags & COMP_FLAG_PIB_VALID)
2267 memcpy(pib, &rpkt->pib, sizeof(*pib));
2268
2269 *ioreq = rpkt->dio_req;
2270
2271 crystalhd_hw_free_rx_pkt(hw, rpkt);
2272
2273 return BC_STS_SUCCESS;
2274 }
2275
2276 enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
2277 {
2278 struct crystalhd_rx_dma_pkt *rx_pkt;
2279 enum BC_STATUS sts;
2280 uint32_t i;
2281
2282 if (!hw) {
2283 BCMLOG_ERR("Invalid Arguments\n");
2284 return BC_STS_INV_ARG;
2285 }
2286
2287 /* This is start of capture.. Post to both the lists.. */
2288 for (i = 0; i < DMA_ENGINE_CNT; i++) {
2289 rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
2290 if (!rx_pkt)
2291 return BC_STS_NO_DATA;
2292 sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
2293 if (BC_STS_SUCCESS != sts)
2294 break;
2295
2296 }
2297
2298 return BC_STS_SUCCESS;
2299 }
2300
2301 enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
2302 {
2303 void *temp = NULL;
2304
2305 if (!hw) {
2306 BCMLOG_ERR("Invalid Arguments\n");
2307 return BC_STS_INV_ARG;
2308 }
2309
2310 crystalhd_stop_rx_dma_engine(hw);
2311
2312 do {
2313 temp = crystalhd_dioq_fetch(hw->rx_freeq);
2314 if (temp)
2315 crystalhd_rx_pkt_rel_call_back(hw, temp);
2316 } while (temp);
2317
2318 return BC_STS_SUCCESS;
2319 }
2320
2321 enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
2322 {
2323 hw->stats.pause_cnt++;
2324 hw->stop_pending = 1;
2325
2326 if ((hw->rx_list_sts[0] == sts_free) &&
2327 (hw->rx_list_sts[1] == sts_free))
2328 crystalhd_hw_finalize_pause(hw);
2329
2330 return BC_STS_SUCCESS;
2331 }
2332
2333 enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
2334 {
2335 enum BC_STATUS sts;
2336 uint32_t aspm;
2337
2338 hw->stop_pending = 0;
2339
2340 aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
2341 aspm &= ~ASPM_L1_ENABLE;
2342 /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
2343 crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
2344
2345 sts = crystalhd_hw_start_capture(hw);
2346 return sts;
2347 }
2348
2349 enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
2350 {
2351 enum BC_STATUS sts;
2352
2353 if (!hw) {
2354 BCMLOG_ERR("Invalid Arguments\n");
2355 return BC_STS_INV_ARG;
2356 }
2357
2358 sts = crystalhd_put_ddr2sleep(hw);
2359 if (sts != BC_STS_SUCCESS) {
2360 BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
2361 return BC_STS_ERROR;
2362 }
2363
2364 if (!crystalhd_stop_device(hw->adp)) {
2365 BCMLOG_ERR("Failed to Stop Device!!\n");
2366 return BC_STS_ERROR;
2367 }
2368
2369 return BC_STS_SUCCESS;
2370 }
2371
2372 void crystalhd_hw_stats(struct crystalhd_hw *hw,
2373 struct crystalhd_hw_stats *stats)
2374 {
2375 if (!hw) {
2376 BCMLOG_ERR("Invalid Arguments\n");
2377 return;
2378 }
2379
2380 /* if called w/NULL stats, its a req to zero out the stats */
2381 if (!stats) {
2382 memset(&hw->stats, 0, sizeof(hw->stats));
2383 return;
2384 }
2385
2386 hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
2387 hw->stats.rdyq_count = crystalhd_dioq_count(hw->rx_rdyq);
2388 memcpy(stats, &hw->stats, sizeof(*stats));
2389 }
2390
2391 enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
2392 {
2393 uint32_t reg, n, i;
2394 uint32_t vco_mg, refresh_reg;
2395
2396 if (!hw) {
2397 BCMLOG_ERR("Invalid Arguments\n");
2398 return BC_STS_INV_ARG;
2399 }
2400
2401 /* FIXME: jarod: wha? */
2402 /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
2403 n = hw->core_clock_mhz/5;
2404
2405 if (n == hw->prev_n)
2406 return BC_STS_CLK_NOCHG;
2407
2408 if (hw->pwr_lock > 0) {
2409 /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
2410 return BC_STS_CLK_NOCHG;
2411 }
2412
2413 i = n * 27;
2414 if (i < 560)
2415 vco_mg = 0;
2416 else if (i < 900)
2417 vco_mg = 1;
2418 else if (i < 1030)
2419 vco_mg = 2;
2420 else
2421 vco_mg = 3;
2422
2423 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2424
2425 reg &= 0xFFFFCFC0;
2426 reg |= n;
2427 reg |= vco_mg << 12;
2428
2429 BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
2430 hw->core_clock_mhz, n, vco_mg);
2431
2432 /* Change the DRAM refresh rate to accommodate the new frequency */
2433 /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/
2434 refresh_reg = (7 * hw->core_clock_mhz / 16);
2435 bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));
2436
2437 bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
2438
2439 i = 0;
2440
2441 for (i = 0; i < 10; i++) {
2442 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2443
2444 if (reg & 0x00020000) {
2445 hw->prev_n = n;
2446 /* FIXME: jarod: outputting
2447 a random "C" is... confusing... */
2448 BCMLOG(BCMLOG_INFO, "C");
2449 return BC_STS_SUCCESS;
2450 } else {
2451 msleep_interruptible(10);
2452 }
2453 }
2454 BCMLOG(BCMLOG_INFO, "clk change failed\n");
2455 return BC_STS_CLK_NOCHG;
2456 }
This page took 0.160755 seconds and 5 git commands to generate.