1 /***************************************************************************
2 * Copyright (c) 2005-2009, Broadcom Corporation.
4 * Name: crystalhd_hw . c
7 * BCM70010 Linux driver HW layer.
9 **********************************************************************
10 * This file is part of the crystalhd device driver.
12 * This driver is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation, version 2 of the License.
16 * This driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
23 **********************************************************************/
25 #include "crystalhd.h"
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
31 /* Functions internal to this file */
33 static void crystalhd_enable_uarts(struct crystalhd_adp
*adp
)
35 bc_dec_reg_wr(adp
, UartSelectA
, BSVS_UART_STREAM
);
36 bc_dec_reg_wr(adp
, UartSelectB
, BSVS_UART_DEC_OUTER
);
40 static void crystalhd_start_dram(struct crystalhd_adp
*adp
)
42 bc_dec_reg_wr(adp
, SDRAM_PARAM
, ((40 / 5 - 1) << 0) |
43 /* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */
44 ((15 / 5 - 1) << 7) | /* trp */
45 ((10 / 5 - 1) << 10) | /* trrd */
46 ((15 / 5 + 1) << 12) | /* twr */
47 ((2 + 1) << 16) | /* twtr */
48 ((70 / 5 - 2) << 19) | /* trfc */
51 bc_dec_reg_wr(adp
, SDRAM_PRECHARGE
, 0);
52 bc_dec_reg_wr(adp
, SDRAM_EXT_MODE
, 2);
53 bc_dec_reg_wr(adp
, SDRAM_MODE
, 0x132);
54 bc_dec_reg_wr(adp
, SDRAM_PRECHARGE
, 0);
55 bc_dec_reg_wr(adp
, SDRAM_REFRESH
, 0);
56 bc_dec_reg_wr(adp
, SDRAM_REFRESH
, 0);
57 bc_dec_reg_wr(adp
, SDRAM_MODE
, 0x32);
58 /* setting the refresh rate here */
59 bc_dec_reg_wr(adp
, SDRAM_REF_PARAM
, ((1 << 12) | 96));
63 static bool crystalhd_bring_out_of_rst(struct crystalhd_adp
*adp
)
65 union link_misc_perst_deco_ctrl rst_deco_cntrl
;
66 union link_misc_perst_clk_ctrl rst_clk_cntrl
;
70 * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
71 * delay to allow PLL to lock Clear alternate clock, stop clock bits
73 rst_clk_cntrl
.whole_reg
= crystalhd_reg_rd(adp
, MISC_PERST_CLOCK_CTRL
);
74 rst_clk_cntrl
.pll_pwr_dn
= 0;
75 crystalhd_reg_wr(adp
, MISC_PERST_CLOCK_CTRL
, rst_clk_cntrl
.whole_reg
);
76 msleep_interruptible(50);
78 rst_clk_cntrl
.whole_reg
= crystalhd_reg_rd(adp
, MISC_PERST_CLOCK_CTRL
);
79 rst_clk_cntrl
.stop_core_clk
= 0;
80 rst_clk_cntrl
.sel_alt_clk
= 0;
82 crystalhd_reg_wr(adp
, MISC_PERST_CLOCK_CTRL
, rst_clk_cntrl
.whole_reg
);
83 msleep_interruptible(50);
86 * Bus Arbiter Timeout: GISB_ARBITER_TIMER
87 * Set internal bus arbiter timeout to 40us based on core clock speed
88 * (63MHz * 40us = 0x9D8)
90 crystalhd_reg_wr(adp
, GISB_ARBITER_TIMER
, 0x9D8);
93 * Decoder clocks: MISC_PERST_DECODER_CTRL
94 * Enable clocks while 7412 reset is asserted, delay
95 * De-assert 7412 reset
97 rst_deco_cntrl
.whole_reg
= crystalhd_reg_rd(adp
,
98 MISC_PERST_DECODER_CTRL
);
99 rst_deco_cntrl
.stop_bcm_7412_clk
= 0;
100 rst_deco_cntrl
.bcm7412_rst
= 1;
101 crystalhd_reg_wr(adp
, MISC_PERST_DECODER_CTRL
,
102 rst_deco_cntrl
.whole_reg
);
103 msleep_interruptible(10);
105 rst_deco_cntrl
.whole_reg
= crystalhd_reg_rd(adp
,
106 MISC_PERST_DECODER_CTRL
);
107 rst_deco_cntrl
.bcm7412_rst
= 0;
108 crystalhd_reg_wr(adp
, MISC_PERST_DECODER_CTRL
,
109 rst_deco_cntrl
.whole_reg
);
110 msleep_interruptible(50);
112 /* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
113 crystalhd_reg_wr(adp
, OTP_CONTENT_MISC
, 0);
115 /* Clear bit 29 of 0x404 */
116 temp
= crystalhd_reg_rd(adp
, PCIE_TL_TRANSACTION_CONFIGURATION
);
118 crystalhd_reg_wr(adp
, PCIE_TL_TRANSACTION_CONFIGURATION
, temp
);
120 /* 2.5V regulator must be set to 2.6 volts (+6%) */
121 /* FIXME: jarod: what's the point of this reg read? */
122 temp
= crystalhd_reg_rd(adp
, MISC_PERST_VREG_CTRL
);
123 crystalhd_reg_wr(adp
, MISC_PERST_VREG_CTRL
, 0xF3);
128 static bool crystalhd_put_in_reset(struct crystalhd_adp
*adp
)
130 union link_misc_perst_deco_ctrl rst_deco_cntrl
;
131 union link_misc_perst_clk_ctrl rst_clk_cntrl
;
135 * Decoder clocks: MISC_PERST_DECODER_CTRL
136 * Assert 7412 reset, delay
137 * Assert 7412 stop clock
139 rst_deco_cntrl
.whole_reg
= crystalhd_reg_rd(adp
,
140 MISC_PERST_DECODER_CTRL
);
141 rst_deco_cntrl
.stop_bcm_7412_clk
= 1;
142 crystalhd_reg_wr(adp
, MISC_PERST_DECODER_CTRL
,
143 rst_deco_cntrl
.whole_reg
);
144 msleep_interruptible(50);
146 /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
147 * Set internal bus arbiter timeout to 40us based on core clock speed
148 * (6.75MHZ * 40us = 0x10E)
150 crystalhd_reg_wr(adp
, GISB_ARBITER_TIMER
, 0x10E);
152 /* Link clocks: MISC_PERST_CLOCK_CTRL
153 * Stop core clk, delay
154 * Set alternate clk, delay, set PLL power down
156 rst_clk_cntrl
.whole_reg
= crystalhd_reg_rd(adp
, MISC_PERST_CLOCK_CTRL
);
157 rst_clk_cntrl
.stop_core_clk
= 1;
158 rst_clk_cntrl
.sel_alt_clk
= 1;
159 crystalhd_reg_wr(adp
, MISC_PERST_CLOCK_CTRL
, rst_clk_cntrl
.whole_reg
);
160 msleep_interruptible(50);
162 rst_clk_cntrl
.whole_reg
= crystalhd_reg_rd(adp
, MISC_PERST_CLOCK_CTRL
);
163 rst_clk_cntrl
.pll_pwr_dn
= 1;
164 crystalhd_reg_wr(adp
, MISC_PERST_CLOCK_CTRL
, rst_clk_cntrl
.whole_reg
);
167 * Read and restore the Transaction Configuration Register
170 temp
= crystalhd_reg_rd(adp
, PCIE_TL_TRANSACTION_CONFIGURATION
);
173 * Link core soft reset: MISC3_RESET_CTRL
174 * - Write BIT[0]=1 and read it back for core reset to take place
176 crystalhd_reg_wr(adp
, MISC3_RESET_CTRL
, 1);
177 rst_deco_cntrl
.whole_reg
= crystalhd_reg_rd(adp
, MISC3_RESET_CTRL
);
178 msleep_interruptible(50);
180 /* restore the transaction configuration register */
181 crystalhd_reg_wr(adp
, PCIE_TL_TRANSACTION_CONFIGURATION
, temp
);
186 static void crystalhd_disable_interrupts(struct crystalhd_adp
*adp
)
188 union intr_mask_reg intr_mask
;
189 intr_mask
.whole_reg
= crystalhd_reg_rd(adp
, INTR_INTR_MSK_STS_REG
);
190 intr_mask
.mask_pcie_err
= 1;
191 intr_mask
.mask_pcie_rbusmast_err
= 1;
192 intr_mask
.mask_pcie_rgr_bridge
= 1;
193 intr_mask
.mask_rx_done
= 1;
194 intr_mask
.mask_rx_err
= 1;
195 intr_mask
.mask_tx_done
= 1;
196 intr_mask
.mask_tx_err
= 1;
197 crystalhd_reg_wr(adp
, INTR_INTR_MSK_SET_REG
, intr_mask
.whole_reg
);
202 static void crystalhd_enable_interrupts(struct crystalhd_adp
*adp
)
204 union intr_mask_reg intr_mask
;
205 intr_mask
.whole_reg
= crystalhd_reg_rd(adp
, INTR_INTR_MSK_STS_REG
);
206 intr_mask
.mask_pcie_err
= 1;
207 intr_mask
.mask_pcie_rbusmast_err
= 1;
208 intr_mask
.mask_pcie_rgr_bridge
= 1;
209 intr_mask
.mask_rx_done
= 1;
210 intr_mask
.mask_rx_err
= 1;
211 intr_mask
.mask_tx_done
= 1;
212 intr_mask
.mask_tx_err
= 1;
213 crystalhd_reg_wr(adp
, INTR_INTR_MSK_CLR_REG
, intr_mask
.whole_reg
);
218 static void crystalhd_clear_errors(struct crystalhd_adp
*adp
)
222 /* FIXME: jarod: wouldn't we want to write a 0 to the reg?
223 Or does the write clear the bits specified? */
224 reg
= crystalhd_reg_rd(adp
, MISC1_Y_RX_ERROR_STATUS
);
226 crystalhd_reg_wr(adp
, MISC1_Y_RX_ERROR_STATUS
, reg
);
228 reg
= crystalhd_reg_rd(adp
, MISC1_UV_RX_ERROR_STATUS
);
230 crystalhd_reg_wr(adp
, MISC1_UV_RX_ERROR_STATUS
, reg
);
232 reg
= crystalhd_reg_rd(adp
, MISC1_TX_DMA_ERROR_STATUS
);
234 crystalhd_reg_wr(adp
, MISC1_TX_DMA_ERROR_STATUS
, reg
);
237 static void crystalhd_clear_interrupts(struct crystalhd_adp
*adp
)
239 uint32_t intr_sts
= crystalhd_reg_rd(adp
, INTR_INTR_STATUS
);
242 crystalhd_reg_wr(adp
, INTR_INTR_CLR_REG
, intr_sts
);
244 /* Write End Of Interrupt for PCIE */
245 crystalhd_reg_wr(adp
, INTR_EOI_CTRL
, 1);
249 static void crystalhd_soft_rst(struct crystalhd_adp
*adp
)
253 /* Assert c011 soft reset*/
254 bc_dec_reg_wr(adp
, DecHt_HostSwReset
, 0x00000001);
255 msleep_interruptible(50);
257 /* Release c011 soft reset*/
258 bc_dec_reg_wr(adp
, DecHt_HostSwReset
, 0x00000000);
260 /* Disable Stuffing..*/
261 val
= crystalhd_reg_rd(adp
, MISC2_GLOBAL_CTRL
);
263 crystalhd_reg_wr(adp
, MISC2_GLOBAL_CTRL
, val
);
266 static bool crystalhd_load_firmware_config(struct crystalhd_adp
*adp
)
270 crystalhd_reg_wr(adp
, DCI_DRAM_BASE_ADDR
, (BC_DRAM_FW_CFG_ADDR
>> 19));
272 crystalhd_reg_wr(adp
, AES_CMD
, 0);
273 crystalhd_reg_wr(adp
, AES_CONFIG_INFO
,
274 (BC_DRAM_FW_CFG_ADDR
& 0x7FFFF));
275 crystalhd_reg_wr(adp
, AES_CMD
, 0x1);
277 /* FIXME: jarod: I've seen this fail,
278 and introducing extra delays helps... */
279 for (i
= 0; i
< 100; ++i
) {
280 reg
= crystalhd_reg_rd(adp
, AES_STATUS
);
283 msleep_interruptible(10);
290 static bool crystalhd_start_device(struct crystalhd_adp
*adp
)
292 uint32_t dbg_options
, glb_cntrl
= 0, reg_pwrmgmt
= 0;
294 BCMLOG(BCMLOG_INFO
, "Starting BCM70012 Device\n");
296 reg_pwrmgmt
= crystalhd_reg_rd(adp
, PCIE_DLL_DATA_LINK_CONTROL
);
297 reg_pwrmgmt
&= ~ASPM_L1_ENABLE
;
299 crystalhd_reg_wr(adp
, PCIE_DLL_DATA_LINK_CONTROL
, reg_pwrmgmt
);
301 if (!crystalhd_bring_out_of_rst(adp
)) {
302 BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
306 crystalhd_disable_interrupts(adp
);
308 crystalhd_clear_errors(adp
);
310 crystalhd_clear_interrupts(adp
);
312 crystalhd_enable_interrupts(adp
);
314 /* Enable the option for getting the total no. of DWORDS
315 * that have been transferred by the RXDMA engine
317 dbg_options
= crystalhd_reg_rd(adp
, MISC1_DMA_DEBUG_OPTIONS_REG
);
319 crystalhd_reg_wr(adp
, MISC1_DMA_DEBUG_OPTIONS_REG
, dbg_options
);
321 /* Enable PCI Global Control options */
322 glb_cntrl
= crystalhd_reg_rd(adp
, MISC2_GLOBAL_CTRL
);
325 crystalhd_reg_wr(adp
, MISC2_GLOBAL_CTRL
, glb_cntrl
);
327 crystalhd_enable_interrupts(adp
);
329 crystalhd_soft_rst(adp
);
330 crystalhd_start_dram(adp
);
331 crystalhd_enable_uarts(adp
);
336 static bool crystalhd_stop_device(struct crystalhd_adp
*adp
)
340 BCMLOG(BCMLOG_INFO
, "Stopping BCM70012 Device\n");
341 /* Clear and disable interrupts */
342 crystalhd_disable_interrupts(adp
);
343 crystalhd_clear_errors(adp
);
344 crystalhd_clear_interrupts(adp
);
346 if (!crystalhd_put_in_reset(adp
))
347 BCMLOG_ERR("Failed to Put Link To Reset State\n");
349 reg
= crystalhd_reg_rd(adp
, PCIE_DLL_DATA_LINK_CONTROL
);
350 reg
|= ASPM_L1_ENABLE
;
351 crystalhd_reg_wr(adp
, PCIE_DLL_DATA_LINK_CONTROL
, reg
);
353 /* Set PCI Clk Req */
354 reg
= crystalhd_reg_rd(adp
, PCIE_CLK_REQ_REG
);
355 reg
|= PCI_CLK_REQ_ENABLE
;
356 crystalhd_reg_wr(adp
, PCIE_CLK_REQ_REG
, reg
);
361 static struct crystalhd_rx_dma_pkt
*crystalhd_hw_alloc_rx_pkt(
362 struct crystalhd_hw
*hw
)
364 unsigned long flags
= 0;
365 struct crystalhd_rx_dma_pkt
*temp
= NULL
;
370 spin_lock_irqsave(&hw
->lock
, flags
);
371 temp
= hw
->rx_pkt_pool_head
;
373 hw
->rx_pkt_pool_head
= hw
->rx_pkt_pool_head
->next
;
374 temp
->dio_req
= NULL
;
378 spin_unlock_irqrestore(&hw
->lock
, flags
);
383 static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw
*hw
,
384 struct crystalhd_rx_dma_pkt
*pkt
)
386 unsigned long flags
= 0;
391 spin_lock_irqsave(&hw
->lock
, flags
);
392 pkt
->next
= hw
->rx_pkt_pool_head
;
393 hw
->rx_pkt_pool_head
= pkt
;
394 spin_unlock_irqrestore(&hw
->lock
, flags
);
/*
 * Call back from TX - IOQ deletion.
 *
 * This routine will release the TX DMA rings allocated
 * druing setup_dma rings interface.
 *
 * Memory is allocated per DMA ring basis. This is just
 * a place holder to be able to create the dio queues.
 */
static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
{
	/* Intentionally empty — ring memory is freed at ring release. */
}
411 * Rx Packet release callback..
413 * Release All user mapped capture buffers and Our DMA packets
414 * back to our free pool. The actual cleanup of the DMA
415 * ring descriptors happen during dma ring release.
417 static void crystalhd_rx_pkt_rel_call_back(void *context
, void *data
)
419 struct crystalhd_hw
*hw
= (struct crystalhd_hw
*)context
;
420 struct crystalhd_rx_dma_pkt
*pkt
= (struct crystalhd_rx_dma_pkt
*)data
;
423 BCMLOG_ERR("Invalid arg - %p %p\n", hw
, pkt
);
428 crystalhd_unmap_dio(hw
->adp
, pkt
->dio_req
);
430 BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt
->pkt_tag
);
432 crystalhd_hw_free_rx_pkt(hw
, pkt
);
435 #define crystalhd_hw_delete_ioq(adp, q) \
437 crystalhd_delete_dioq(adp, q); \
441 static void crystalhd_hw_delete_ioqs(struct crystalhd_hw
*hw
)
446 BCMLOG(BCMLOG_DBG
, "Deleting IOQs\n");
447 crystalhd_hw_delete_ioq(hw
->adp
, hw
->tx_actq
);
448 crystalhd_hw_delete_ioq(hw
->adp
, hw
->tx_freeq
);
449 crystalhd_hw_delete_ioq(hw
->adp
, hw
->rx_actq
);
450 crystalhd_hw_delete_ioq(hw
->adp
, hw
->rx_freeq
);
451 crystalhd_hw_delete_ioq(hw
->adp
, hw
->rx_rdyq
);
454 #define crystalhd_hw_create_ioq(sts, hw, q, cb) \
456 sts = crystalhd_create_dioq(hw->adp, &q, cb, hw); \
457 if (sts != BC_STS_SUCCESS) \
458 goto hw_create_ioq_err; \
465 * RX - Active, Ready and Free.
467 static enum BC_STATUS
crystalhd_hw_create_ioqs(struct crystalhd_hw
*hw
)
469 enum BC_STATUS sts
= BC_STS_SUCCESS
;
472 BCMLOG_ERR("Invalid Arg!!\n");
473 return BC_STS_INV_ARG
;
476 crystalhd_hw_create_ioq(sts
, hw
, hw
->tx_freeq
,
477 crystalhd_tx_desc_rel_call_back
);
478 crystalhd_hw_create_ioq(sts
, hw
, hw
->tx_actq
,
479 crystalhd_tx_desc_rel_call_back
);
481 crystalhd_hw_create_ioq(sts
, hw
, hw
->rx_freeq
,
482 crystalhd_rx_pkt_rel_call_back
);
483 crystalhd_hw_create_ioq(sts
, hw
, hw
->rx_rdyq
,
484 crystalhd_rx_pkt_rel_call_back
);
485 crystalhd_hw_create_ioq(sts
, hw
, hw
->rx_actq
,
486 crystalhd_rx_pkt_rel_call_back
);
491 crystalhd_hw_delete_ioqs(hw
);
497 static bool crystalhd_code_in_full(struct crystalhd_adp
*adp
,
498 uint32_t needed_sz
, bool b_188_byte_pkts
, uint8_t flags
)
500 uint32_t base
, end
, writep
, readp
;
501 uint32_t cpbSize
, cpbFullness
, fifoSize
;
503 if (flags
& 0x02) { /* ASF Bit is set */
504 base
= bc_dec_reg_rd(adp
, REG_Dec_TsAudCDB2Base
);
505 end
= bc_dec_reg_rd(adp
, REG_Dec_TsAudCDB2End
);
506 writep
= bc_dec_reg_rd(adp
, REG_Dec_TsAudCDB2Wrptr
);
507 readp
= bc_dec_reg_rd(adp
, REG_Dec_TsAudCDB2Rdptr
);
508 } else if (b_188_byte_pkts
) { /*Encrypted 188 byte packets*/
509 base
= bc_dec_reg_rd(adp
, REG_Dec_TsUser0Base
);
510 end
= bc_dec_reg_rd(adp
, REG_Dec_TsUser0End
);
511 writep
= bc_dec_reg_rd(adp
, REG_Dec_TsUser0Wrptr
);
512 readp
= bc_dec_reg_rd(adp
, REG_Dec_TsUser0Rdptr
);
514 base
= bc_dec_reg_rd(adp
, REG_DecCA_RegCinBase
);
515 end
= bc_dec_reg_rd(adp
, REG_DecCA_RegCinEnd
);
516 writep
= bc_dec_reg_rd(adp
, REG_DecCA_RegCinWrPtr
);
517 readp
= bc_dec_reg_rd(adp
, REG_DecCA_RegCinRdPtr
);
520 cpbSize
= end
- base
;
522 cpbFullness
= writep
- readp
;
524 cpbFullness
= (end
- base
) - (readp
- writep
);
526 fifoSize
= cpbSize
- cpbFullness
;
528 if (fifoSize
< BC_INFIFO_THRESHOLD
)
531 if (needed_sz
> (fifoSize
- BC_INFIFO_THRESHOLD
))
537 static enum BC_STATUS
crystalhd_hw_tx_req_complete(struct crystalhd_hw
*hw
,
538 uint32_t list_id
, enum BC_STATUS cs
)
540 struct tx_dma_pkt
*tx_req
;
542 if (!hw
|| !list_id
) {
543 BCMLOG_ERR("Invalid Arg..\n");
544 return BC_STS_INV_ARG
;
549 tx_req
= (struct tx_dma_pkt
*)crystalhd_dioq_find_and_fetch(
550 hw
->tx_actq
, list_id
);
552 if (cs
!= BC_STS_IO_USER_ABORT
)
553 BCMLOG_ERR("Find and Fetch Did not find req\n");
554 return BC_STS_NO_DATA
;
557 if (tx_req
->call_back
) {
558 tx_req
->call_back(tx_req
->dio_req
, tx_req
->cb_event
, cs
);
559 tx_req
->dio_req
= NULL
;
560 tx_req
->cb_event
= NULL
;
561 tx_req
->call_back
= NULL
;
563 BCMLOG(BCMLOG_DBG
, "Missing Tx Callback - %X\n",
567 /* Now put back the tx_list back in FreeQ */
568 tx_req
->list_tag
= 0;
570 return crystalhd_dioq_add(hw
->tx_freeq
, tx_req
, false, 0);
573 static bool crystalhd_tx_list0_handler(struct crystalhd_hw
*hw
,
576 uint32_t err_mask
, tmp
;
577 unsigned long flags
= 0;
579 err_mask
= MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK
|
580 MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK
|
581 MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK
;
583 if (!(err_sts
& err_mask
))
586 BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts
);
590 if (err_sts
& MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK
)
591 tmp
&= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK
;
594 spin_lock_irqsave(&hw
->lock
, flags
);
595 /* reset list index.*/
596 hw
->tx_list_post_index
= 0;
597 spin_unlock_irqrestore(&hw
->lock
, flags
);
600 tmp
= err_sts
& err_mask
;
601 crystalhd_reg_wr(hw
->adp
, MISC1_TX_DMA_ERROR_STATUS
, tmp
);
606 static bool crystalhd_tx_list1_handler(struct crystalhd_hw
*hw
,
609 uint32_t err_mask
, tmp
;
610 unsigned long flags
= 0;
612 err_mask
= MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK
|
613 MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK
|
614 MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK
;
616 if (!(err_sts
& err_mask
))
619 BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts
);
623 if (err_sts
& MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK
)
624 tmp
&= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK
;
627 spin_lock_irqsave(&hw
->lock
, flags
);
628 /* reset list index.*/
629 hw
->tx_list_post_index
= 0;
630 spin_unlock_irqrestore(&hw
->lock
, flags
);
633 tmp
= err_sts
& err_mask
;
634 crystalhd_reg_wr(hw
->adp
, MISC1_TX_DMA_ERROR_STATUS
, tmp
);
639 static void crystalhd_tx_isr(struct crystalhd_hw
*hw
, uint32_t int_sts
)
643 if (int_sts
& INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK
)
644 crystalhd_hw_tx_req_complete(hw
, hw
->tx_ioq_tag_seed
+ 0,
647 if (int_sts
& INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK
)
648 crystalhd_hw_tx_req_complete(hw
, hw
->tx_ioq_tag_seed
+ 1,
651 if (!(int_sts
& (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK
|
652 INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK
))) {
653 /* No error mask set.. */
657 /* Handle Tx errors. */
658 err_sts
= crystalhd_reg_rd(hw
->adp
, MISC1_TX_DMA_ERROR_STATUS
);
660 if (crystalhd_tx_list0_handler(hw
, err_sts
))
661 crystalhd_hw_tx_req_complete(hw
, hw
->tx_ioq_tag_seed
+ 0,
664 if (crystalhd_tx_list1_handler(hw
, err_sts
))
665 crystalhd_hw_tx_req_complete(hw
, hw
->tx_ioq_tag_seed
+ 1,
668 hw
->stats
.tx_errors
++;
671 static void crystalhd_hw_dump_desc(struct dma_descriptor
*p_dma_desc
,
672 uint32_t ul_desc_index
, uint32_t cnt
)
676 if (!p_dma_desc
|| !cnt
)
679 /* FIXME: jarod: perhaps a modparam desc_debug to enable this,
680 rather than setting ll (log level, I presume) to non-zero? */
684 for (ix
= ul_desc_index
; ix
< (ul_desc_index
+ cnt
); ix
++) {
686 "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
687 ((p_dma_desc
[ul_desc_index
].dma_dir
) ? "TDesc" : "RDesc"),
689 p_dma_desc
[ul_desc_index
].buff_addr_high
,
690 p_dma_desc
[ul_desc_index
].buff_addr_low
,
691 p_dma_desc
[ul_desc_index
].next_desc_addr_high
,
692 p_dma_desc
[ul_desc_index
].next_desc_addr_low
,
693 p_dma_desc
[ul_desc_index
].xfer_size
,
694 p_dma_desc
[ul_desc_index
].intr_enable
,
695 p_dma_desc
[ul_desc_index
].last_rec_indicator
);
700 static enum BC_STATUS
crystalhd_hw_fill_desc(struct crystalhd_dio_req
*ioreq
,
701 struct dma_descriptor
*desc
,
702 dma_addr_t desc_paddr_base
,
703 uint32_t sg_cnt
, uint32_t sg_st_ix
,
704 uint32_t sg_st_off
, uint32_t xfr_sz
)
706 uint32_t count
= 0, ix
= 0, sg_ix
= 0, len
= 0, last_desc_ix
= 0;
707 dma_addr_t desc_phy_addr
= desc_paddr_base
;
708 union addr_64 addr_temp
;
710 if (!ioreq
|| !desc
|| !desc_paddr_base
|| !xfr_sz
||
711 (!sg_cnt
&& !ioreq
->uinfo
.dir_tx
)) {
712 BCMLOG_ERR("Invalid Args\n");
713 return BC_STS_INV_ARG
;
716 for (ix
= 0; ix
< sg_cnt
; ix
++) {
718 /* Setup SGLE index. */
719 sg_ix
= ix
+ sg_st_ix
;
721 /* Get SGLE length */
722 len
= crystalhd_get_sgle_len(ioreq
, sg_ix
);
724 BCMLOG_ERR(" len in sg %d %d %d\n", len
, sg_ix
,
726 return BC_STS_NOT_IMPL
;
728 /* Setup DMA desc with Phy addr & Length at current index. */
729 addr_temp
.full_addr
= crystalhd_get_sgle_paddr(ioreq
, sg_ix
);
730 if (sg_ix
== sg_st_ix
) {
731 addr_temp
.full_addr
+= sg_st_off
;
734 memset(&desc
[ix
], 0, sizeof(desc
[ix
]));
735 desc
[ix
].buff_addr_low
= addr_temp
.low_part
;
736 desc
[ix
].buff_addr_high
= addr_temp
.high_part
;
737 desc
[ix
].dma_dir
= ioreq
->uinfo
.dir_tx
;
739 /* Chain DMA descriptor. */
740 addr_temp
.full_addr
= desc_phy_addr
+
741 sizeof(struct dma_descriptor
);
742 desc
[ix
].next_desc_addr_low
= addr_temp
.low_part
;
743 desc
[ix
].next_desc_addr_high
= addr_temp
.high_part
;
745 if ((count
+ len
) > xfr_sz
)
746 len
= xfr_sz
- count
;
749 if ((!len
) || (len
> crystalhd_get_sgle_len(ioreq
, sg_ix
))) {
751 "inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
752 len
, ix
, count
, xfr_sz
, sg_cnt
);
755 /* Length expects Multiple of 4 */
756 desc
[ix
].xfer_size
= (len
/ 4);
758 crystalhd_hw_dump_desc(desc
, ix
, 1);
761 desc_phy_addr
+= sizeof(struct dma_descriptor
);
764 last_desc_ix
= ix
- 1;
766 if (ioreq
->fb_size
) {
767 memset(&desc
[ix
], 0, sizeof(desc
[ix
]));
768 addr_temp
.full_addr
= ioreq
->fb_pa
;
769 desc
[ix
].buff_addr_low
= addr_temp
.low_part
;
770 desc
[ix
].buff_addr_high
= addr_temp
.high_part
;
771 desc
[ix
].dma_dir
= ioreq
->uinfo
.dir_tx
;
772 desc
[ix
].xfer_size
= 1;
773 desc
[ix
].fill_bytes
= 4 - ioreq
->fb_size
;
774 count
+= ioreq
->fb_size
;
778 /* setup last descriptor..*/
779 desc
[last_desc_ix
].last_rec_indicator
= 1;
780 desc
[last_desc_ix
].next_desc_addr_low
= 0;
781 desc
[last_desc_ix
].next_desc_addr_high
= 0;
782 desc
[last_desc_ix
].intr_enable
= 1;
784 crystalhd_hw_dump_desc(desc
, last_desc_ix
, 1);
786 if (count
!= xfr_sz
) {
787 BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count
, xfr_sz
);
791 return BC_STS_SUCCESS
;
794 static enum BC_STATUS
crystalhd_xlat_sgl_to_dma_desc(
795 struct crystalhd_dio_req
*ioreq
,
796 struct dma_desc_mem
*pdesc_mem
,
797 uint32_t *uv_desc_index
)
799 struct dma_descriptor
*desc
= NULL
;
800 dma_addr_t desc_paddr_base
= 0;
801 uint32_t sg_cnt
= 0, sg_st_ix
= 0, sg_st_off
= 0;
803 enum BC_STATUS sts
= BC_STS_SUCCESS
;
806 if (!ioreq
|| !pdesc_mem
|| !uv_desc_index
) {
807 BCMLOG_ERR("Invalid Args\n");
808 return BC_STS_INV_ARG
;
811 if (!pdesc_mem
->sz
|| !pdesc_mem
->pdma_desc_start
||
812 !ioreq
->sg
|| (!ioreq
->sg_cnt
&& !ioreq
->uinfo
.dir_tx
)) {
813 BCMLOG_ERR("Invalid Args\n");
814 return BC_STS_INV_ARG
;
817 if ((ioreq
->uinfo
.dir_tx
) && (ioreq
->uinfo
.uv_offset
)) {
818 BCMLOG_ERR("UV offset for TX??\n");
819 return BC_STS_INV_ARG
;
823 desc
= pdesc_mem
->pdma_desc_start
;
824 desc_paddr_base
= pdesc_mem
->phy_addr
;
826 if (ioreq
->uinfo
.dir_tx
|| (ioreq
->uinfo
.uv_offset
== 0)) {
827 sg_cnt
= ioreq
->sg_cnt
;
828 xfr_sz
= ioreq
->uinfo
.xfr_len
;
830 sg_cnt
= ioreq
->uinfo
.uv_sg_ix
+ 1;
831 xfr_sz
= ioreq
->uinfo
.uv_offset
;
834 sts
= crystalhd_hw_fill_desc(ioreq
, desc
, desc_paddr_base
, sg_cnt
,
835 sg_st_ix
, sg_st_off
, xfr_sz
);
837 if ((sts
!= BC_STS_SUCCESS
) || !ioreq
->uinfo
.uv_offset
)
840 /* Prepare for UV mapping.. */
841 desc
= &pdesc_mem
->pdma_desc_start
[sg_cnt
];
842 desc_paddr_base
= pdesc_mem
->phy_addr
+
843 (sg_cnt
* sizeof(struct dma_descriptor
));
845 /* Done with desc addr.. now update sg stuff.*/
846 sg_cnt
= ioreq
->sg_cnt
- ioreq
->uinfo
.uv_sg_ix
;
847 xfr_sz
= ioreq
->uinfo
.xfr_len
- ioreq
->uinfo
.uv_offset
;
848 sg_st_ix
= ioreq
->uinfo
.uv_sg_ix
;
849 sg_st_off
= ioreq
->uinfo
.uv_sg_off
;
851 sts
= crystalhd_hw_fill_desc(ioreq
, desc
, desc_paddr_base
, sg_cnt
,
852 sg_st_ix
, sg_st_off
, xfr_sz
);
853 if (sts
!= BC_STS_SUCCESS
)
856 *uv_desc_index
= sg_st_ix
;
861 static void crystalhd_start_tx_dma_engine(struct crystalhd_hw
*hw
)
865 dma_cntrl
= crystalhd_reg_rd(hw
->adp
, MISC1_TX_SW_DESC_LIST_CTRL_STS
);
866 if (!(dma_cntrl
& DMA_START_BIT
)) {
867 dma_cntrl
|= DMA_START_BIT
;
868 crystalhd_reg_wr(hw
->adp
, MISC1_TX_SW_DESC_LIST_CTRL_STS
,
877 * Verify if the Stop generates a completion interrupt or not.
878 * if it does not generate an interrupt, then add polling here.
880 static enum BC_STATUS
crystalhd_stop_tx_dma_engine(struct crystalhd_hw
*hw
)
882 uint32_t dma_cntrl
, cnt
= 30;
883 uint32_t l1
= 1, l2
= 1;
884 unsigned long flags
= 0;
886 dma_cntrl
= crystalhd_reg_rd(hw
->adp
, MISC1_TX_SW_DESC_LIST_CTRL_STS
);
888 BCMLOG(BCMLOG_DBG
, "Stopping TX DMA Engine..\n");
890 if (!(dma_cntrl
& DMA_START_BIT
)) {
891 BCMLOG(BCMLOG_DBG
, "Already Stopped\n");
892 return BC_STS_SUCCESS
;
895 crystalhd_disable_interrupts(hw
->adp
);
897 /* Issue stop to HW */
898 /* This bit when set gave problems. Please check*/
899 dma_cntrl
&= ~DMA_START_BIT
;
900 crystalhd_reg_wr(hw
->adp
, MISC1_TX_SW_DESC_LIST_CTRL_STS
, dma_cntrl
);
902 BCMLOG(BCMLOG_DBG
, "Cleared the DMA Start bit\n");
904 /* Poll for 3seconds (30 * 100ms) on both the lists..*/
905 while ((l1
|| l2
) && cnt
) {
908 l1
= crystalhd_reg_rd(hw
->adp
,
909 MISC1_TX_FIRST_DESC_L_ADDR_LIST0
);
914 l2
= crystalhd_reg_rd(hw
->adp
,
915 MISC1_TX_FIRST_DESC_L_ADDR_LIST1
);
919 msleep_interruptible(100);
925 BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1
, l2
);
926 crystalhd_enable_interrupts(hw
->adp
);
930 spin_lock_irqsave(&hw
->lock
, flags
);
931 hw
->tx_list_post_index
= 0;
932 spin_unlock_irqrestore(&hw
->lock
, flags
);
933 BCMLOG(BCMLOG_DBG
, "stopped TX DMA..\n");
934 crystalhd_enable_interrupts(hw
->adp
);
936 return BC_STS_SUCCESS
;
939 static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw
*hw
)
942 * Position of the PIB Entries can be found at
943 * 0th and the 1st location of the Circular list.
946 uint32_t pib_cnt
, r_offset
, w_offset
;
948 Q_addr
= hw
->pib_del_Q_addr
;
950 /* Get the Read Pointer */
951 crystalhd_mem_rd(hw
->adp
, Q_addr
, 1, &r_offset
);
953 /* Get the Write Pointer */
954 crystalhd_mem_rd(hw
->adp
, Q_addr
+ sizeof(uint32_t), 1, &w_offset
);
956 if (r_offset
== w_offset
)
957 return 0; /* Queue is empty */
959 if (w_offset
> r_offset
)
960 pib_cnt
= w_offset
- r_offset
;
962 pib_cnt
= (w_offset
+ MAX_PIB_Q_DEPTH
) -
963 (r_offset
+ MIN_PIB_Q_DEPTH
);
965 if (pib_cnt
> MAX_PIB_Q_DEPTH
) {
966 BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt
);
973 static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw
*hw
)
976 uint32_t addr_entry
, r_offset
, w_offset
;
978 Q_addr
= hw
->pib_del_Q_addr
;
980 /* Get the Read Pointer 0Th Location is Read Pointer */
981 crystalhd_mem_rd(hw
->adp
, Q_addr
, 1, &r_offset
);
983 /* Get the Write Pointer 1st Location is Write pointer */
984 crystalhd_mem_rd(hw
->adp
, Q_addr
+ sizeof(uint32_t), 1, &w_offset
);
987 if (r_offset
== w_offset
)
990 if ((r_offset
< MIN_PIB_Q_DEPTH
) || (r_offset
>= MAX_PIB_Q_DEPTH
))
993 /* Get the Actual Address of the PIB */
994 crystalhd_mem_rd(hw
->adp
, Q_addr
+ (r_offset
* sizeof(uint32_t)),
997 /* Increment the Read Pointer */
1000 if (MAX_PIB_Q_DEPTH
== r_offset
)
1001 r_offset
= MIN_PIB_Q_DEPTH
;
1003 /* Write back the read pointer to It's Location */
1004 crystalhd_mem_wr(hw
->adp
, Q_addr
, 1, &r_offset
);
1009 static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw
*hw
,
1010 uint32_t addr_to_rel
)
1013 uint32_t r_offset
, w_offset
, n_offset
;
1015 Q_addr
= hw
->pib_rel_Q_addr
;
1017 /* Get the Read Pointer */
1018 crystalhd_mem_rd(hw
->adp
, Q_addr
, 1, &r_offset
);
1020 /* Get the Write Pointer */
1021 crystalhd_mem_rd(hw
->adp
, Q_addr
+ sizeof(uint32_t), 1, &w_offset
);
1023 if ((r_offset
< MIN_PIB_Q_DEPTH
) ||
1024 (r_offset
>= MAX_PIB_Q_DEPTH
))
1027 n_offset
= w_offset
+ 1;
1029 if (MAX_PIB_Q_DEPTH
== n_offset
)
1030 n_offset
= MIN_PIB_Q_DEPTH
;
1032 if (r_offset
== n_offset
)
1033 return false; /* should never happen */
1035 /* Write the DRAM ADDR to the Queue at Next Offset */
1036 crystalhd_mem_wr(hw
->adp
, Q_addr
+ (w_offset
* sizeof(uint32_t)),
1039 /* Put the New value of the write pointer in Queue */
1040 crystalhd_mem_wr(hw
->adp
, Q_addr
+ sizeof(uint32_t), 1, &n_offset
);
1045 static void cpy_pib_to_app(struct c011_pib
*src_pib
,
1046 struct BC_PIC_INFO_BLOCK
*dst_pib
)
1048 if (!src_pib
|| !dst_pib
) {
1049 BCMLOG_ERR("Invalid Arguments\n");
1053 dst_pib
->timeStamp
= 0;
1054 dst_pib
->picture_number
= src_pib
->ppb
.picture_number
;
1055 dst_pib
->width
= src_pib
->ppb
.width
;
1056 dst_pib
->height
= src_pib
->ppb
.height
;
1057 dst_pib
->chroma_format
= src_pib
->ppb
.chroma_format
;
1058 dst_pib
->pulldown
= src_pib
->ppb
.pulldown
;
1059 dst_pib
->flags
= src_pib
->ppb
.flags
;
1060 dst_pib
->sess_num
= src_pib
->ptsStcOffset
;
1061 dst_pib
->aspect_ratio
= src_pib
->ppb
.aspect_ratio
;
1062 dst_pib
->colour_primaries
= src_pib
->ppb
.colour_primaries
;
1063 dst_pib
->picture_meta_payload
= src_pib
->ppb
.picture_meta_payload
;
1064 dst_pib
->frame_rate
= src_pib
->resolution
;
1068 static void crystalhd_hw_proc_pib(struct crystalhd_hw
*hw
)
1071 struct c011_pib src_pib
;
1072 uint32_t pib_addr
, pib_cnt
;
1073 struct BC_PIC_INFO_BLOCK
*AppPib
;
1074 struct crystalhd_rx_dma_pkt
*rx_pkt
= NULL
;
1076 pib_cnt
= crystalhd_get_pib_avail_cnt(hw
);
1081 for (cnt
= 0; cnt
< pib_cnt
; cnt
++) {
1083 pib_addr
= crystalhd_get_addr_from_pib_Q(hw
);
1084 crystalhd_mem_rd(hw
->adp
, pib_addr
, sizeof(struct c011_pib
) / 4,
1085 (uint32_t *)&src_pib
);
1087 if (src_pib
.bFormatChange
) {
1088 rx_pkt
= (struct crystalhd_rx_dma_pkt
*)
1089 crystalhd_dioq_fetch(hw
->rx_freeq
);
1093 rx_pkt
->flags
|= COMP_FLAG_PIB_VALID
|
1094 COMP_FLAG_FMT_CHANGE
;
1095 AppPib
= &rx_pkt
->pib
;
1096 cpy_pib_to_app(&src_pib
, AppPib
);
1099 "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
1100 rx_pkt
->pib
.picture_number
,
1101 rx_pkt
->pib
.aspect_ratio
,
1102 rx_pkt
->pib
.chroma_format
,
1103 rx_pkt
->pib
.colour_primaries
,
1104 rx_pkt
->pib
.frame_rate
,
1108 rx_pkt
->pib
.pulldown
,
1111 crystalhd_dioq_add(hw
->rx_rdyq
, (void *)rx_pkt
, true,
1116 crystalhd_rel_addr_to_pib_Q(hw
, pib_addr
);
1120 static void crystalhd_start_rx_dma_engine(struct crystalhd_hw
*hw
)
1124 dma_cntrl
= crystalhd_reg_rd(hw
->adp
,
1125 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS
);
1126 if (!(dma_cntrl
& DMA_START_BIT
)) {
1127 dma_cntrl
|= DMA_START_BIT
;
1128 crystalhd_reg_wr(hw
->adp
,
1129 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS
, dma_cntrl
);
1132 dma_cntrl
= crystalhd_reg_rd(hw
->adp
,
1133 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS
);
1134 if (!(dma_cntrl
& DMA_START_BIT
)) {
1135 dma_cntrl
|= DMA_START_BIT
;
1136 crystalhd_reg_wr(hw
->adp
,
1137 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS
, dma_cntrl
);
1143 static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw
*hw
)
1145 uint32_t dma_cntrl
= 0, count
= 30;
1146 uint32_t l0y
= 1, l0uv
= 1, l1y
= 1, l1uv
= 1;
1148 dma_cntrl
= crystalhd_reg_rd(hw
->adp
,
1149 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS
);
1150 if ((dma_cntrl
& DMA_START_BIT
)) {
1151 dma_cntrl
&= ~DMA_START_BIT
;
1152 crystalhd_reg_wr(hw
->adp
,
1153 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS
, dma_cntrl
);
1156 dma_cntrl
= crystalhd_reg_rd(hw
->adp
,
1157 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS
);
1158 if ((dma_cntrl
& DMA_START_BIT
)) {
1159 dma_cntrl
&= ~DMA_START_BIT
;
1160 crystalhd_reg_wr(hw
->adp
,
1161 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS
, dma_cntrl
);
1164 /* Poll for 3seconds (30 * 100ms) on both the lists..*/
1165 while ((l0y
|| l0uv
|| l1y
|| l1uv
) && count
) {
1168 l0y
= crystalhd_reg_rd(hw
->adp
,
1169 MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0
);
1170 l0y
&= DMA_START_BIT
;
1172 hw
->rx_list_sts
[0] &= ~rx_waiting_y_intr
;
1176 l1y
= crystalhd_reg_rd(hw
->adp
,
1177 MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1
);
1178 l1y
&= DMA_START_BIT
;
1180 hw
->rx_list_sts
[1] &= ~rx_waiting_y_intr
;
1184 l0uv
= crystalhd_reg_rd(hw
->adp
,
1185 MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0
);
1186 l0uv
&= DMA_START_BIT
;
1188 hw
->rx_list_sts
[0] &= ~rx_waiting_uv_intr
;
1192 l1uv
= crystalhd_reg_rd(hw
->adp
,
1193 MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1
);
1194 l1uv
&= DMA_START_BIT
;
1196 hw
->rx_list_sts
[1] &= ~rx_waiting_uv_intr
;
1198 msleep_interruptible(100);
1202 hw
->rx_list_post_index
= 0;
1204 BCMLOG(BCMLOG_SSTEP
, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
1205 count
, hw
->rx_list_sts
[0], hw
->rx_list_sts
[1]);
1208 static enum BC_STATUS
crystalhd_hw_prog_rxdma(struct crystalhd_hw
*hw
,
1209 struct crystalhd_rx_dma_pkt
*rx_pkt
)
1211 uint32_t y_low_addr_reg
, y_high_addr_reg
;
1212 uint32_t uv_low_addr_reg
, uv_high_addr_reg
;
1213 union addr_64 desc_addr
;
1214 unsigned long flags
;
1216 if (!hw
|| !rx_pkt
) {
1217 BCMLOG_ERR("Invalid Arguments\n");
1218 return BC_STS_INV_ARG
;
1221 if (hw
->rx_list_post_index
>= DMA_ENGINE_CNT
) {
1222 BCMLOG_ERR("List Out Of bounds %x\n", hw
->rx_list_post_index
);
1223 return BC_STS_INV_ARG
;
1226 spin_lock_irqsave(&hw
->rx_lock
, flags
);
1227 /* FIXME: jarod: sts_free is an enum for 0,
1228 in crystalhd_hw.h... yuk... */
1229 if (sts_free
!= hw
->rx_list_sts
[hw
->rx_list_post_index
]) {
1230 spin_unlock_irqrestore(&hw
->rx_lock
, flags
);
1234 if (!hw
->rx_list_post_index
) {
1235 y_low_addr_reg
= MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0
;
1236 y_high_addr_reg
= MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0
;
1237 uv_low_addr_reg
= MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0
;
1238 uv_high_addr_reg
= MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0
;
1240 y_low_addr_reg
= MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1
;
1241 y_high_addr_reg
= MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1
;
1242 uv_low_addr_reg
= MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1
;
1243 uv_high_addr_reg
= MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1
;
1245 rx_pkt
->pkt_tag
= hw
->rx_pkt_tag_seed
+ hw
->rx_list_post_index
;
1246 hw
->rx_list_sts
[hw
->rx_list_post_index
] |= rx_waiting_y_intr
;
1247 if (rx_pkt
->uv_phy_addr
)
1248 hw
->rx_list_sts
[hw
->rx_list_post_index
] |= rx_waiting_uv_intr
;
1249 hw
->rx_list_post_index
= (hw
->rx_list_post_index
+ 1) % DMA_ENGINE_CNT
;
1250 spin_unlock_irqrestore(&hw
->rx_lock
, flags
);
1252 crystalhd_dioq_add(hw
->rx_actq
, (void *)rx_pkt
, false,
1255 crystalhd_start_rx_dma_engine(hw
);
1256 /* Program the Y descriptor */
1257 desc_addr
.full_addr
= rx_pkt
->desc_mem
.phy_addr
;
1258 crystalhd_reg_wr(hw
->adp
, y_high_addr_reg
, desc_addr
.high_part
);
1259 crystalhd_reg_wr(hw
->adp
, y_low_addr_reg
, desc_addr
.low_part
| 0x01);
1261 if (rx_pkt
->uv_phy_addr
) {
1262 /* Program the UV descriptor */
1263 desc_addr
.full_addr
= rx_pkt
->uv_phy_addr
;
1264 crystalhd_reg_wr(hw
->adp
, uv_high_addr_reg
,
1265 desc_addr
.high_part
);
1266 crystalhd_reg_wr(hw
->adp
, uv_low_addr_reg
,
1267 desc_addr
.low_part
| 0x01);
1270 return BC_STS_SUCCESS
;
1273 static enum BC_STATUS
crystalhd_hw_post_cap_buff(struct crystalhd_hw
*hw
,
1274 struct crystalhd_rx_dma_pkt
*rx_pkt
)
1276 enum BC_STATUS sts
= crystalhd_hw_prog_rxdma(hw
, rx_pkt
);
1278 if (sts
== BC_STS_BUSY
)
1279 crystalhd_dioq_add(hw
->rx_freeq
, (void *)rx_pkt
,
1280 false, rx_pkt
->pkt_tag
);
1285 static void crystalhd_get_dnsz(struct crystalhd_hw
*hw
, uint32_t list_index
,
1286 uint32_t *y_dw_dnsz
, uint32_t *uv_dw_dnsz
)
1288 uint32_t y_dn_sz_reg
, uv_dn_sz_reg
;
1291 y_dn_sz_reg
= MISC1_Y_RX_LIST0_CUR_BYTE_CNT
;
1292 uv_dn_sz_reg
= MISC1_UV_RX_LIST0_CUR_BYTE_CNT
;
1294 y_dn_sz_reg
= MISC1_Y_RX_LIST1_CUR_BYTE_CNT
;
1295 uv_dn_sz_reg
= MISC1_UV_RX_LIST1_CUR_BYTE_CNT
;
1298 *y_dw_dnsz
= crystalhd_reg_rd(hw
->adp
, y_dn_sz_reg
);
1299 *uv_dw_dnsz
= crystalhd_reg_rd(hw
->adp
, uv_dn_sz_reg
);
1303 * This function should be called only after making sure that the two DMA
1304 * lists are free. This function does not check if DMA's are active, before
1305 * turning off the DMA.
1307 static void crystalhd_hw_finalize_pause(struct crystalhd_hw
*hw
)
1309 uint32_t dma_cntrl
, aspm
;
1311 hw
->stop_pending
= 0;
1313 dma_cntrl
= crystalhd_reg_rd(hw
->adp
,
1314 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS
);
1315 if (dma_cntrl
& DMA_START_BIT
) {
1316 dma_cntrl
&= ~DMA_START_BIT
;
1317 crystalhd_reg_wr(hw
->adp
,
1318 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS
, dma_cntrl
);
1321 dma_cntrl
= crystalhd_reg_rd(hw
->adp
,
1322 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS
);
1323 if (dma_cntrl
& DMA_START_BIT
) {
1324 dma_cntrl
&= ~DMA_START_BIT
;
1325 crystalhd_reg_wr(hw
->adp
,
1326 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS
, dma_cntrl
);
1328 hw
->rx_list_post_index
= 0;
1330 aspm
= crystalhd_reg_rd(hw
->adp
, PCIE_DLL_DATA_LINK_CONTROL
);
1331 aspm
|= ASPM_L1_ENABLE
;
1332 /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
1333 crystalhd_reg_wr(hw
->adp
, PCIE_DLL_DATA_LINK_CONTROL
, aspm
);
1336 static enum BC_STATUS
crystalhd_rx_pkt_done(struct crystalhd_hw
*hw
,
1337 uint32_t list_index
, enum BC_STATUS comp_sts
)
1339 struct crystalhd_rx_dma_pkt
*rx_pkt
= NULL
;
1340 uint32_t y_dw_dnsz
, uv_dw_dnsz
;
1341 enum BC_STATUS sts
= BC_STS_SUCCESS
;
1343 if (!hw
|| list_index
>= DMA_ENGINE_CNT
) {
1344 BCMLOG_ERR("Invalid Arguments\n");
1345 return BC_STS_INV_ARG
;
1348 rx_pkt
= crystalhd_dioq_find_and_fetch(hw
->rx_actq
,
1349 hw
->rx_pkt_tag_seed
+ list_index
);
1352 "Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
1353 hw
->rx_list_post_index
, hw
->rx_list_sts
[0],
1354 hw
->rx_list_sts
[1], list_index
,
1355 hw
->rx_pkt_tag_seed
+ list_index
, comp_sts
);
1356 return BC_STS_INV_ARG
;
1359 if (comp_sts
== BC_STS_SUCCESS
) {
1360 crystalhd_get_dnsz(hw
, list_index
, &y_dw_dnsz
, &uv_dw_dnsz
);
1361 rx_pkt
->dio_req
->uinfo
.y_done_sz
= y_dw_dnsz
;
1362 rx_pkt
->flags
= COMP_FLAG_DATA_VALID
;
1363 if (rx_pkt
->uv_phy_addr
)
1364 rx_pkt
->dio_req
->uinfo
.uv_done_sz
= uv_dw_dnsz
;
1365 crystalhd_dioq_add(hw
->rx_rdyq
, rx_pkt
, true,
1366 hw
->rx_pkt_tag_seed
+ list_index
);
1370 /* Check if we can post this DIO again. */
1371 return crystalhd_hw_post_cap_buff(hw
, rx_pkt
);
1374 static bool crystalhd_rx_list0_handler(struct crystalhd_hw
*hw
,
1375 uint32_t int_sts
, uint32_t y_err_sts
, uint32_t uv_err_sts
)
1378 enum list_sts tmp_lsts
;
1380 if (!(y_err_sts
& GET_Y0_ERR_MSK
) && !(uv_err_sts
& GET_UV0_ERR_MSK
))
1383 tmp_lsts
= hw
->rx_list_sts
[0];
1386 tmp
= y_err_sts
& GET_Y0_ERR_MSK
;
1387 if (int_sts
& INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK
)
1388 hw
->rx_list_sts
[0] &= ~rx_waiting_y_intr
;
1390 if (y_err_sts
& MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK
) {
1391 hw
->rx_list_sts
[0] &= ~rx_waiting_y_intr
;
1392 tmp
&= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK
;
1395 if (y_err_sts
& MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK
) {
1396 hw
->rx_list_sts
[0] &= ~rx_y_mask
;
1397 hw
->rx_list_sts
[0] |= rx_y_error
;
1398 tmp
&= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK
;
1402 hw
->rx_list_sts
[0] &= ~rx_y_mask
;
1403 hw
->rx_list_sts
[0] |= rx_y_error
;
1404 hw
->rx_list_post_index
= 0;
1408 tmp
= uv_err_sts
& GET_UV0_ERR_MSK
;
1409 if (int_sts
& INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK
)
1410 hw
->rx_list_sts
[0] &= ~rx_waiting_uv_intr
;
1412 if (uv_err_sts
& MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK
) {
1413 hw
->rx_list_sts
[0] &= ~rx_waiting_uv_intr
;
1414 tmp
&= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK
;
1418 MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK
) {
1419 hw
->rx_list_sts
[0] &= ~rx_uv_mask
;
1420 hw
->rx_list_sts
[0] |= rx_uv_error
;
1421 tmp
&= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK
;
1425 hw
->rx_list_sts
[0] &= ~rx_uv_mask
;
1426 hw
->rx_list_sts
[0] |= rx_uv_error
;
1427 hw
->rx_list_post_index
= 0;
1430 if (y_err_sts
& GET_Y0_ERR_MSK
) {
1431 tmp
= y_err_sts
& GET_Y0_ERR_MSK
;
1432 crystalhd_reg_wr(hw
->adp
, MISC1_Y_RX_ERROR_STATUS
, tmp
);
1435 if (uv_err_sts
& GET_UV0_ERR_MSK
) {
1436 tmp
= uv_err_sts
& GET_UV0_ERR_MSK
;
1437 crystalhd_reg_wr(hw
->adp
, MISC1_UV_RX_ERROR_STATUS
, tmp
);
1440 return (tmp_lsts
!= hw
->rx_list_sts
[0]);
1443 static bool crystalhd_rx_list1_handler(struct crystalhd_hw
*hw
,
1444 uint32_t int_sts
, uint32_t y_err_sts
, uint32_t uv_err_sts
)
1447 enum list_sts tmp_lsts
;
1449 if (!(y_err_sts
& GET_Y1_ERR_MSK
) && !(uv_err_sts
& GET_UV1_ERR_MSK
))
1452 tmp_lsts
= hw
->rx_list_sts
[1];
1455 tmp
= y_err_sts
& GET_Y1_ERR_MSK
;
1456 if (int_sts
& INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK
)
1457 hw
->rx_list_sts
[1] &= ~rx_waiting_y_intr
;
1459 if (y_err_sts
& MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK
) {
1460 hw
->rx_list_sts
[1] &= ~rx_waiting_y_intr
;
1461 tmp
&= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK
;
1464 if (y_err_sts
& MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK
) {
1465 /* Add retry-support..*/
1466 hw
->rx_list_sts
[1] &= ~rx_y_mask
;
1467 hw
->rx_list_sts
[1] |= rx_y_error
;
1468 tmp
&= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK
;
1472 hw
->rx_list_sts
[1] &= ~rx_y_mask
;
1473 hw
->rx_list_sts
[1] |= rx_y_error
;
1474 hw
->rx_list_post_index
= 0;
1478 tmp
= uv_err_sts
& GET_UV1_ERR_MSK
;
1479 if (int_sts
& INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK
)
1480 hw
->rx_list_sts
[1] &= ~rx_waiting_uv_intr
;
1482 if (uv_err_sts
& MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK
) {
1483 hw
->rx_list_sts
[1] &= ~rx_waiting_uv_intr
;
1484 tmp
&= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK
;
1487 if (uv_err_sts
& MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK
) {
1488 /* Add retry-support*/
1489 hw
->rx_list_sts
[1] &= ~rx_uv_mask
;
1490 hw
->rx_list_sts
[1] |= rx_uv_error
;
1491 tmp
&= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK
;
1495 hw
->rx_list_sts
[1] &= ~rx_uv_mask
;
1496 hw
->rx_list_sts
[1] |= rx_uv_error
;
1497 hw
->rx_list_post_index
= 0;
1500 if (y_err_sts
& GET_Y1_ERR_MSK
) {
1501 tmp
= y_err_sts
& GET_Y1_ERR_MSK
;
1502 crystalhd_reg_wr(hw
->adp
, MISC1_Y_RX_ERROR_STATUS
, tmp
);
1505 if (uv_err_sts
& GET_UV1_ERR_MSK
) {
1506 tmp
= uv_err_sts
& GET_UV1_ERR_MSK
;
1507 crystalhd_reg_wr(hw
->adp
, MISC1_UV_RX_ERROR_STATUS
, tmp
);
1510 return (tmp_lsts
!= hw
->rx_list_sts
[1]);
1514 static void crystalhd_rx_isr(struct crystalhd_hw
*hw
, uint32_t intr_sts
)
1516 unsigned long flags
;
1517 uint32_t i
, list_avail
= 0;
1518 enum BC_STATUS comp_sts
= BC_STS_NO_DATA
;
1519 uint32_t y_err_sts
, uv_err_sts
, y_dn_sz
= 0, uv_dn_sz
= 0;
1523 BCMLOG_ERR("Invalid Arguments\n");
1527 if (!(intr_sts
& GET_RX_INTR_MASK
))
1530 y_err_sts
= crystalhd_reg_rd(hw
->adp
, MISC1_Y_RX_ERROR_STATUS
);
1531 uv_err_sts
= crystalhd_reg_rd(hw
->adp
, MISC1_UV_RX_ERROR_STATUS
);
1533 for (i
= 0; i
< DMA_ENGINE_CNT
; i
++) {
1534 /* Update States..*/
1535 spin_lock_irqsave(&hw
->rx_lock
, flags
);
1537 ret
= crystalhd_rx_list0_handler(hw
, intr_sts
,
1538 y_err_sts
, uv_err_sts
);
1540 ret
= crystalhd_rx_list1_handler(hw
, intr_sts
,
1541 y_err_sts
, uv_err_sts
);
1543 switch (hw
->rx_list_sts
[i
]) {
1545 comp_sts
= BC_STS_SUCCESS
;
1551 /* We got error on both or Y or uv. */
1552 hw
->stats
.rx_errors
++;
1553 crystalhd_get_dnsz(hw
, i
, &y_dn_sz
, &uv_dn_sz
);
1554 /* FIXME: jarod: this is where
1555 my mini pci-e card is tripping up */
1556 BCMLOG(BCMLOG_DBG
, "list_index:%x rx[%d] Y:%x UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
1557 i
, hw
->stats
.rx_errors
, y_err_sts
,
1558 uv_err_sts
, intr_sts
, y_dn_sz
,
1560 hw
->rx_list_sts
[i
] = sts_free
;
1561 comp_sts
= BC_STS_ERROR
;
1564 /* Wait for completion..*/
1565 comp_sts
= BC_STS_NO_DATA
;
1569 spin_unlock_irqrestore(&hw
->rx_lock
, flags
);
1571 /* handle completion...*/
1572 if (comp_sts
!= BC_STS_NO_DATA
) {
1573 crystalhd_rx_pkt_done(hw
, i
, comp_sts
);
1574 comp_sts
= BC_STS_NO_DATA
;
1579 if (hw
->stop_pending
) {
1580 if ((hw
->rx_list_sts
[0] == sts_free
) &&
1581 (hw
->rx_list_sts
[1] == sts_free
))
1582 crystalhd_hw_finalize_pause(hw
);
1584 crystalhd_hw_start_capture(hw
);
1589 static enum BC_STATUS
crystalhd_fw_cmd_post_proc(struct crystalhd_hw
*hw
,
1590 struct BC_FW_CMD
*fw_cmd
)
1592 enum BC_STATUS sts
= BC_STS_SUCCESS
;
1593 struct dec_rsp_channel_start_video
*st_rsp
= NULL
;
1595 switch (fw_cmd
->cmd
[0]) {
1596 case eCMD_C011_DEC_CHAN_START_VIDEO
:
1597 st_rsp
= (struct dec_rsp_channel_start_video
*)fw_cmd
->rsp
;
1598 hw
->pib_del_Q_addr
= st_rsp
->picInfoDeliveryQ
;
1599 hw
->pib_rel_Q_addr
= st_rsp
->picInfoReleaseQ
;
1600 BCMLOG(BCMLOG_DBG
, "DelQAddr:%x RelQAddr:%x\n",
1601 hw
->pib_del_Q_addr
, hw
->pib_rel_Q_addr
);
1603 case eCMD_C011_INIT
:
1604 if (!(crystalhd_load_firmware_config(hw
->adp
))) {
1605 BCMLOG_ERR("Invalid Params.\n");
1606 sts
= BC_STS_FW_AUTH_FAILED
;
1615 static enum BC_STATUS
crystalhd_put_ddr2sleep(struct crystalhd_hw
*hw
)
1618 union link_misc_perst_decoder_ctrl rst_cntrl_reg
;
1620 /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
1621 rst_cntrl_reg
.whole_reg
= crystalhd_reg_rd(hw
->adp
,
1622 MISC_PERST_DECODER_CTRL
);
1624 rst_cntrl_reg
.bcm_7412_rst
= 1;
1625 crystalhd_reg_wr(hw
->adp
, MISC_PERST_DECODER_CTRL
,
1626 rst_cntrl_reg
.whole_reg
);
1627 msleep_interruptible(50);
1629 rst_cntrl_reg
.bcm_7412_rst
= 0;
1630 crystalhd_reg_wr(hw
->adp
, MISC_PERST_DECODER_CTRL
,
1631 rst_cntrl_reg
.whole_reg
);
1633 /* Close all banks, put DDR in idle */
1634 bc_dec_reg_wr(hw
->adp
, SDRAM_PRECHARGE
, 0);
1636 /* Set bit 25 (drop CKE pin of DDR) */
1637 reg
= bc_dec_reg_rd(hw
->adp
, SDRAM_PARAM
);
1639 bc_dec_reg_wr(hw
->adp
, SDRAM_PARAM
, reg
);
1641 /* Reset the audio block */
1642 bc_dec_reg_wr(hw
->adp
, AUD_DSP_MISC_SOFT_RESET
, 0x1);
1644 /* Power down Raptor PLL */
1645 reg
= bc_dec_reg_rd(hw
->adp
, DecHt_PllCCtl
);
1647 bc_dec_reg_wr(hw
->adp
, DecHt_PllCCtl
, reg
);
1649 /* Power down all Audio PLL */
1650 bc_dec_reg_wr(hw
->adp
, AIO_MISC_PLL_RESET
, 0x1);
1652 /* Power down video clock (75MHz) */
1653 reg
= bc_dec_reg_rd(hw
->adp
, DecHt_PllECtl
);
1655 bc_dec_reg_wr(hw
->adp
, DecHt_PllECtl
, reg
);
1657 /* Power down video clock (75MHz) */
1658 reg
= bc_dec_reg_rd(hw
->adp
, DecHt_PllDCtl
);
1660 bc_dec_reg_wr(hw
->adp
, DecHt_PllDCtl
, reg
);
1662 /* Power down core clock (200MHz) */
1663 reg
= bc_dec_reg_rd(hw
->adp
, DecHt_PllACtl
);
1665 bc_dec_reg_wr(hw
->adp
, DecHt_PllACtl
, reg
);
1667 /* Power down core clock (200MHz) */
1668 reg
= bc_dec_reg_rd(hw
->adp
, DecHt_PllBCtl
);
1670 bc_dec_reg_wr(hw
->adp
, DecHt_PllBCtl
, reg
);
1672 return BC_STS_SUCCESS
;
1675 /************************************************
1677 *************************************************/
1679 enum BC_STATUS
crystalhd_download_fw(struct crystalhd_adp
*adp
, void *buffer
,
1682 uint32_t reg_data
, cnt
, *temp_buff
;
1683 uint32_t fw_sig_len
= 36;
1684 uint32_t dram_offset
= BC_FWIMG_ST_ADDR
, sig_reg
;
1687 if (!adp
|| !buffer
|| !sz
) {
1688 BCMLOG_ERR("Invalid Params.\n");
1689 return BC_STS_INV_ARG
;
1692 reg_data
= crystalhd_reg_rd(adp
, OTP_CMD
);
1693 if (!(reg_data
& 0x02)) {
1694 BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
1695 return BC_STS_ERROR
;
1699 crystalhd_reg_wr(adp
, DCI_CMD
, 0);
1700 reg_data
|= BC_BIT(0);
1701 crystalhd_reg_wr(adp
, DCI_CMD
, reg_data
);
1705 msleep_interruptible(10);
1707 while (reg_data
!= BC_BIT(4)) {
1708 reg_data
= crystalhd_reg_rd(adp
, DCI_STATUS
);
1709 reg_data
&= BC_BIT(4);
1711 BCMLOG_ERR("Firmware Download RDY Timeout.\n");
1712 return BC_STS_TIMEOUT
;
1716 msleep_interruptible(10);
1717 /* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
1718 crystalhd_reg_wr(adp
, DCI_FIRMWARE_ADDR
, dram_offset
);
1719 temp_buff
= (uint32_t *)buffer
;
1720 for (cnt
= 0; cnt
< (sz
- fw_sig_len
); cnt
+= 4) {
1721 crystalhd_reg_wr(adp
, DCI_DRAM_BASE_ADDR
, (dram_offset
>> 19));
1722 crystalhd_reg_wr(adp
, DCI_FIRMWARE_DATA
, *temp_buff
);
1726 msleep_interruptible(10);
1730 sig_reg
= (uint32_t)DCI_SIGNATURE_DATA_7
;
1731 for (cnt
= 0; cnt
< 8; cnt
++) {
1732 uint32_t swapped_data
= *temp_buff
;
1733 swapped_data
= bswap_32_1(swapped_data
);
1734 crystalhd_reg_wr(adp
, sig_reg
, swapped_data
);
1738 msleep_interruptible(10);
1741 reg_data
|= BC_BIT(1);
1742 crystalhd_reg_wr(adp
, DCI_CMD
, reg_data
);
1743 msleep_interruptible(10);
1746 reg_data
= crystalhd_reg_rd(adp
, DCI_STATUS
);
1748 if ((reg_data
& BC_BIT(9)) == BC_BIT(9)) {
1750 while ((reg_data
& BC_BIT(0)) != BC_BIT(0)) {
1751 reg_data
= crystalhd_reg_rd(adp
, DCI_STATUS
);
1752 reg_data
&= BC_BIT(0);
1755 msleep_interruptible(10);
1758 reg_data
= crystalhd_reg_rd(adp
, DCI_CMD
);
1759 reg_data
|= BC_BIT(4);
1760 crystalhd_reg_wr(adp
, DCI_CMD
, reg_data
);
1763 BCMLOG_ERR("F/w Signature mismatch\n");
1764 return BC_STS_FW_AUTH_FAILED
;
1767 BCMLOG(BCMLOG_INFO
, "Firmware Downloaded Successfully\n");
1768 return BC_STS_SUCCESS
;
1771 enum BC_STATUS
crystalhd_do_fw_cmd(struct crystalhd_hw
*hw
,
1772 struct BC_FW_CMD
*fw_cmd
)
1774 uint32_t cnt
= 0, cmd_res_addr
;
1775 uint32_t *cmd_buff
, *res_buff
;
1776 wait_queue_head_t fw_cmd_event
;
1780 crystalhd_create_event(&fw_cmd_event
);
1782 if (!hw
|| !fw_cmd
) {
1783 BCMLOG_ERR("Invalid Arguments\n");
1784 return BC_STS_INV_ARG
;
1787 cmd_buff
= fw_cmd
->cmd
;
1788 res_buff
= fw_cmd
->rsp
;
1790 if (!cmd_buff
|| !res_buff
) {
1791 BCMLOG_ERR("Invalid Parameters for F/W Command\n");
1792 return BC_STS_INV_ARG
;
1797 hw
->fwcmd_evt_sts
= 0;
1798 hw
->pfw_cmd_event
= &fw_cmd_event
;
1800 /*Write the command to the memory*/
1801 crystalhd_mem_wr(hw
->adp
, TS_Host2CpuSnd
, FW_CMD_BUFF_SZ
, cmd_buff
);
1803 /*Memory Read for memory arbitrator flush*/
1804 crystalhd_mem_rd(hw
->adp
, TS_Host2CpuSnd
, 1, &cnt
);
1806 /* Write the command address to mailbox */
1807 bc_dec_reg_wr(hw
->adp
, Hst2CpuMbx1
, TS_Host2CpuSnd
);
1808 msleep_interruptible(50);
1810 crystalhd_wait_on_event(&fw_cmd_event
, hw
->fwcmd_evt_sts
, 20000, rc
, 0);
1813 sts
= BC_STS_SUCCESS
;
1814 } else if (rc
== -EBUSY
) {
1815 BCMLOG_ERR("Firmware command T/O\n");
1816 sts
= BC_STS_TIMEOUT
;
1817 } else if (rc
== -EINTR
) {
1818 BCMLOG(BCMLOG_DBG
, "FwCmd Wait Signal int.\n");
1819 sts
= BC_STS_IO_USER_ABORT
;
1821 BCMLOG_ERR("FwCmd IO Error.\n");
1822 sts
= BC_STS_IO_ERROR
;
1825 if (sts
!= BC_STS_SUCCESS
) {
1826 BCMLOG_ERR("FwCmd Failed.\n");
1831 /*Get the Response Address*/
1832 cmd_res_addr
= bc_dec_reg_rd(hw
->adp
, Cpu2HstMbx1
);
1834 /*Read the Response*/
1835 crystalhd_mem_rd(hw
->adp
, cmd_res_addr
, FW_CMD_BUFF_SZ
, res_buff
);
1839 if (res_buff
[2] != C011_RET_SUCCESS
) {
1840 BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
1841 return BC_STS_FW_CMD_ERR
;
1844 sts
= crystalhd_fw_cmd_post_proc(hw
, fw_cmd
);
1845 if (sts
!= BC_STS_SUCCESS
)
1846 BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
1851 bool crystalhd_hw_interrupt(struct crystalhd_adp
*adp
, struct crystalhd_hw
*hw
)
1853 uint32_t intr_sts
= 0;
1854 uint32_t deco_intr
= 0;
1857 if (!adp
|| !hw
->dev_started
)
1860 hw
->stats
.num_interrupts
++;
1863 deco_intr
= bc_dec_reg_rd(adp
, Stream2Host_Intr_Sts
);
1864 intr_sts
= crystalhd_reg_rd(adp
, INTR_INTR_STATUS
);
1867 /* let system know we processed interrupt..*/
1869 hw
->stats
.dev_interrupts
++;
1872 if (deco_intr
&& (deco_intr
!= 0xdeaddead)) {
1874 if (deco_intr
& 0x80000000) {
1875 /*Set the Event and the status flag*/
1876 if (hw
->pfw_cmd_event
) {
1877 hw
->fwcmd_evt_sts
= 1;
1878 crystalhd_set_event(hw
->pfw_cmd_event
);
1882 if (deco_intr
& BC_BIT(1))
1883 crystalhd_hw_proc_pib(hw
);
1885 bc_dec_reg_wr(adp
, Stream2Host_Intr_Sts
, deco_intr
);
1886 /* FIXME: jarod: No udelay? might this be
1887 the real reason mini pci-e cards were stalling out? */
1888 bc_dec_reg_wr(adp
, Stream2Host_Intr_Sts
, 0);
1893 crystalhd_rx_isr(hw
, intr_sts
);
1896 crystalhd_tx_isr(hw
, intr_sts
);
1898 /* Clear interrupts */
1901 crystalhd_reg_wr(adp
, INTR_INTR_CLR_REG
, intr_sts
);
1903 crystalhd_reg_wr(adp
, INTR_EOI_CTRL
, 1);
1911 enum BC_STATUS
crystalhd_hw_open(struct crystalhd_hw
*hw
,
1912 struct crystalhd_adp
*adp
)
1915 BCMLOG_ERR("Invalid Arguments\n");
1916 return BC_STS_INV_ARG
;
1919 if (hw
->dev_started
)
1920 return BC_STS_SUCCESS
;
1922 memset(hw
, 0, sizeof(struct crystalhd_hw
));
1925 spin_lock_init(&hw
->lock
);
1926 spin_lock_init(&hw
->rx_lock
);
1927 /* FIXME: jarod: what are these magic numbers?!? */
1928 hw
->tx_ioq_tag_seed
= 0x70023070;
1929 hw
->rx_pkt_tag_seed
= 0x70029070;
1931 hw
->stop_pending
= 0;
1932 crystalhd_start_device(hw
->adp
);
1933 hw
->dev_started
= true;
1935 /* set initial core clock */
1936 hw
->core_clock_mhz
= CLOCK_PRESET
;
1939 crystalhd_hw_set_core_clock(hw
);
1941 return BC_STS_SUCCESS
;
1944 enum BC_STATUS
crystalhd_hw_close(struct crystalhd_hw
*hw
)
1947 BCMLOG_ERR("Invalid Arguments\n");
1948 return BC_STS_INV_ARG
;
1951 if (!hw
->dev_started
)
1952 return BC_STS_SUCCESS
;
1954 /* Stop and DDR sleep will happen in here */
1955 crystalhd_hw_suspend(hw
);
1956 hw
->dev_started
= false;
1958 return BC_STS_SUCCESS
;
1961 enum BC_STATUS
crystalhd_hw_setup_dma_rings(struct crystalhd_hw
*hw
)
1966 dma_addr_t phy_addr
;
1967 enum BC_STATUS sts
= BC_STS_SUCCESS
;
1968 struct crystalhd_rx_dma_pkt
*rpkt
;
1970 if (!hw
|| !hw
->adp
) {
1971 BCMLOG_ERR("Invalid Arguments\n");
1972 return BC_STS_INV_ARG
;
1975 sts
= crystalhd_hw_create_ioqs(hw
);
1976 if (sts
!= BC_STS_SUCCESS
) {
1977 BCMLOG_ERR("Failed to create IOQs..\n");
1981 mem_len
= BC_LINK_MAX_SGLS
* sizeof(struct dma_descriptor
);
1983 for (i
= 0; i
< BC_TX_LIST_CNT
; i
++) {
1984 mem
= bc_kern_dma_alloc(hw
->adp
, mem_len
, &phy_addr
);
1986 memset(mem
, 0, mem_len
);
1988 BCMLOG_ERR("Insufficient Memory For TX\n");
1989 crystalhd_hw_free_dma_rings(hw
);
1990 return BC_STS_INSUFF_RES
;
1992 /* rx_pkt_pool -- static memory allocation */
1993 hw
->tx_pkt_pool
[i
].desc_mem
.pdma_desc_start
= mem
;
1994 hw
->tx_pkt_pool
[i
].desc_mem
.phy_addr
= phy_addr
;
1995 hw
->tx_pkt_pool
[i
].desc_mem
.sz
= BC_LINK_MAX_SGLS
*
1996 sizeof(struct dma_descriptor
);
1997 hw
->tx_pkt_pool
[i
].list_tag
= 0;
1999 /* Add TX dma requests to Free Queue..*/
2000 sts
= crystalhd_dioq_add(hw
->tx_freeq
,
2001 &hw
->tx_pkt_pool
[i
], false, 0);
2002 if (sts
!= BC_STS_SUCCESS
) {
2003 crystalhd_hw_free_dma_rings(hw
);
2008 for (i
= 0; i
< BC_RX_LIST_CNT
; i
++) {
2009 rpkt
= kzalloc(sizeof(*rpkt
), GFP_KERNEL
);
2011 BCMLOG_ERR("Insufficient Memory For RX\n");
2012 crystalhd_hw_free_dma_rings(hw
);
2013 return BC_STS_INSUFF_RES
;
2016 mem
= bc_kern_dma_alloc(hw
->adp
, mem_len
, &phy_addr
);
2018 memset(mem
, 0, mem_len
);
2020 BCMLOG_ERR("Insufficient Memory For RX\n");
2021 crystalhd_hw_free_dma_rings(hw
);
2023 return BC_STS_INSUFF_RES
;
2025 rpkt
->desc_mem
.pdma_desc_start
= mem
;
2026 rpkt
->desc_mem
.phy_addr
= phy_addr
;
2027 rpkt
->desc_mem
.sz
= BC_LINK_MAX_SGLS
*
2028 sizeof(struct dma_descriptor
);
2029 rpkt
->pkt_tag
= hw
->rx_pkt_tag_seed
+ i
;
2030 crystalhd_hw_free_rx_pkt(hw
, rpkt
);
2033 return BC_STS_SUCCESS
;
2036 enum BC_STATUS
crystalhd_hw_free_dma_rings(struct crystalhd_hw
*hw
)
2039 struct crystalhd_rx_dma_pkt
*rpkt
= NULL
;
2041 if (!hw
|| !hw
->adp
) {
2042 BCMLOG_ERR("Invalid Arguments\n");
2043 return BC_STS_INV_ARG
;
2046 /* Delete all IOQs.. */
2047 crystalhd_hw_delete_ioqs(hw
);
2049 for (i
= 0; i
< BC_TX_LIST_CNT
; i
++) {
2050 if (hw
->tx_pkt_pool
[i
].desc_mem
.pdma_desc_start
) {
2051 bc_kern_dma_free(hw
->adp
,
2052 hw
->tx_pkt_pool
[i
].desc_mem
.sz
,
2053 hw
->tx_pkt_pool
[i
].desc_mem
.pdma_desc_start
,
2054 hw
->tx_pkt_pool
[i
].desc_mem
.phy_addr
);
2056 hw
->tx_pkt_pool
[i
].desc_mem
.pdma_desc_start
= NULL
;
2060 BCMLOG(BCMLOG_DBG
, "Releasing RX Pkt pool\n");
2062 rpkt
= crystalhd_hw_alloc_rx_pkt(hw
);
2065 bc_kern_dma_free(hw
->adp
, rpkt
->desc_mem
.sz
,
2066 rpkt
->desc_mem
.pdma_desc_start
,
2067 rpkt
->desc_mem
.phy_addr
);
2071 return BC_STS_SUCCESS
;
2074 enum BC_STATUS
crystalhd_hw_post_tx(struct crystalhd_hw
*hw
,
2075 struct crystalhd_dio_req
*ioreq
,
2076 hw_comp_callback call_back
,
2077 wait_queue_head_t
*cb_event
, uint32_t *list_id
,
2080 struct tx_dma_pkt
*tx_dma_packet
= NULL
;
2081 uint32_t first_desc_u_addr
, first_desc_l_addr
;
2082 uint32_t low_addr
, high_addr
;
2083 union addr_64 desc_addr
;
2084 enum BC_STATUS sts
, add_sts
;
2085 uint32_t dummy_index
= 0;
2086 unsigned long flags
;
2089 if (!hw
|| !ioreq
|| !call_back
|| !cb_event
|| !list_id
) {
2090 BCMLOG_ERR("Invalid Arguments\n");
2091 return BC_STS_INV_ARG
;
2095 * Since we hit code in busy condition very frequently,
2096 * we will check the code in status first before
2097 * checking the availability of free elem.
2099 * This will avoid the Q fetch/add in normal condition.
2101 rc
= crystalhd_code_in_full(hw
->adp
, ioreq
->uinfo
.xfr_len
,
2104 hw
->stats
.cin_busy
++;
2108 /* Get a list from TxFreeQ */
2109 tx_dma_packet
= (struct tx_dma_pkt
*)crystalhd_dioq_fetch(
2111 if (!tx_dma_packet
) {
2112 BCMLOG_ERR("No empty elements..\n");
2113 return BC_STS_ERR_USAGE
;
2116 sts
= crystalhd_xlat_sgl_to_dma_desc(ioreq
,
2117 &tx_dma_packet
->desc_mem
,
2119 if (sts
!= BC_STS_SUCCESS
) {
2120 add_sts
= crystalhd_dioq_add(hw
->tx_freeq
, tx_dma_packet
,
2122 if (add_sts
!= BC_STS_SUCCESS
)
2123 BCMLOG_ERR("double fault..\n");
2130 desc_addr
.full_addr
= tx_dma_packet
->desc_mem
.phy_addr
;
2131 low_addr
= desc_addr
.low_part
;
2132 high_addr
= desc_addr
.high_part
;
2134 tx_dma_packet
->call_back
= call_back
;
2135 tx_dma_packet
->cb_event
= cb_event
;
2136 tx_dma_packet
->dio_req
= ioreq
;
2138 spin_lock_irqsave(&hw
->lock
, flags
);
2140 if (hw
->tx_list_post_index
== 0) {
2141 first_desc_u_addr
= MISC1_TX_FIRST_DESC_U_ADDR_LIST0
;
2142 first_desc_l_addr
= MISC1_TX_FIRST_DESC_L_ADDR_LIST0
;
2144 first_desc_u_addr
= MISC1_TX_FIRST_DESC_U_ADDR_LIST1
;
2145 first_desc_l_addr
= MISC1_TX_FIRST_DESC_L_ADDR_LIST1
;
2148 *list_id
= tx_dma_packet
->list_tag
= hw
->tx_ioq_tag_seed
+
2149 hw
->tx_list_post_index
;
2151 hw
->tx_list_post_index
= (hw
->tx_list_post_index
+ 1) % DMA_ENGINE_CNT
;
2153 spin_unlock_irqrestore(&hw
->lock
, flags
);
2156 /* Insert in Active Q..*/
2157 crystalhd_dioq_add(hw
->tx_actq
, tx_dma_packet
, false,
2158 tx_dma_packet
->list_tag
);
2161 * Interrupt will come as soon as you write
2162 * the valid bit. So be ready for that. All
2163 * the initialization should happen before that.
2165 crystalhd_start_tx_dma_engine(hw
);
2166 crystalhd_reg_wr(hw
->adp
, first_desc_u_addr
, desc_addr
.high_part
);
2168 crystalhd_reg_wr(hw
->adp
, first_desc_l_addr
, desc_addr
.low_part
|
2170 /* Be sure we set the valid bit ^^^^ */
2172 return BC_STS_SUCCESS
;
2176 * This is a force cancel and we are racing with ISR.
2178 * Will try to remove the req from ActQ before ISR gets it.
2179 * If ISR gets it first then the completion happens in the
2180 * normal path and we will return _STS_NO_DATA from here.
2182 * FIX_ME: Not Tested the actual condition..
2184 enum BC_STATUS
crystalhd_hw_cancel_tx(struct crystalhd_hw
*hw
,
2187 if (!hw
|| !list_id
) {
2188 BCMLOG_ERR("Invalid Arguments\n");
2189 return BC_STS_INV_ARG
;
2192 crystalhd_stop_tx_dma_engine(hw
);
2193 crystalhd_hw_tx_req_complete(hw
, list_id
, BC_STS_IO_USER_ABORT
);
2195 return BC_STS_SUCCESS
;
2198 enum BC_STATUS
crystalhd_hw_add_cap_buffer(struct crystalhd_hw
*hw
,
2199 struct crystalhd_dio_req
*ioreq
, bool en_post
)
2201 struct crystalhd_rx_dma_pkt
*rpkt
;
2202 uint32_t tag
, uv_desc_ix
= 0;
2205 if (!hw
|| !ioreq
) {
2206 BCMLOG_ERR("Invalid Arguments\n");
2207 return BC_STS_INV_ARG
;
2210 rpkt
= crystalhd_hw_alloc_rx_pkt(hw
);
2212 BCMLOG_ERR("Insufficient resources\n");
2213 return BC_STS_INSUFF_RES
;
2216 rpkt
->dio_req
= ioreq
;
2217 tag
= rpkt
->pkt_tag
;
2219 sts
= crystalhd_xlat_sgl_to_dma_desc(ioreq
, &rpkt
->desc_mem
,
2221 if (sts
!= BC_STS_SUCCESS
)
2224 rpkt
->uv_phy_addr
= 0;
2226 /* Store the address of UV in the rx packet for post*/
2228 rpkt
->uv_phy_addr
= rpkt
->desc_mem
.phy_addr
+
2229 (sizeof(struct dma_descriptor
) * (uv_desc_ix
+ 1));
2232 sts
= crystalhd_hw_post_cap_buff(hw
, rpkt
);
2234 sts
= crystalhd_dioq_add(hw
->rx_freeq
, rpkt
, false, tag
);
2239 enum BC_STATUS
crystalhd_hw_get_cap_buffer(struct crystalhd_hw
*hw
,
2240 struct BC_PIC_INFO_BLOCK
*pib
,
2241 struct crystalhd_dio_req
**ioreq
)
2243 struct crystalhd_rx_dma_pkt
*rpkt
;
2244 uint32_t timeout
= BC_PROC_OUTPUT_TIMEOUT
/ 1000;
2245 uint32_t sig_pending
= 0;
2248 if (!hw
|| !ioreq
|| !pib
) {
2249 BCMLOG_ERR("Invalid Arguments\n");
2250 return BC_STS_INV_ARG
;
2253 rpkt
= crystalhd_dioq_fetch_wait(hw
->rx_rdyq
, timeout
, &sig_pending
);
2256 BCMLOG(BCMLOG_INFO
, "wait on frame time out %d\n",
2258 return BC_STS_IO_USER_ABORT
;
2260 return BC_STS_TIMEOUT
;
2264 rpkt
->dio_req
->uinfo
.comp_flags
= rpkt
->flags
;
2266 if (rpkt
->flags
& COMP_FLAG_PIB_VALID
)
2267 memcpy(pib
, &rpkt
->pib
, sizeof(*pib
));
2269 *ioreq
= rpkt
->dio_req
;
2271 crystalhd_hw_free_rx_pkt(hw
, rpkt
);
2273 return BC_STS_SUCCESS
;
2276 enum BC_STATUS
crystalhd_hw_start_capture(struct crystalhd_hw
*hw
)
2278 struct crystalhd_rx_dma_pkt
*rx_pkt
;
2283 BCMLOG_ERR("Invalid Arguments\n");
2284 return BC_STS_INV_ARG
;
2287 /* This is start of capture.. Post to both the lists.. */
2288 for (i
= 0; i
< DMA_ENGINE_CNT
; i
++) {
2289 rx_pkt
= crystalhd_dioq_fetch(hw
->rx_freeq
);
2291 return BC_STS_NO_DATA
;
2292 sts
= crystalhd_hw_post_cap_buff(hw
, rx_pkt
);
2293 if (BC_STS_SUCCESS
!= sts
)
2298 return BC_STS_SUCCESS
;
2301 enum BC_STATUS
crystalhd_hw_stop_capture(struct crystalhd_hw
*hw
)
2306 BCMLOG_ERR("Invalid Arguments\n");
2307 return BC_STS_INV_ARG
;
2310 crystalhd_stop_rx_dma_engine(hw
);
2313 temp
= crystalhd_dioq_fetch(hw
->rx_freeq
);
2315 crystalhd_rx_pkt_rel_call_back(hw
, temp
);
2318 return BC_STS_SUCCESS
;
2321 enum BC_STATUS
crystalhd_hw_pause(struct crystalhd_hw
*hw
)
2323 hw
->stats
.pause_cnt
++;
2324 hw
->stop_pending
= 1;
2326 if ((hw
->rx_list_sts
[0] == sts_free
) &&
2327 (hw
->rx_list_sts
[1] == sts_free
))
2328 crystalhd_hw_finalize_pause(hw
);
2330 return BC_STS_SUCCESS
;
2333 enum BC_STATUS
crystalhd_hw_unpause(struct crystalhd_hw
*hw
)
2338 hw
->stop_pending
= 0;
2340 aspm
= crystalhd_reg_rd(hw
->adp
, PCIE_DLL_DATA_LINK_CONTROL
);
2341 aspm
&= ~ASPM_L1_ENABLE
;
2342 /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
2343 crystalhd_reg_wr(hw
->adp
, PCIE_DLL_DATA_LINK_CONTROL
, aspm
);
2345 sts
= crystalhd_hw_start_capture(hw
);
2349 enum BC_STATUS
crystalhd_hw_suspend(struct crystalhd_hw
*hw
)
2354 BCMLOG_ERR("Invalid Arguments\n");
2355 return BC_STS_INV_ARG
;
2358 sts
= crystalhd_put_ddr2sleep(hw
);
2359 if (sts
!= BC_STS_SUCCESS
) {
2360 BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
2361 return BC_STS_ERROR
;
2364 if (!crystalhd_stop_device(hw
->adp
)) {
2365 BCMLOG_ERR("Failed to Stop Device!!\n");
2366 return BC_STS_ERROR
;
2369 return BC_STS_SUCCESS
;
2372 void crystalhd_hw_stats(struct crystalhd_hw
*hw
,
2373 struct crystalhd_hw_stats
*stats
)
2376 BCMLOG_ERR("Invalid Arguments\n");
2380 /* if called w/NULL stats, its a req to zero out the stats */
2382 memset(&hw
->stats
, 0, sizeof(hw
->stats
));
2386 hw
->stats
.freeq_count
= crystalhd_dioq_count(hw
->rx_freeq
);
2387 hw
->stats
.rdyq_count
= crystalhd_dioq_count(hw
->rx_rdyq
);
2388 memcpy(stats
, &hw
->stats
, sizeof(*stats
));
2391 enum BC_STATUS
crystalhd_hw_set_core_clock(struct crystalhd_hw
*hw
)
2394 uint32_t vco_mg
, refresh_reg
;
2397 BCMLOG_ERR("Invalid Arguments\n");
2398 return BC_STS_INV_ARG
;
2401 /* FIXME: jarod: wha? */
2402 /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
2403 n
= hw
->core_clock_mhz
/5;
2405 if (n
== hw
->prev_n
)
2406 return BC_STS_CLK_NOCHG
;
2408 if (hw
->pwr_lock
> 0) {
2409 /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
2410 return BC_STS_CLK_NOCHG
;
2423 reg
= bc_dec_reg_rd(hw
->adp
, DecHt_PllACtl
);
2427 reg
|= vco_mg
<< 12;
2429 BCMLOG(BCMLOG_INFO
, "clock is moving to %d with n %d with vco_mg %d\n",
2430 hw
->core_clock_mhz
, n
, vco_mg
);
2432 /* Change the DRAM refresh rate to accommodate the new frequency */
2433 /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/
2434 refresh_reg
= (7 * hw
->core_clock_mhz
/ 16);
2435 bc_dec_reg_wr(hw
->adp
, SDRAM_REF_PARAM
, ((1 << 12) | refresh_reg
));
2437 bc_dec_reg_wr(hw
->adp
, DecHt_PllACtl
, reg
);
2441 for (i
= 0; i
< 10; i
++) {
2442 reg
= bc_dec_reg_rd(hw
->adp
, DecHt_PllACtl
);
2444 if (reg
& 0x00020000) {
2446 /* FIXME: jarod: outputting
2447 a random "C" is... confusing... */
2448 BCMLOG(BCMLOG_INFO
, "C");
2449 return BC_STS_SUCCESS
;
2451 msleep_interruptible(10);
2454 BCMLOG(BCMLOG_INFO
, "clk change failed\n");
2455 return BC_STS_CLK_NOCHG
;