/*
 *  sst.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14	Intel Corp
 *  Authors:	Vinod Koul <vinod.koul@intel.com>
 *		Harsha Priya <priya.harsha@intel.com>
 *		Dharageswari R <dharageswari.r@intel.com>
 *		KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  General Public License for more details.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/async.h>
#include <linux/acpi.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
#include "../../common/sst-dsp.h"
37 MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
38 MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
39 MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
40 MODULE_LICENSE("GPL v2");
42 static inline bool sst_is_process_reply(u32 msg_id
)
44 return ((msg_id
& PROCESS_MSG
) ? true : false);
47 static inline bool sst_validate_mailbox_size(unsigned int size
)
49 return ((size
<= SST_MAILBOX_SIZE
) ? true : false);
52 static irqreturn_t
intel_sst_interrupt_mrfld(int irq
, void *context
)
54 union interrupt_reg_mrfld isr
;
55 union ipc_header_mrfld header
;
56 union sst_imr_reg_mrfld imr
;
57 struct ipc_post
*msg
= NULL
;
58 unsigned int size
= 0;
59 struct intel_sst_drv
*drv
= (struct intel_sst_drv
*) context
;
60 irqreturn_t retval
= IRQ_HANDLED
;
62 /* Interrupt arrived, check src */
63 isr
.full
= sst_shim_read64(drv
->shim
, SST_ISRX
);
65 if (isr
.part
.done_interrupt
) {
67 spin_lock(&drv
->ipc_spin_lock
);
68 header
.full
= sst_shim_read64(drv
->shim
,
70 header
.p
.header_high
.part
.done
= 0;
71 sst_shim_write64(drv
->shim
, drv
->ipc_reg
.ipcx
, header
.full
);
73 /* write 1 to clear status register */;
74 isr
.part
.done_interrupt
= 1;
75 sst_shim_write64(drv
->shim
, SST_ISRX
, isr
.full
);
76 spin_unlock(&drv
->ipc_spin_lock
);
78 /* we can send more messages to DSP so trigger work */
79 queue_work(drv
->post_msg_wq
, &drv
->ipc_post_msg_wq
);
83 if (isr
.part
.busy_interrupt
) {
84 /* message from dsp so copy that */
85 spin_lock(&drv
->ipc_spin_lock
);
86 imr
.full
= sst_shim_read64(drv
->shim
, SST_IMRX
);
87 imr
.part
.busy_interrupt
= 1;
88 sst_shim_write64(drv
->shim
, SST_IMRX
, imr
.full
);
89 spin_unlock(&drv
->ipc_spin_lock
);
90 header
.full
= sst_shim_read64(drv
->shim
, drv
->ipc_reg
.ipcd
);
92 if (sst_create_ipc_msg(&msg
, header
.p
.header_high
.part
.large
)) {
93 drv
->ops
->clear_interrupt(drv
);
97 if (header
.p
.header_high
.part
.large
) {
98 size
= header
.p
.header_low_payload
;
99 if (sst_validate_mailbox_size(size
)) {
100 memcpy_fromio(msg
->mailbox_data
,
101 drv
->mailbox
+ drv
->mailbox_recv_offset
, size
);
104 "Mailbox not copied, payload size is: %u\n", size
);
105 header
.p
.header_low_payload
= 0;
109 msg
->mrfld_header
= header
;
110 msg
->is_process_reply
=
111 sst_is_process_reply(header
.p
.header_high
.part
.msg_id
);
112 spin_lock(&drv
->rx_msg_lock
);
113 list_add_tail(&msg
->node
, &drv
->rx_list
);
114 spin_unlock(&drv
->rx_msg_lock
);
115 drv
->ops
->clear_interrupt(drv
);
116 retval
= IRQ_WAKE_THREAD
;
121 static irqreturn_t
intel_sst_irq_thread_mrfld(int irq
, void *context
)
123 struct intel_sst_drv
*drv
= (struct intel_sst_drv
*) context
;
124 struct ipc_post
*__msg
, *msg
= NULL
;
125 unsigned long irq_flags
;
127 spin_lock_irqsave(&drv
->rx_msg_lock
, irq_flags
);
128 if (list_empty(&drv
->rx_list
)) {
129 spin_unlock_irqrestore(&drv
->rx_msg_lock
, irq_flags
);
133 list_for_each_entry_safe(msg
, __msg
, &drv
->rx_list
, node
) {
134 list_del(&msg
->node
);
135 spin_unlock_irqrestore(&drv
->rx_msg_lock
, irq_flags
);
136 if (msg
->is_process_reply
)
137 drv
->ops
->process_message(msg
);
139 drv
->ops
->process_reply(drv
, msg
);
142 kfree(msg
->mailbox_data
);
144 spin_lock_irqsave(&drv
->rx_msg_lock
, irq_flags
);
146 spin_unlock_irqrestore(&drv
->rx_msg_lock
, irq_flags
);
150 static int sst_save_dsp_context_v2(struct intel_sst_drv
*sst
)
154 ret
= sst_prepare_and_post_msg(sst
, SST_TASK_ID_MEDIA
, IPC_CMD
,
155 IPC_PREP_D3
, PIPE_RSVD
, 0, NULL
, NULL
,
156 true, true, false, true);
159 dev_err(sst
->dev
, "not suspending FW!!, Err: %d\n", ret
);
167 static struct intel_sst_ops mrfld_ops
= {
168 .interrupt
= intel_sst_interrupt_mrfld
,
169 .irq_thread
= intel_sst_irq_thread_mrfld
,
170 .clear_interrupt
= intel_sst_clear_intr_mrfld
,
171 .start
= sst_start_mrfld
,
172 .reset
= intel_sst_reset_dsp_mrfld
,
173 .post_message
= sst_post_message_mrfld
,
174 .process_reply
= sst_process_reply_mrfld
,
175 .save_dsp_context
= sst_save_dsp_context_v2
,
176 .alloc_stream
= sst_alloc_stream_mrfld
,
177 .post_download
= sst_post_download_mrfld
,
180 int sst_driver_ops(struct intel_sst_drv
*sst
)
183 switch (sst
->dev_id
) {
184 case SST_MRFLD_PCI_ID
:
185 case SST_BYT_ACPI_ID
:
186 case SST_CHV_ACPI_ID
:
187 sst
->tstamp
= SST_TIME_STAMP_MRFLD
;
188 sst
->ops
= &mrfld_ops
;
193 "SST Driver capabilities missing for dev_id: %x",
199 void sst_process_pending_msg(struct work_struct
*work
)
201 struct intel_sst_drv
*ctx
= container_of(work
,
202 struct intel_sst_drv
, ipc_post_msg_wq
);
204 ctx
->ops
->post_message(ctx
, NULL
, false);
207 static int sst_workqueue_init(struct intel_sst_drv
*ctx
)
209 INIT_LIST_HEAD(&ctx
->memcpy_list
);
210 INIT_LIST_HEAD(&ctx
->rx_list
);
211 INIT_LIST_HEAD(&ctx
->ipc_dispatch_list
);
212 INIT_LIST_HEAD(&ctx
->block_list
);
213 INIT_WORK(&ctx
->ipc_post_msg_wq
, sst_process_pending_msg
);
214 init_waitqueue_head(&ctx
->wait_queue
);
217 create_singlethread_workqueue("sst_post_msg_wq");
218 if (!ctx
->post_msg_wq
)
223 static void sst_init_locks(struct intel_sst_drv
*ctx
)
225 mutex_init(&ctx
->sst_lock
);
226 spin_lock_init(&ctx
->rx_msg_lock
);
227 spin_lock_init(&ctx
->ipc_spin_lock
);
228 spin_lock_init(&ctx
->block_lock
);
231 int sst_alloc_drv_context(struct intel_sst_drv
**ctx
,
232 struct device
*dev
, unsigned int dev_id
)
234 *ctx
= devm_kzalloc(dev
, sizeof(struct intel_sst_drv
), GFP_KERNEL
);
239 (*ctx
)->dev_id
= dev_id
;
243 EXPORT_SYMBOL_GPL(sst_alloc_drv_context
);
245 int sst_context_init(struct intel_sst_drv
*ctx
)
252 if (!ctx
->pdata
->probe_data
)
255 memcpy(&ctx
->info
, ctx
->pdata
->probe_data
, sizeof(ctx
->info
));
257 ret
= sst_driver_ops(ctx
);
262 sst_set_fw_state_locked(ctx
, SST_RESET
);
264 /* pvt_id 0 reserved for async messages */
267 ctx
->fw_in_mem
= NULL
;
268 /* we use memcpy, so set to 0 */
272 if (sst_workqueue_init(ctx
))
275 ctx
->mailbox_recv_offset
= ctx
->pdata
->ipc_info
->mbox_recv_off
;
276 ctx
->ipc_reg
.ipcx
= SST_IPCX
+ ctx
->pdata
->ipc_info
->ipc_offset
;
277 ctx
->ipc_reg
.ipcd
= SST_IPCD
+ ctx
->pdata
->ipc_info
->ipc_offset
;
279 dev_info(ctx
->dev
, "Got drv data max stream %d\n",
280 ctx
->info
.max_streams
);
282 for (i
= 1; i
<= ctx
->info
.max_streams
; i
++) {
283 struct stream_info
*stream
= &ctx
->streams
[i
];
285 memset(stream
, 0, sizeof(*stream
));
286 stream
->pipe_id
= PIPE_RSVD
;
287 mutex_init(&stream
->lock
);
290 /* Register the ISR */
291 ret
= devm_request_threaded_irq(ctx
->dev
, ctx
->irq_num
, ctx
->ops
->interrupt
,
292 ctx
->ops
->irq_thread
, 0, SST_DRV_NAME
,
297 dev_dbg(ctx
->dev
, "Registered IRQ %#x\n", ctx
->irq_num
);
299 /* default intr are unmasked so set this as masked */
300 sst_shim_write64(ctx
->shim
, SST_IMRX
, 0xFFFF0038);
302 ctx
->qos
= devm_kzalloc(ctx
->dev
,
303 sizeof(struct pm_qos_request
), GFP_KERNEL
);
308 pm_qos_add_request(ctx
->qos
, PM_QOS_CPU_DMA_LATENCY
,
309 PM_QOS_DEFAULT_VALUE
);
311 dev_dbg(ctx
->dev
, "Requesting FW %s now...\n", ctx
->firmware_name
);
312 ret
= request_firmware_nowait(THIS_MODULE
, true, ctx
->firmware_name
,
313 ctx
->dev
, GFP_KERNEL
, ctx
, sst_firmware_load_cb
);
315 dev_err(ctx
->dev
, "Firmware download failed:%d\n", ret
);
318 sst_register(ctx
->dev
);
322 destroy_workqueue(ctx
->post_msg_wq
);
325 EXPORT_SYMBOL_GPL(sst_context_init
);
327 void sst_context_cleanup(struct intel_sst_drv
*ctx
)
329 pm_runtime_get_noresume(ctx
->dev
);
330 pm_runtime_disable(ctx
->dev
);
331 sst_unregister(ctx
->dev
);
332 sst_set_fw_state_locked(ctx
, SST_SHUTDOWN
);
333 flush_scheduled_work();
334 destroy_workqueue(ctx
->post_msg_wq
);
335 pm_qos_remove_request(ctx
->qos
);
336 kfree(ctx
->fw_sg_list
.src
);
337 kfree(ctx
->fw_sg_list
.dst
);
338 ctx
->fw_sg_list
.list_len
= 0;
339 kfree(ctx
->fw_in_mem
);
340 ctx
->fw_in_mem
= NULL
;
341 sst_memcpy_free_resources(ctx
);
344 EXPORT_SYMBOL_GPL(sst_context_cleanup
);
346 static inline void sst_save_shim64(struct intel_sst_drv
*ctx
,
348 struct sst_shim_regs64
*shim_regs
)
350 unsigned long irq_flags
;
352 spin_lock_irqsave(&ctx
->ipc_spin_lock
, irq_flags
);
354 shim_regs
->imrx
= sst_shim_read64(shim
, SST_IMRX
);
355 shim_regs
->csr
= sst_shim_read64(shim
, SST_CSR
);
358 spin_unlock_irqrestore(&ctx
->ipc_spin_lock
, irq_flags
);
361 static inline void sst_restore_shim64(struct intel_sst_drv
*ctx
,
363 struct sst_shim_regs64
*shim_regs
)
365 unsigned long irq_flags
;
368 * we only need to restore IMRX for this case, rest will be
369 * initialize by FW or driver when firmware is loaded
371 spin_lock_irqsave(&ctx
->ipc_spin_lock
, irq_flags
);
372 sst_shim_write64(shim
, SST_IMRX
, shim_regs
->imrx
);
373 sst_shim_write64(shim
, SST_CSR
, shim_regs
->csr
);
374 spin_unlock_irqrestore(&ctx
->ipc_spin_lock
, irq_flags
);
377 void sst_configure_runtime_pm(struct intel_sst_drv
*ctx
)
379 pm_runtime_set_autosuspend_delay(ctx
->dev
, SST_SUSPEND_DELAY
);
380 pm_runtime_use_autosuspend(ctx
->dev
);
382 * For acpi devices, the actual physical device state is
383 * initially active. So change the state to active before
388 pm_runtime_set_active(ctx
->dev
);
390 pm_runtime_enable(ctx
->dev
);
393 pm_runtime_set_active(ctx
->dev
);
395 pm_runtime_put_noidle(ctx
->dev
);
397 sst_save_shim64(ctx
, ctx
->shim
, ctx
->shim_regs64
);
399 EXPORT_SYMBOL_GPL(sst_configure_runtime_pm
);
401 static int intel_sst_runtime_suspend(struct device
*dev
)
404 struct intel_sst_drv
*ctx
= dev_get_drvdata(dev
);
406 if (ctx
->sst_state
== SST_RESET
) {
407 dev_dbg(dev
, "LPE is already in RESET state, No action\n");
410 /* save fw context */
411 if (ctx
->ops
->save_dsp_context(ctx
))
414 /* Move the SST state to Reset */
415 sst_set_fw_state_locked(ctx
, SST_RESET
);
417 synchronize_irq(ctx
->irq_num
);
418 flush_workqueue(ctx
->post_msg_wq
);
420 ctx
->ops
->reset(ctx
);
421 /* save the shim registers because PMC doesn't save state */
422 sst_save_shim64(ctx
, ctx
->shim
, ctx
->shim_regs64
);
427 static int intel_sst_suspend(struct device
*dev
)
429 struct intel_sst_drv
*ctx
= dev_get_drvdata(dev
);
430 struct sst_fw_save
*fw_save
;
433 /* check first if we are already in SW reset */
434 if (ctx
->sst_state
== SST_RESET
)
438 * check if any stream is active and running
439 * they should already by suspend by soc_suspend
441 for (i
= 1; i
<= ctx
->info
.max_streams
; i
++) {
442 struct stream_info
*stream
= &ctx
->streams
[i
];
444 if (stream
->status
== STREAM_RUNNING
) {
445 dev_err(dev
, "stream %d is running, can't suspend, abort\n", i
);
449 synchronize_irq(ctx
->irq_num
);
450 flush_workqueue(ctx
->post_msg_wq
);
452 /* Move the SST state to Reset */
453 sst_set_fw_state_locked(ctx
, SST_RESET
);
455 /* tell DSP we are suspending */
456 if (ctx
->ops
->save_dsp_context(ctx
))
459 /* save the memories */
460 fw_save
= kzalloc(sizeof(*fw_save
), GFP_KERNEL
);
463 fw_save
->iram
= kzalloc(ctx
->iram_end
- ctx
->iram_base
, GFP_KERNEL
);
464 if (!fw_save
->iram
) {
468 fw_save
->dram
= kzalloc(ctx
->dram_end
- ctx
->dram_base
, GFP_KERNEL
);
469 if (!fw_save
->dram
) {
473 fw_save
->sram
= kzalloc(SST_MAILBOX_SIZE
, GFP_KERNEL
);
474 if (!fw_save
->sram
) {
479 fw_save
->ddr
= kzalloc(ctx
->ddr_end
- ctx
->ddr_base
, GFP_KERNEL
);
485 memcpy32_fromio(fw_save
->iram
, ctx
->iram
, ctx
->iram_end
- ctx
->iram_base
);
486 memcpy32_fromio(fw_save
->dram
, ctx
->dram
, ctx
->dram_end
- ctx
->dram_base
);
487 memcpy32_fromio(fw_save
->sram
, ctx
->mailbox
, SST_MAILBOX_SIZE
);
488 memcpy32_fromio(fw_save
->ddr
, ctx
->ddr
, ctx
->ddr_end
- ctx
->ddr_base
);
490 ctx
->fw_save
= fw_save
;
491 ctx
->ops
->reset(ctx
);
494 kfree(fw_save
->sram
);
496 kfree(fw_save
->dram
);
498 kfree(fw_save
->iram
);
504 static int intel_sst_resume(struct device
*dev
)
506 struct intel_sst_drv
*ctx
= dev_get_drvdata(dev
);
507 struct sst_fw_save
*fw_save
= ctx
->fw_save
;
509 struct sst_block
*block
;
514 sst_set_fw_state_locked(ctx
, SST_FW_LOADING
);
516 /* we have to restore the memory saved */
517 ctx
->ops
->reset(ctx
);
521 memcpy32_toio(ctx
->iram
, fw_save
->iram
, ctx
->iram_end
- ctx
->iram_base
);
522 memcpy32_toio(ctx
->dram
, fw_save
->dram
, ctx
->dram_end
- ctx
->dram_base
);
523 memcpy32_toio(ctx
->mailbox
, fw_save
->sram
, SST_MAILBOX_SIZE
);
524 memcpy32_toio(ctx
->ddr
, fw_save
->ddr
, ctx
->ddr_end
- ctx
->ddr_base
);
526 kfree(fw_save
->sram
);
527 kfree(fw_save
->dram
);
528 kfree(fw_save
->iram
);
532 block
= sst_create_block(ctx
, 0, FW_DWNL_ID
);
537 /* start and wait for ack */
538 ctx
->ops
->start(ctx
);
539 ret
= sst_wait_timeout(ctx
, block
);
541 dev_err(ctx
->dev
, "fw download failed %d\n", ret
);
542 /* FW download failed due to timeout */
546 sst_set_fw_state_locked(ctx
, SST_FW_RUNNING
);
549 sst_free_block(ctx
, block
);
553 const struct dev_pm_ops intel_sst_pm
= {
554 .suspend
= intel_sst_suspend
,
555 .resume
= intel_sst_resume
,
556 .runtime_suspend
= intel_sst_runtime_suspend
,
558 EXPORT_SYMBOL_GPL(intel_sst_pm
);