/*
 * skl-sst-cldma.c - Code Loader DMA handler
 *
 * Copyright (C) 2015, Intel Corporation.
 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
			SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}

void skl_cldma_int_disable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}

static void skl_cldma_stream_run(struct sst_dsp *ctx, bool enable)
{
	unsigned char val;
	int timeout;

	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(enable));

	udelay(3);
	/* poll the Run bit for up to ~900 us (300 iterations x 3 us) */
	timeout = 300;
	do {
		/* wait for the hardware to report the requested Run bit state */
		val = sst_dsp_shim_read(ctx, SKL_ADSP_REG_CL_SD_CTL) &
			CL_SD_CTL_RUN_MASK;
		if (enable && val)
			break;
		else if (!enable && !val)
			break;
		udelay(3);
	} while (--timeout);

	if (timeout == 0)
		dev_err(ctx->dev, "Failed to set Run bit=%d enable=%d\n", val, enable);
}

static void skl_cldma_stream_clear(struct sst_dsp *ctx)
{
	/* make sure the Run bit is cleared before programming the stream registers */
	skl_cldma_stream_run(ctx, 0);

	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}

/* Code loader helper APIs */
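/*
 * Each BDL (buffer descriptor list) entry written below occupies four
 * 32-bit little-endian words. An equivalent struct view, shown for
 * illustration only (the code writes the raw words directly):
 *
 *	struct bdl_entry {
 *		__le32 addr_l;	- lower 32 bits of the buffer physical address
 *		__le32 addr_h;	- upper 32 bits of the buffer physical address
 *		__le32 size;	- fragment length in bytes
 *		__le32 ioc;	- bit 0: interrupt on completion
 *	};
 */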
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_data,
		u32 **bdlp, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	ctx->cl_dev.frags = 0;
	while (size > 0) {
		phys_addr_t addr = virt_to_phys(dmab_data->area +
				(ctx->cl_dev.frags * ctx->cl_dev.bufsize));

		/* buffer physical address, split into lower/upper words */
		bdl[0] = cpu_to_le32(lower_32_bits(addr));
		bdl[1] = cpu_to_le32(upper_32_bits(addr));

		/* fragment length in bytes */
		bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);

		size -= ctx->cl_dev.bufsize;
		/* request an interrupt on completion only for the last entry */
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);

		bdl += 4;
		ctx->cl_dev.frags++;
	}
}

/*
 * Setup controller
 * Configure the registers with the DMA buffer address and
 * enable interrupts.
 * Note: stream number 1 (FW_CL_STREAM_NUMBER) is used for the transfer.
 */
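/*
 * skl_cldma_prepare() below calls this with max_size equal to the ring
 * buffer size and count equal to the number of BDL fragments, so CBL
 * (cyclic buffer length) spans the whole ring and LVI (last valid
 * index) is count - 1.
 */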
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
		u32 count)
{
	skl_cldma_stream_clear(ctx);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
			CL_SD_BDLPLBA(dmab_bdl->addr));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
			CL_SD_BDLPUBA(dmab_bdl->addr));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}
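
/*
 * SPIB (software position in buffer) publishes how many bytes in the
 * ring are valid for the DMA to consume; SPIBE in SPBFCCTL enables use
 * of that value.
 */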
static void skl_cldma_setup_spb(struct sst_dsp *ctx,
		unsigned int size, bool enable)
{
	if (enable)
		sst_dsp_shim_update_bits_unlocked(ctx,
				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
				CL_SPBFIFO_SPBFCCTL_SPIBE(1));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}

static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
			CL_SPBFIFO_SPBFCCTL_SPIBE(0));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}

static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
	skl_cldma_cleanup_spb(ctx);
	skl_cldma_stream_clear(ctx);

	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
}

static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
	int ret = 0;

	/*
	 * Note: despite the function name, wait_event_timeout() is not
	 * an interruptible wait; signals will not wake it early.
	 */
	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
				ctx->cl_dev.wait_condition,
				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
		ret = -EIO;
		goto cleanup;
	}

	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
		ret = -EIO;
	}

cleanup:
	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
	return ret;
}

static void skl_cldma_stop(struct sst_dsp *ctx)
{
	skl_cldma_stream_run(ctx, false);
}

static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
		const void *curr_pos, bool intr_enable, bool trigger)
{
	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
			ctx->cl_dev.dma_buffer_offset, trigger);
	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);

	/*
	 * Check whether the copy would cross the ring buffer boundary.
	 * If so, copy up to the end of the buffer first, then copy the
	 * remainder starting from the beginning of the ring buffer.
	 */
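	/*
	 * For example (illustrative numbers only): with bufsize = 0x1000,
	 * dma_buffer_offset = 0xf00 and size = 0x200, the first 0x100
	 * bytes fill the tail of the ring and the remaining 0x100 bytes
	 * wrap around to offset 0.
	 */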
	if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
		unsigned int size_b = ctx->cl_dev.bufsize -
			ctx->cl_dev.dma_buffer_offset;
		memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size_b);
		size -= size_b;
		curr_pos += size_b;
		ctx->cl_dev.dma_buffer_offset = 0;
	}

	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
		curr_pos, size);

	/* the next copy starts where the new SPIB position points */
	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
		ctx->cl_dev.dma_buffer_offset = 0;
	else
		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;

	ctx->cl_dev.wait_condition = false;

	if (intr_enable)
		skl_cldma_int_enable(ctx);

	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
	if (trigger)
		ctx->cl_dev.ops.cl_trigger(ctx, true);
}

/*
 * The CL DMA has no way to report transfer status until a BDL buffer
 * has been fully transferred.
 *
 * Copying is therefore split into two cases:
 * 1. If the data to be transferred is larger than the ring buffer,
 *    interrupt on buffer completion and refill the ring.
 * 2. If the remaining data fits within the ring buffer, skip the
 *    interrupt; the caller polls the required firmware status register
 *    to determine the transfer status.
 */
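/*
 * For example (illustrative numbers only): with a 128 KiB ring buffer
 * and a 300 KiB binary, the first two 128 KiB chunks are copied with
 * interrupt on completion (case 1), and the trailing 44 KiB is copied
 * with interrupts disabled and left for the caller to poll (case 2).
 */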
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
{
	int ret = 0;
	bool start = true;
	unsigned int excess_bytes;
	u32 size;
	unsigned int bytes_left = total_size;
	const void *curr_pos = bin;

	if (!total_size)
		return -EINVAL;

	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

	while (bytes_left) {
		if (bytes_left > ctx->cl_dev.bufsize) {

			/*
			 * the DMA transfers only up to the write pointer
			 * published in SPIB
			 */
			if (ctx->cl_dev.curr_spib_pos == 0)
				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

			size = ctx->cl_dev.bufsize;
			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

			start = false;
			ret = skl_cldma_wait_interruptible(ctx);
			if (ret < 0) {
				skl_cldma_stop(ctx);
				return ret;
			}

		} else {
			skl_cldma_int_disable(ctx);

			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
					<= ctx->cl_dev.bufsize) {
				ctx->cl_dev.curr_spib_pos += bytes_left;
			} else {
				excess_bytes = bytes_left -
					(ctx->cl_dev.bufsize -
					ctx->cl_dev.curr_spib_pos);
				ctx->cl_dev.curr_spib_pos = excess_bytes;
			}

			size = bytes_left;
			skl_cldma_fill_buffer(ctx, size,
					curr_pos, false, start);
		}
		bytes_left -= size;
		curr_pos = curr_pos + size;
	}

	return ret;
}
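
/*
 * CL DMA completion interrupt handler. This is expected to be invoked
 * from the ADSP interrupt thread when the code-loader DMA interrupt
 * status is raised (the exact dispatch path lives outside this file
 * and is an assumption here).
 */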
void skl_cldma_process_intr(struct sst_dsp *ctx)
{
	u8 cl_dma_intr_status;

	cl_dma_intr_status =
		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
	else
		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

	ctx->cl_dev.wait_condition = true;
	wake_up(&ctx->cl_dev.wait_queue);
}
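
/*
 * A sketch of the expected call flow, based on the ops wired up below
 * (the caller and its variable names are illustrative):
 *
 *	ret = skl_cldma_prepare(ctx);
 *	...
 *	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, fw_data, fw_size);
 *	...
 *	ctx->cl_dev.ops.cl_stop_dma(ctx);
 *	ctx->cl_dev.ops.cl_cleanup_controller(ctx);
 */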
int skl_cldma_prepare(struct sst_dsp *ctx)
{
	int ret;
	u32 *bdl;

	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

	/* Set up the cl ops */
	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
	ctx->cl_dev.ops.cl_trigger = skl_cldma_stream_run;
	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

	/* Allocate the data buffer */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for base fw failed: %d\n", ret);
		return ret;
	}

	/* Allocate the code loader BDL */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for bdl failed: %d\n", ret);
		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
		return ret;
	}
	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;

	/* Set up the BDL entries and program the controller */
	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
			&bdl, ctx->cl_dev.bufsize, 1);
	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
			ctx->cl_dev.bufsize, ctx->cl_dev.frags);

	ctx->cl_dev.curr_spib_pos = 0;
	ctx->cl_dev.dma_buffer_offset = 0;
	init_waitqueue_head(&ctx->cl_dev.wait_queue);

	return ret;
}