/*
 * File:         sound/soc/blackfin/bf5xx-ac97-pcm.c
 * Author:       Cliff Cai <Cliff.Cai@analog.com>
 *
 * Created:      Tue June 06 2008
 * Description:  DMA Driver for AC97 sound chip
 *
 * Modified:
 *               Copyright 2008 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include <asm/dma.h>

#include "bf5xx-ac97.h"
#include "bf5xx-sport.h"

static unsigned int ac97_chan_mask[] = {
	SP_FL,					/* Mono */
	SP_STEREO,				/* Stereo */
	SP_2DOT1,				/* 2.1 */
	SP_QUAD,				/* Quadraphonic */
	SP_FL | SP_FR | SP_FC | SP_SL | SP_SR,	/* 5 channels */
	SP_5DOT1,				/* 5.1 */
};

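/*
 * With MMAP support enabled, user space reads/writes 16-bit PCM samples in
 * runtime->dma_area while the hardware streams struct ac97_frame slots from
 * a separate DMA buffer.  bf5xx_mmap_copy() converts one period at a time
 * between the two, advancing the tx/rx ring positions (counted in frames)
 * and wrapping them at buffer_size.
 */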
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
static void bf5xx_mmap_copy(struct snd_pcm_substream *substream,
	snd_pcm_uframes_t count)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sport_device *sport = runtime->private_data;
	unsigned int chan_mask = ac97_chan_mask[runtime->channels - 1];
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		bf5xx_pcm_to_ac97((struct ac97_frame *)sport->tx_dma_buf +
			sport->tx_pos, (__u16 *)runtime->dma_area + sport->tx_pos *
			runtime->channels, count, chan_mask);
		sport->tx_pos += runtime->period_size;
		if (sport->tx_pos >= runtime->buffer_size)
			sport->tx_pos %= runtime->buffer_size;
		sport->tx_delay_pos = sport->tx_pos;
	} else {
		bf5xx_ac97_to_pcm((struct ac97_frame *)sport->rx_dma_buf +
			sport->rx_pos, (__u16 *)runtime->dma_area + sport->rx_pos *
			runtime->channels, count);
		sport->rx_pos += runtime->period_size;
		if (sport->rx_pos >= runtime->buffer_size)
			sport->rx_pos %= runtime->buffer_size;
	}
}
#endif

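/*
 * DMA completion callback shared by playback and capture.  In MMAP mode it
 * copies the next period between the PCM area and the AC97 frame buffer;
 * for playback the first interrupt additionally primes a second period
 * (guarded by sport->once) so the hardware does not run dry.  In all cases
 * a period boundary is reported to ALSA via snd_pcm_period_elapsed().
 */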
static void bf5xx_dma_irq(void *data)
{
	struct snd_pcm_substream *pcm = data;
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
	struct snd_pcm_runtime *runtime = pcm->runtime;
	struct sport_device *sport = runtime->private_data;
	bf5xx_mmap_copy(pcm, runtime->period_size);
	if (pcm->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (sport->once == 0) {
			snd_pcm_period_elapsed(pcm);
			bf5xx_mmap_copy(pcm, runtime->period_size);
			sport->once = 1;
		}
	}
#endif
	snd_pcm_period_elapsed(pcm);
}

/* The memory size for pure PCM data is 128 * 1024 = 0x20000 bytes.
 * The total rx/tx DMA buffer, which holds the AC97 frames for all of that
 * PCM data, is 0x20000 * sizeof(struct ac97_frame) / 4 bytes.
 */
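/*
 * Worked example (assuming a 16-byte struct ac97_frame, i.e. eight 16-bit
 * slots; the actual size is defined in bf5xx-ac97.h): 0x20000 * 16 / 4 =
 * 0x80000 bytes.  The division by 4 is the stereo worst case of 4 PCM
 * bytes (one 16-bit sample per channel) per AC97 frame.
 */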
static const struct snd_pcm_hardware bf5xx_pcm_hardware = {
	.info			= SNDRV_PCM_INFO_INTERLEAVED |
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
				  SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID |
#endif
				  SNDRV_PCM_INFO_BLOCK_TRANSFER,

	.period_bytes_min	= 32,
	.period_bytes_max	= 0x10000,
	.periods_min		= 1,
	.periods_max		= PAGE_SIZE/32,
	.buffer_bytes_max	= 0x20000, /* 128 kbytes */
	.fifo_size		= 16,
};

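/*
 * hw_params always claims the maximum buffer, buffer_bytes_max scaled by
 * sizeof(struct ac97_frame) / 4, from the preallocated DMA area regardless
 * of the parameters actually requested.
 */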
static int bf5xx_pcm_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	size_t size = bf5xx_pcm_hardware.buffer_bytes_max
			* sizeof(struct ac97_frame) / 4;

	snd_pcm_lib_malloc_pages(substream, size);

	return 0;
}

static int bf5xx_pcm_hw_free(struct snd_pcm_substream *substream)
{
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sport_device *sport = runtime->private_data;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		sport->once = 0;
		if (runtime->dma_area)
			memset(runtime->dma_area, 0, runtime->buffer_size);
		memset(sport->tx_dma_buf, 0, runtime->buffer_size *
			sizeof(struct ac97_frame));
	} else
		memset(sport->rx_dma_buf, 0, runtime->buffer_size *
			sizeof(struct ac97_frame));
#endif
	snd_pcm_lib_free_pages(substream);
	return 0;
}

static int bf5xx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sport_device *sport = runtime->private_data;

	/* An intermediate buffer is introduced for implementing mmap for
	 * SPORT working in TDM mode (including AC97).
	 */
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		sport_set_tx_callback(sport, bf5xx_dma_irq, substream);
		sport_config_tx_dma(sport, sport->tx_dma_buf, runtime->periods,
			runtime->period_size * sizeof(struct ac97_frame));
	} else {
		sport_set_rx_callback(sport, bf5xx_dma_irq, substream);
		sport_config_rx_dma(sport, sport->rx_dma_buf, runtime->periods,
			runtime->period_size * sizeof(struct ac97_frame));
	}
#else
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		sport_set_tx_callback(sport, bf5xx_dma_irq, substream);
		sport_config_tx_dma(sport, runtime->dma_area, runtime->periods,
			runtime->period_size * sizeof(struct ac97_frame));
	} else {
		sport_set_rx_callback(sport, bf5xx_dma_irq, substream);
		sport_config_rx_dma(sport, runtime->dma_area, runtime->periods,
			runtime->period_size * sizeof(struct ac97_frame));
	}
#endif
	return 0;
}

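/*
 * Start/stop the SPORT DMA.  On START in MMAP mode the first period is
 * pre-converted into the AC97 frame buffer and tx_delay_pos is reset so
 * the pointer callback reports 0 until real data has been consumed; on
 * STOP/SUSPEND/PAUSE the software ring positions are reset to 0.
 */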
static int bf5xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sport_device *sport = runtime->private_data;
	int ret = 0;

	pr_debug("%s enter\n", __func__);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
			bf5xx_mmap_copy(substream, runtime->period_size);
			sport->tx_delay_pos = 0;
#endif
			sport_tx_start(sport);
		} else
			sport_rx_start(sport);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
			sport->tx_pos = 0;
#endif
			sport_tx_stop(sport);
		} else {
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
			sport->rx_pos = 0;
#endif
			sport_rx_stop(sport);
		}
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

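/*
 * Report the current position in frames.  In MMAP mode the position is
 * tracked in software (tx_delay_pos/rx_pos, updated one period at a time
 * by bf5xx_mmap_copy); otherwise it is derived from the hardware DMA
 * offset, converted from AC97 frames back to PCM frames.
 */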
static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sport_device *sport = runtime->private_data;
	unsigned int curr;

#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		curr = sport->tx_delay_pos;
	else
		curr = sport->rx_pos;
#else

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		curr = sport_curr_offset_tx(sport) / sizeof(struct ac97_frame);
	else
		curr = sport_curr_offset_rx(sport) / sizeof(struct ac97_frame);

#endif
	return curr;
}

static int bf5xx_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	pr_debug("%s enter\n", __func__);
	snd_soc_set_runtime_hwparams(substream, &bf5xx_pcm_hardware);

	ret = snd_pcm_hw_constraint_integer(runtime,
			SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		goto out;

	if (sport_handle != NULL)
		runtime->private_data = sport_handle;
	else {
		pr_err("sport_handle is NULL\n");
		return -ENODEV;
	}
	return 0;

 out:
	return ret;
}

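/*
 * With MMAP support, user space maps the PCM dma_area directly and the
 * interrupt handler performs the PCM <-> AC97 frame conversion.  Without
 * it, the copy callback below performs the conversion on every read/write
 * from user space instead.
 */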
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
static int bf5xx_pcm_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	size_t size = vma->vm_end - vma->vm_start;
	vma->vm_start = (unsigned long)runtime->dma_area;
	vma->vm_end = vma->vm_start + size;
	vma->vm_flags |= VM_SHARED;
	return 0;
}
#else
static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
	snd_pcm_uframes_t pos,
	void __user *buf, snd_pcm_uframes_t count)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int chan_mask = ac97_chan_mask[runtime->channels - 1];
	pr_debug("%s copy pos:0x%lx count:0x%lx\n",
			substream->stream ? "Capture" : "Playback", pos, count);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		bf5xx_pcm_to_ac97((struct ac97_frame *)runtime->dma_area + pos,
			(__u16 *)buf, count, chan_mask);
	else
		bf5xx_ac97_to_pcm((struct ac97_frame *)runtime->dma_area + pos,
			(__u16 *)buf, count);
	return 0;
}
#endif

static struct snd_pcm_ops bf5xx_pcm_ac97_ops = {
	.open		= bf5xx_pcm_open,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= bf5xx_pcm_hw_params,
	.hw_free	= bf5xx_pcm_hw_free,
	.prepare	= bf5xx_pcm_prepare,
	.trigger	= bf5xx_pcm_trigger,
	.pointer	= bf5xx_pcm_pointer,
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
	.mmap		= bf5xx_pcm_mmap,
#else
	.copy		= bf5xx_pcm_copy,
#endif
};

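/*
 * Pre-allocate one coherent DMA buffer per substream, sized for the
 * expanded AC97 frame stream, and, when MMAP support is enabled, an
 * additional intermediate buffer per direction (tx_dma_buf/rx_dma_buf)
 * that the SPORT hardware actually streams from/to.
 */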
static int bf5xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_soc_pcm_runtime *rtd = pcm->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	size_t size = bf5xx_pcm_hardware.buffer_bytes_max
			* sizeof(struct ac97_frame) / 4;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;
	buf->area = dma_alloc_coherent(pcm->card->dev, size,
			&buf->addr, GFP_KERNEL);
	if (!buf->area) {
		pr_err("Failed to allocate dma memory\n");
		pr_err("Please increase uncached DMA memory region\n");
		return -ENOMEM;
	}
	buf->bytes = size;

	pr_debug("%s, area:%p, size:0x%08lx\n", __func__,
			buf->area, buf->bytes);

	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		sport_handle->tx_buf = buf->area;
	else
		sport_handle->rx_buf = buf->area;

/*
 * A local intermediate buffer also needs to be allocated when MMAP is
 * enabled for SPORT working in TDM mode (including AC97).
 */
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (!sport_handle->tx_dma_buf) {
			sport_handle->tx_dma_buf = dma_alloc_coherent(NULL,
				size, &sport_handle->tx_dma_phy, GFP_KERNEL);
			if (!sport_handle->tx_dma_buf) {
				pr_err("Failed to allocate memory for tx dma buf - Please increase uncached DMA memory region\n");
				return -ENOMEM;
			} else
				memset(sport_handle->tx_dma_buf, 0, size);
		} else
			memset(sport_handle->tx_dma_buf, 0, size);
	} else {
		if (!sport_handle->rx_dma_buf) {
			sport_handle->rx_dma_buf = dma_alloc_coherent(NULL,
				size, &sport_handle->rx_dma_phy, GFP_KERNEL);
			if (!sport_handle->rx_dma_buf) {
				pr_err("Failed to allocate memory for rx dma buf - Please increase uncached DMA memory region\n");
				return -ENOMEM;
			} else
				memset(sport_handle->rx_dma_buf, 0, size);
		} else
			memset(sport_handle->rx_dma_buf, 0, size);
	}
#endif
	return 0;
}

static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
	int stream;
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
	struct snd_soc_pcm_runtime *rtd = pcm->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
	size_t size = bf5xx_pcm_hardware.buffer_bytes_max *
		sizeof(struct ac97_frame) / 4;
#endif
	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;
		dma_free_coherent(NULL, buf->bytes, buf->area, 0);
		buf->area = NULL;
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			if (sport_handle->tx_dma_buf)
				dma_free_coherent(NULL, size,
					sport_handle->tx_dma_buf, 0);
			sport_handle->tx_dma_buf = NULL;
		} else {
			if (sport_handle->rx_dma_buf)
				dma_free_coherent(NULL, size,
					sport_handle->rx_dma_buf, 0);
			sport_handle->rx_dma_buf = NULL;
		}
#endif
	}
}

static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	pr_debug("%s enter\n", __func__);
	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
			goto out;
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_CAPTURE);
		if (ret)
			goto out;
	}
 out:
	return ret;
}

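/*
 * ASoC platform glue: bf5xx_pcm_ac97_new() runs when the PCM device is
 * created, sets a 32-bit DMA mask on the card and pre-allocates buffers
 * for whichever playback/capture substreams exist; the platform driver
 * below registers this platform as "bfin-ac97-pcm-audio".
 */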
static struct snd_soc_platform_driver bf5xx_ac97_soc_platform = {
	.ops		= &bf5xx_pcm_ac97_ops,
	.pcm_new	= bf5xx_pcm_ac97_new,
	.pcm_free	= bf5xx_pcm_free_dma_buffers,
};

static int bf5xx_soc_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &bf5xx_ac97_soc_platform);
}

static int bf5xx_soc_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver bf5xx_pcm_driver = {
	.driver = {
		.name = "bfin-ac97-pcm-audio",
		.owner = THIS_MODULE,
	},

	.probe = bf5xx_soc_platform_probe,
	.remove = bf5xx_soc_platform_remove,
};

module_platform_driver(bf5xx_pcm_driver);

MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("ADI Blackfin AC97 PCM DMA module");
MODULE_LICENSE("GPL");