ALSA: hda - Relocate RIRB/CORB interface to hda_controller
[deliverable/linux.git] / sound / pci / hda / hda_controller.c
CommitLineData
05e84878
DR
1/*
2 *
3 * Implementation of primary alsa driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23#include <linux/clocksource.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28#include <sound/core.h>
29#include <sound/initval.h>
30#include "hda_priv.h"
31#include "hda_controller.h"
32
33#define CREATE_TRACE_POINTS
34#include "hda_intel_trace.h"
35
2b5fd6c2
DR
/* DSP lock helpers */
#ifdef CONFIG_SND_HDA_DSP_LOADER
/*
 * With the DSP loader enabled, a stream engine may be temporarily claimed
 * for firmware download.  These helpers take the per-stream dsp_mutex and
 * test the 'locked' flag so the PCM ops can refuse a stream the loader owns.
 */
#define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
#define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
#define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
#define dsp_is_locked(dev) ((dev)->locked)
#else
/* no DSP loader configured: all locking compiles away to no-ops */
#define dsp_lock_init(dev) do {} while (0)
#define dsp_lock(dev) do {} while (0)
#define dsp_unlock(dev) do {} while (0)
#define dsp_is_locked(dev) 0
#endif
48
05e84878
DR
/*
 * AZX stream operations.
 */

/* start a stream: unmask its interrupt and kick the stream DMA engine */
static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
{
	/*
	 * Before stream start, initialize parameter.
	 * 'insufficient' marks the FIFO as not yet filled; it is consumed
	 * and cleared by the VIA capture-position workaround (see
	 * azx_via_get_position()).
	 */
	azx_dev->insufficient = 1;

	/* enable SIE (stream interrupt enable bit for this stream index) */
	azx_writel(chip, INTCTL,
		   azx_readl(chip, INTCTL) | (1 << azx_dev->index));
	/* set DMA start and interrupt mask */
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_DMA_START | SD_INT_MASK);
}
05e84878
DR
69
/* stop DMA: clear the run bit and the per-stream interrupt enables */
static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
{
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) &
		      ~(SD_CTL_DMA_START | SD_INT_MASK));
	/* ack any already-latched stream status bits, to be sure */
	azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
}
78
/* stop a stream: halt DMA, then disable its interrupt source */
void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
{
	azx_stream_clear(chip, azx_dev);
	/* disable SIE */
	azx_writel(chip, INTCTL,
		   azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
}
EXPORT_SYMBOL_GPL(azx_stream_stop);
88
/*
 * reset stream: enter and leave the stream descriptor reset state with
 * busy-wait handshakes on SD_CTL_STREAM_RESET.  Timeouts on either
 * handshake are silently ignored (best effort).
 */
static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned char val;
	int timeout;

	azx_stream_clear(chip, azx_dev);

	/* request reset entry */
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_STREAM_RESET);
	udelay(3);
	timeout = 300;
	/* wait for the hardware to acknowledge it entered reset */
	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		 SD_CTL_STREAM_RESET) && --timeout)
		;
	/* release reset */
	val &= ~SD_CTL_STREAM_RESET;
	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
	udelay(3);

	timeout = 300;
	/* waiting for hardware to report that the stream is out of reset */
	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		SD_CTL_STREAM_RESET) && --timeout)
		;

	/* reset first position - may not be synced with hw at this time */
	*azx_dev->posbuf = 0;
}
05e84878
DR
118
/*
 * set up the SD for streaming
 *
 * Program one stream descriptor register set from the fields that
 * azx_pcm_prepare() stored in @azx_dev: stream tag, cyclic buffer length,
 * format and the BDL address.  Also enables the DMA position buffer on
 * first use unless both directions are forced to LPIB.  Always returns 0.
 */
static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned int val;
	/* make sure the run bit is zero for SD */
	azx_stream_clear(chip, azx_dev);
	/* program the stream_tag */
	val = azx_sd_readl(chip, azx_dev, SD_CTL);
	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
	/* non-snooped transfers additionally get the traffic priority bit */
	if (!azx_snoop(chip))
		val |= SD_CTL_TRAFFIC_PRIO;
	azx_sd_writel(chip, azx_dev, SD_CTL, val);

	/* program the length of samples in cyclic buffer */
	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);

	/* program the stream format */
	/* this value needs to be the same as the one programmed */
	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);

	/* program the stream LVI (last valid index) of the BDL */
	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);

	/* program the BDL address */
	/* lower BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
	/* upper BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPU,
		      upper_32_bits(azx_dev->bdl.addr));

	/* enable the position buffer (only written once; the enable bit
	 * is checked first so an already-programmed base is kept) */
	if (chip->position_fix[0] != POS_FIX_LPIB ||
	    chip->position_fix[1] != POS_FIX_LPIB) {
		if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
			azx_writel(chip, DPLBASE,
				(u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
	}

	/* set the interrupt enable bits in the descriptor control register */
	azx_sd_writel(chip, azx_dev, SD_CTL,
		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);

	return 0;
}
05e84878
DR
166
/*
 * assign a stream for the PCM
 *
 * Preferred pick is a free, unlocked stream whose assigned_key matches
 * this substream, so a re-opened device gets its previous engine back.
 * Failing that, 'res' ends up holding the last free stream scanned and
 * that one is claimed.  Returns NULL when no stream is available.
 */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	int dev, i, nums;
	struct azx_dev *res = NULL;
	/* make a non-zero unique key for the substream */
	int key = (substream->pcm->device << 16) | (substream->number << 2) |
		(substream->stream + 1);

	/* playback and capture engines occupy disjoint index ranges */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dev = chip->playback_index_offset;
		nums = chip->playback_streams;
	} else {
		dev = chip->capture_index_offset;
		nums = chip->capture_streams;
	}
	for (i = 0; i < nums; i++, dev++) {
		struct azx_dev *azx_dev = &chip->azx_dev[dev];
		dsp_lock(azx_dev);
		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
			res = azx_dev;
			if (res->assigned_key == key) {
				res->opened = 1;
				res->assigned_key = key;
				dsp_unlock(azx_dev);
				return azx_dev;
			}
		}
		dsp_unlock(azx_dev);
	}
	if (res) {
		/* NOTE(review): the lock is re-taken here but
		 * dsp_is_locked() is not rechecked; confirm the DSP loader
		 * cannot claim this stream between the scan and this point */
		dsp_lock(res);
		res->opened = 1;
		res->assigned_key = key;
		dsp_unlock(res);
	}
	return res;
}
206
/* release the assigned stream: mark it free for azx_assign_device() */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	azx_dev->opened = 0;
}
212
/*
 * cyclecounter read callback: returns the controller's free-running
 * 32-bit WALLCLK register (24 MHz wall clock, see azx_timecounter_init()).
 */
static cycle_t azx_cc_read(const struct cyclecounter *cc)
{
	struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
	struct snd_pcm_substream *substream = azx_dev->substream;
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;

	return azx_readl(chip, WALLCLK);
}
222
/*
 * (re)start the per-stream timecounter used for wall-clock timestamps.
 * @force together with @last overrides the start cycle, so every stream
 * in a synchronized-start group measures from the same instant.
 */
static void azx_timecounter_init(struct snd_pcm_substream *substream,
				bool force, cycle_t last)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct timecounter *tc = &azx_dev->azx_tc;
	struct cyclecounter *cc = &azx_dev->azx_cc;
	u64 nsec;

	cc->read = azx_cc_read;
	cc->mask = CLOCKSOURCE_MASK(32);

	/*
	 * Converting from 24 MHz to ns means applying a 125/3 factor.
	 * To avoid any saturation issues in intermediate operations,
	 * the 125 factor is applied first. The division is applied
	 * last after reading the timecounter value.
	 * Applying the 1/3 factor as part of the multiplication
	 * requires at least 20 bits for a decent precision, however
	 * overflows occur after about 4 hours or less, not a option.
	 */

	cc->mult = 125; /* saturation after 195 years */
	cc->shift = 0;

	nsec = 0; /* audio time is elapsed time since trigger */
	timecounter_init(tc, cc, nsec);
	if (force)
		/*
		 * force timecounter to use predefined value,
		 * used for synchronized starts
		 */
		tc->cycle_last = last;
}
256
257static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
258 u64 nsec)
259{
260 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
261 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
262 u64 codec_frames, codec_nsecs;
263
264 if (!hinfo->ops.get_delay)
265 return nsec;
266
267 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
268 codec_nsecs = div_u64(codec_frames * 1000000000LL,
269 substream->runtime->rate);
270
271 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
272 return nsec + codec_nsecs;
273
274 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
275}
276
/*
 * set up a BDL entry
 *
 * Append buffer descriptor list entries covering @size bytes of @dmab
 * starting at byte offset @ofs.  Each entry is four little-endian u32
 * words: address low, address high, length, IOC flag.  *bdlp is advanced
 * past the entries written; azx_dev->frags counts them.  @with_ioc asks
 * for an interrupt when the final chunk of this fragment completes.
 *
 * Returns the new offset on success, or -EINVAL once AZX_MAX_BDL_ENTRIES
 * would be exceeded.
 */
static int setup_bdle(struct azx *chip,
		      struct snd_dma_buffer *dmab,
		      struct azx_dev *azx_dev, u32 **bdlp,
		      int ofs, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	while (size > 0) {
		dma_addr_t addr;
		int chunk;

		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
			return -EINVAL;

		addr = snd_sgbuf_get_addr(dmab, ofs);
		/* program the address field of the BDL entry */
		bdl[0] = cpu_to_le32((u32)addr);
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		/* program the size field of the BDL entry */
		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
		/* one BDLE cannot cross 4K boundary on CTHDA chips */
		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
			u32 remain = 0x1000 - (ofs & 0xfff);
			if (chunk > remain)
				chunk = remain;
		}
		bdl[2] = cpu_to_le32(chunk);
		/* program the IOC to enable interrupt
		 * only when the whole fragment is processed
		 */
		size -= chunk;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
		bdl += 4;
		azx_dev->frags++;
		ofs += chunk;
	}
	*bdlp = bdl;
	return ofs;
}
05e84878
DR
319
/*
 * set up BDL entries
 *
 * Build the full buffer descriptor list for the stream: one fragment per
 * period, with an optional small leading fragment (bdl_pos_adj) that
 * shifts the period interrupts so position reporting is more accurate.
 * Returns 0 on success or -EINVAL when the BDL table would overflow.
 */
static int azx_setup_periods(struct azx *chip,
			     struct snd_pcm_substream *substream,
			     struct azx_dev *azx_dev)
{
	u32 *bdl;
	int i, ofs, periods, period_bytes;
	int pos_adj = 0;

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	period_bytes = azx_dev->period_bytes;
	periods = azx_dev->bufsize / period_bytes;

	/* program the initial BDL entries */
	bdl = (u32 *)azx_dev->bdl.area;
	ofs = 0;
	azx_dev->frags = 0;

	if (chip->bdl_pos_adj)
		pos_adj = chip->bdl_pos_adj[chip->dev_index];
	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
		struct snd_pcm_runtime *runtime = substream->runtime;
		int pos_align = pos_adj;
		/* scale the adjustment (given in 48kHz frames) to the
		 * actual rate, rounding up, then round up to a multiple
		 * of the original alignment */
		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
		if (!pos_adj)
			pos_adj = pos_align;
		else
			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
				pos_align;
		pos_adj = frames_to_bytes(runtime, pos_adj);
		if (pos_adj >= period_bytes) {
			/* adjustment cannot swallow a whole period */
			dev_warn(chip->card->dev,"Too big adjustment %d\n",
				 pos_adj);
			pos_adj = 0;
		} else {
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev,
					 &bdl, ofs, pos_adj, true);
			if (ofs < 0)
				goto error;
		}
	} else
		pos_adj = 0;

	for (i = 0; i < periods; i++) {
		/* the last period is shortened by pos_adj so the total
		 * still equals the cyclic buffer size */
		if (i == periods - 1 && pos_adj)
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev, &bdl, ofs,
					 period_bytes - pos_adj, 0);
		else
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev, &bdl, ofs,
					 period_bytes,
					 !azx_dev->no_period_wakeup);
		if (ofs < 0)
			goto error;
	}
	return 0;

 error:
	dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
		azx_dev->bufsize, period_bytes);
	return -EINVAL;
}
389
390/*
391 * PCM ops
392 */
393
/*
 * PCM close callback: detach the stream from the engine under reg_lock,
 * free the engine, close the codec side and drop the power reference
 * taken in azx_pcm_open().  Serialized by open_mutex against open.
 */
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = NULL;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	azx_release_device(azx_dev);
	hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
413
414static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
415 struct snd_pcm_hw_params *hw_params)
416{
417 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
418 struct azx *chip = apcm->chip;
419 int ret;
420
421 dsp_lock(get_azx_dev(substream));
422 if (dsp_is_locked(get_azx_dev(substream))) {
423 ret = -EBUSY;
424 goto unlock;
425 }
426
427 ret = chip->ops->substream_alloc_pages(chip, substream,
428 params_buffer_bytes(hw_params));
429unlock:
430 dsp_unlock(get_azx_dev(substream));
431 return ret;
432}
433
/*
 * hw_free callback: reset the stream registers and cached parameters
 * (skipped while the DSP loader owns the stream), clean up the codec
 * side, and release the DMA buffer pages.
 */
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx *chip = apcm->chip;
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev)) {
		azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
		azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
		azx_sd_writel(chip, azx_dev, SD_CTL, 0);
		/* invalidate cached parameters so the next prepare
		 * reprograms everything */
		azx_dev->bufsize = 0;
		azx_dev->period_bytes = 0;
		azx_dev->format_val = 0;
	}

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = chip->ops->substream_free_pages(chip, substream);
	azx_dev->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}
460
/*
 * prepare callback: reset the stream, compute the HDA format value and,
 * when any parameter changed, rebuild the BDL; then program the stream
 * descriptor and prepare the codec with the (possibly remapped) stream
 * tag.  Returns 0 on success; -EBUSY while the DSP loader holds the
 * stream, -EINVAL for an unsupported format.
 */
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int bufsize, period_bytes, format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	azx_stream_reset(chip, azx_dev);
	format_val = snd_hda_calc_stream_format(runtime->rate,
						runtime->channels,
						runtime->format,
						hinfo->maxbps,
						ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	bufsize = snd_pcm_lib_buffer_bytes(substream);
	period_bytes = snd_pcm_lib_period_bytes(substream);

	dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
		bufsize, format_val);

	/* rebuild the BDL only when something actually changed */
	if (bufsize != azx_dev->bufsize ||
	    period_bytes != azx_dev->period_bytes ||
	    format_val != azx_dev->format_val ||
	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
		azx_dev->bufsize = bufsize;
		azx_dev->period_bytes = period_bytes;
		azx_dev->format_val = format_val;
		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
		err = azx_setup_periods(chip, substream, azx_dev);
		if (err < 0)
			goto unlock;
	}

	/* when LPIB delay correction gives a small negative value,
	 * we ignore it; currently set the threshold statically to
	 * 64 frames
	 */
	if (runtime->period_size > 64)
		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
	else
		azx_dev->delay_negative_threshold = 0;

	/* wallclk has 24Mhz clock source */
	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
						runtime->rate) * 1000);
	azx_setup_controller(chip, azx_dev);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		azx_dev->fifo_size =
			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
	else
		azx_dev->fifo_size = 0;

	stream_tag = azx_dev->stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->format_val, substream);

 unlock:
	/* every path above assigns err before reaching here */
	if (!err)
		azx_dev->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}
546
/*
 * trigger callback: start or stop every substream linked in the group
 * (on this card) atomically.  SSYNC bits are set so the linked streams
 * start in lock-step, then the code busy-waits for the FIFOs to become
 * ready (start) or the RUN bits to clear (stop), and finally clears the
 * SSYNC bits and (re)initializes the wall-clock timecounters.
 */
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	int rstart = 0, start, nsync = 0, sbits = 0;
	int nwait, timeout;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		rstart = 1;
		/* fall through */
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = 1;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = 0;
		break;
	default:
		return -EINVAL;
	}

	/* collect the sync bits of every linked stream on this card */
	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->index;
		nsync++;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&chip->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			azx_readl(chip, OLD_SSYNC) | sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
			/* resume/pause-release: back-date the start so
			 * elapsed time stays continuous */
			if (!rstart)
				azx_dev->start_wallclk -=
						azx_dev->period_wallclk;
			azx_stream_start(chip, azx_dev);
		} else {
			azx_stream_stop(chip, azx_dev);
		}
		azx_dev->running = start;
	}
	spin_unlock(&chip->reg_lock);
	if (start) {
		/* wait until all FIFOs get ready */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
				      SD_STS_FIFO_READY))
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	} else {
		/* wait until all RUN bits are cleared */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (azx_sd_readb(chip, azx_dev, SD_CTL) &
				    SD_CTL_DMA_START)
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	}
	spin_lock(&chip->reg_lock);
	/* reset SYNC bits */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			azx_readl(chip, OLD_SSYNC) & ~sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
	if (start) {
		azx_timecounter_init(substream, 0, 0);
		if (nsync > 1) {
			cycle_t cycle_last;

			/* same start cycle for master and group */
			azx_dev = get_azx_dev(substream);
			cycle_last = azx_dev->azx_tc.cycle_last;

			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_timecounter_init(s, 1, cycle_last);
			}
		}
	}
	spin_unlock(&chip->reg_lock);
	return 0;
}
671
/*
 * get the current DMA position with correction on VIA chips
 *
 * Playback can use the link position (LPIB) directly.  For capture, the
 * position-buffer value is reduced modulo the period size and re-anchored
 * to the nearest preceding period boundary derived from the link position
 * minus the capture FIFO size.
 */
static unsigned int azx_via_get_position(struct azx *chip,
					 struct azx_dev *azx_dev)
{
	unsigned int link_pos, mini_pos, bound_pos;
	unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
	unsigned int fifo_size;

	link_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
	if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* Playback, no problem using link position */
		return link_pos;
	}

	/* Capture */
	/* For new chipset,
	 * use mod to get the DMA position just like old chipset
	 */
	mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
	mod_dma_pos %= azx_dev->period_bytes;

	/* azx_dev->fifo_size can't get FIFO size of in stream.
	 * Get from base address + offset.
	 */
	fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);

	if (azx_dev->insufficient) {
		/* Link position is never greater than FIFO size while the
		 * FIFO is still filling after stream start */
		if (link_pos <= fifo_size)
			return 0;

		azx_dev->insufficient = 0;
	}

	/* position of the last byte that has left the FIFO, wrapping
	 * around the cyclic buffer */
	if (link_pos <= fifo_size)
		mini_pos = azx_dev->bufsize + link_pos - fifo_size;
	else
		mini_pos = link_pos - fifo_size;

	/* Find nearest previous boundary */
	mod_mini_pos = mini_pos % azx_dev->period_bytes;
	mod_link_pos = link_pos % azx_dev->period_bytes;
	if (mod_link_pos >= fifo_size)
		bound_pos = link_pos - mod_link_pos;
	else if (mod_dma_pos >= mod_mini_pos)
		bound_pos = mini_pos - mod_mini_pos;
	else {
		bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
		if (bound_pos >= azx_dev->bufsize)
			bound_pos = 0;
	}

	/* Calculate real DMA position we want */
	return bound_pos + mod_dma_pos;
}
727
/*
 * Return the current stream position in bytes, picked from LPIB, the
 * VIA workaround, or the DMA position buffer depending on
 * chip->position_fix.  With @with_check and POS_FIX_AUTO, an obviously
 * bogus position buffer demotes the stream to LPIB permanently.  As a
 * side effect, updates substream->runtime->delay with the LPIB-derived
 * and codec-reported delays.
 */
unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev,
			      bool with_check)
{
	struct snd_pcm_substream *substream = azx_dev->substream;
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	unsigned int pos;
	int stream = substream->stream;
	struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
	int delay = 0;

	switch (chip->position_fix[stream]) {
	case POS_FIX_LPIB:
		/* read LPIB */
		pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
		break;
	case POS_FIX_VIACOMBO:
		pos = azx_via_get_position(chip, azx_dev);
		break;
	default:
		/* use the position buffer */
		pos = le32_to_cpu(*azx_dev->posbuf);
		if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
			if (!pos || pos == (u32)-1) {
				dev_info(chip->card->dev,
					 "Invalid position buffer, using LPIB read method instead.\n");
				chip->position_fix[stream] = POS_FIX_LPIB;
				pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
			} else
				chip->position_fix[stream] = POS_FIX_POSBUF;
		}
		break;
	}

	if (pos >= azx_dev->bufsize)
		pos = 0;

	/* calculate runtime delay from LPIB */
	if (substream->runtime &&
	    chip->position_fix[stream] == POS_FIX_POSBUF &&
	    (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
		unsigned int lpib_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
			delay = pos - lpib_pos;
		else
			delay = lpib_pos - pos;
		if (delay < 0) {
			/* small negatives are jitter and get ignored;
			 * larger ones mean the counters wrapped */
			if (delay >= azx_dev->delay_negative_threshold)
				delay = 0;
			else
				delay += azx_dev->bufsize;
		}
		if (delay >= azx_dev->period_bytes) {
			dev_info(chip->card->dev,
				 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
				 delay, azx_dev->period_bytes);
			delay = 0;
			chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
		}
		delay = bytes_to_frames(substream->runtime, delay);
	}

	if (substream->runtime) {
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);
801
802static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
803{
804 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
805 struct azx *chip = apcm->chip;
806 struct azx_dev *azx_dev = get_azx_dev(substream);
807 return bytes_to_frames(substream->runtime,
808 azx_get_position(chip, azx_dev, false));
809}
810
811static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
812 struct timespec *ts)
813{
814 struct azx_dev *azx_dev = get_azx_dev(substream);
815 u64 nsec;
816
817 nsec = timecounter_read(&azx_dev->azx_tc);
818 nsec = div_u64(nsec, 3); /* can be optimized */
819 nsec = azx_adjust_codec_delay(substream, nsec);
820
821 *ts = ns_to_timespec(nsec);
822
823 return 0;
824}
825
/*
 * Default hardware description; azx_pcm_open() overrides channels,
 * formats and rates with the codec's actual capabilities.
 */
static struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
850
/*
 * PCM open callback: grab a free stream engine, merge the codec's
 * capabilities into the runtime hardware description, apply buffer-size
 * constraints, power up the codec and open its stream.  On any failure
 * the engine and power reference are released again.  Returns 0 or a
 * negative error code (-EBUSY when no engine is free).
 */
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	int err;
	int buff_step;

	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	if (azx_dev == NULL) {
		mutex_unlock(&chip->open_mutex);
		return -EBUSY;
	}
	/* start from the defaults, then narrow down with codec caps */
	runtime->hw = azx_pcm_hw;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up_d3wait(apcm->codec);
	err = hinfo->ops.open(hinfo, apcm->codec, substream);
	if (err < 0) {
		azx_release_device(azx_dev);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return err;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check: the codec open op must have left usable caps */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		hinfo->ops.close(hinfo, apcm->codec, substream);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return -EINVAL;
	}

	/* disable WALLCLOCK timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;

	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = substream;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);

	runtime->private_data = azx_dev;
	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
936
/*
 * mmap callback: let the backend tweak the vma first (optional hook),
 * then fall back to the generic ALSA mmap implementation.
 */
static int azx_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *area)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	if (chip->ops->pcm_mmap_prepare)
		chip->ops->pcm_mmap_prepare(substream, area);
	return snd_pcm_lib_default_mmap(substream, area);
}
946
/* PCM operation table shared by all azx streams */
static struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.wall_clock = azx_get_wallclock_tstamp,
	.mmap = azx_pcm_mmap,
	.page = snd_pcm_sgbuf_ops_page,
};
960
961static void azx_pcm_free(struct snd_pcm *pcm)
962{
963 struct azx_pcm *apcm = pcm->private_data;
964 if (apcm) {
965 list_del(&apcm->list);
966 kfree(apcm);
967 }
968}
969
/* upper bound for preallocated PCM buffer memory */
#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)

/*
 * Create an ALSA PCM device for the codec PCM @cpcm, wire up the azx PCM
 * ops for each direction that has substreams, and preallocate SG buffer
 * pages.  Returns 0, -EBUSY if the device number is already taken, or a
 * negative error from snd_pcm_new()/allocation.
 */
int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
			  struct hda_pcm *cpcm)
{
	struct azx *chip = bus->private_data;
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;

	/* refuse duplicate device numbers */
	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	/* NOTE(review): on this error path the pcm created above stays
	 * attached to the card; presumably it is released together with
	 * the card — confirm */
	if (apcm == NULL)
		return -ENOMEM;
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	pcm->private_data = apcm;
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		apcm->hinfo[s] = &cpcm->stream[s];
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	/* link to codec */
	pcm->dev = &codec->dev;
	return 0;
}
EXPORT_SYMBOL_GPL(azx_attach_pcm_stream);
1025
6e85dddc
DR
/*
 * CORB / RIRB interface
 */

/*
 * Allocate the shared DMA page holding both the CORB and the RIRB
 * ring buffers (chip->rb).  Returns 0 or a negative error.
 */
int azx_alloc_cmd_io(struct azx *chip)
{
	int err;

	/* single page (at least 4096 bytes) must suffice for both ringbuffes */
	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
					 PAGE_SIZE, &chip->rb);
	if (err < 0)
		dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
	return err;
}
EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
1041
/*
 * Program the CORB and RIRB hardware from the page allocated by
 * azx_alloc_cmd_io(): CORB in the first 2048 bytes, RIRB in the second.
 * Performs the two-step CORBRP reset handshake required by the HDA spec
 * and finally enables both DMA engines plus the RIRB response interrupt.
 * Runs entirely under reg_lock.
 */
void azx_init_cmd_io(struct azx *chip)
{
	int timeout;

	spin_lock_irq(&chip->reg_lock);
	/* CORB set up */
	chip->corb.addr = chip->rb.addr;
	chip->corb.buf = (u32 *)chip->rb.area;
	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));

	/* set the corb size to 256 entries (ULI requires explicitly) */
	azx_writeb(chip, CORBSIZE, 0x02);
	/* set the corb write pointer to 0 */
	azx_writew(chip, CORBWP, 0);

	/* reset the corb hw read pointer: first set the reset bit and
	 * wait for the hardware to latch it... */
	azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
	for (timeout = 1000; timeout > 0; timeout--) {
		if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
			break;
		udelay(1);
	}
	if (timeout <= 0)
		dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
			azx_readw(chip, CORBRP));

	/* ...then clear it again and wait for the pointer to read 0 */
	azx_writew(chip, CORBRP, 0);
	for (timeout = 1000; timeout > 0; timeout--) {
		if (azx_readw(chip, CORBRP) == 0)
			break;
		udelay(1);
	}
	if (timeout <= 0)
		dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
			azx_readw(chip, CORBRP));

	/* enable corb dma */
	azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);

	/* RIRB set up (second half of the shared page) */
	chip->rirb.addr = chip->rb.addr + 2048;
	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
	chip->rirb.wp = chip->rirb.rp = 0;
	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));

	/* set the rirb size to 256 entries (ULI requires explicitly) */
	azx_writeb(chip, RIRBSIZE, 0x02);
	/* reset the rirb hw write pointer */
	azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
	/* set N=1, get RIRB response interrupt for new entry */
	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
		azx_writew(chip, RINTCNT, 0xc0);
	else
		azx_writew(chip, RINTCNT, 1);
	/* enable rirb dma and response irq */
	azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
	spin_unlock_irq(&chip->reg_lock);
}
EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1104
1105void azx_free_cmd_io(struct azx *chip)
1106{
1107 spin_lock_irq(&chip->reg_lock);
1108 /* disable ringbuffer DMAs */
1109 azx_writeb(chip, RIRBCTL, 0);
1110 azx_writeb(chip, CORBCTL, 0);
1111 spin_unlock_irq(&chip->reg_lock);
1112}
1113EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1114
1115static unsigned int azx_command_addr(u32 cmd)
1116{
1117 unsigned int addr = cmd >> 28;
1118
1119 if (addr >= AZX_MAX_CODECS) {
1120 snd_BUG();
1121 addr = 0;
1122 }
1123
1124 return addr;
1125}
1126
/* send a command via the CORB ring buffer
 *
 * Queues one verb for the codec addressed by the top nibble of @val and
 * increments the per-codec pending-command counter that azx_update_rirb()
 * decrements when the matching response arrives.
 * Returns 0 on success, -EIO if the controller looks powered down, or
 * -EAGAIN if the ring is full.
 */
static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
{
	struct azx *chip = bus->private_data;
	unsigned int addr = azx_command_addr(val);
	unsigned int wp, rp;

	spin_lock_irq(&chip->reg_lock);

	/* add command to corb */
	wp = azx_readw(chip, CORBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		spin_unlock_irq(&chip->reg_lock);
		return -EIO;
	}
	/* advance the write pointer, wrapping around the ring */
	wp++;
	wp %= ICH6_MAX_CORB_ENTRIES;

	rp = azx_readw(chip, CORBRP);
	if (wp == rp) {
		/* oops, it's full */
		spin_unlock_irq(&chip->reg_lock);
		return -EAGAIN;
	}

	/* count the command as pending before handing it to the hardware */
	chip->rirb.cmds[addr]++;
	chip->corb.buf[wp] = cpu_to_le32(val);
	azx_writew(chip, CORBWP, wp);

	spin_unlock_irq(&chip->reg_lock);

	return 0;
}
1161
1162#define ICH6_RIRB_EX_UNSOL_EV (1<<4)
1163
/* retrieve RIRB entries - called from the interrupt handler (and from
 * azx_rirb_get_response() when polling)
 *
 * Drains all new 8-byte entries between the cached read pointer and the
 * hardware write pointer, dispatching each one as either an unsolicited
 * event or the response to a pending command.  Caller holds reg_lock.
 */
void azx_update_rirb(struct azx *chip)
{
	unsigned int rp, wp;
	unsigned int addr;
	u32 res, res_ex;

	wp = azx_readw(chip, RIRBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		return;
	}

	/* nothing new since the last call? */
	if (wp == chip->rirb.wp)
		return;
	chip->rirb.wp = wp;

	while (chip->rirb.rp != wp) {
		chip->rirb.rp++;
		chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;

		rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
		res = le32_to_cpu(chip->rirb.buf[rp]);
		/* low nibble of the extended word holds the codec address */
		addr = res_ex & 0xf;
		if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
				res, res_ex,
				chip->rirb.rp, wp);
			snd_BUG();
		}
		else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
			snd_hda_queue_unsol_event(chip->bus, res, res_ex);
		else if (chip->rirb.cmds[addr]) {
			/* publish the response before clearing the pending
			 * counter, pairing with the smp_rmb() in the reader
			 */
			chip->rirb.res[addr] = res;
			smp_wmb();
			chip->rirb.cmds[addr]--;
		} else if (printk_ratelimit()) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
				res, res_ex,
				chip->last_cmd[addr]);
		}
	}
}
1208EXPORT_SYMBOL_GPL(azx_update_rirb);
1209
1210/* receive a response */
1211static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1212 unsigned int addr)
1213{
1214 struct azx *chip = bus->private_data;
1215 unsigned long timeout;
1216 unsigned long loopcounter;
1217 int do_poll = 0;
1218
1219 again:
1220 timeout = jiffies + msecs_to_jiffies(1000);
1221
1222 for (loopcounter = 0;; loopcounter++) {
1223 if (chip->polling_mode || do_poll) {
1224 spin_lock_irq(&chip->reg_lock);
1225 azx_update_rirb(chip);
1226 spin_unlock_irq(&chip->reg_lock);
1227 }
1228 if (!chip->rirb.cmds[addr]) {
1229 smp_rmb();
1230 bus->rirb_error = 0;
1231
1232 if (!do_poll)
1233 chip->poll_count = 0;
1234 return chip->rirb.res[addr]; /* the last value */
1235 }
1236 if (time_after(jiffies, timeout))
1237 break;
1238 if (bus->needs_damn_long_delay || loopcounter > 3000)
1239 msleep(2); /* temporary workaround */
1240 else {
1241 udelay(10);
1242 cond_resched();
1243 }
1244 }
1245
1246 if (!bus->no_response_fallback)
1247 return -1;
1248
1249 if (!chip->polling_mode && chip->poll_count < 2) {
1250 dev_dbg(chip->card->dev,
1251 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1252 chip->last_cmd[addr]);
1253 do_poll = 1;
1254 chip->poll_count++;
1255 goto again;
1256 }
1257
1258
1259 if (!chip->polling_mode) {
1260 dev_warn(chip->card->dev,
1261 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1262 chip->last_cmd[addr]);
1263 chip->polling_mode = 1;
1264 goto again;
1265 }
1266
1267 if (chip->msi) {
1268 dev_warn(chip->card->dev,
1269 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1270 chip->last_cmd[addr]);
1271 if (chip->ops->disable_msi_reset_irq(chip) &&
1272 chip->ops->disable_msi_reset_irq(chip) < 0) {
1273 bus->rirb_error = 1;
1274 return -1;
1275 }
1276 goto again;
1277 }
1278
1279 if (chip->probing) {
1280 /* If this critical timeout happens during the codec probing
1281 * phase, this is likely an access to a non-existing codec
1282 * slot. Better to return an error and reset the system.
1283 */
1284 return -1;
1285 }
1286
1287 /* a fatal communication error; need either to reset or to fallback
1288 * to the single_cmd mode
1289 */
1290 bus->rirb_error = 1;
1291 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1292 bus->response_reset = 1;
1293 return -1; /* give a chance to retry */
1294 }
1295
1296 dev_err(chip->card->dev,
1297 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1298 chip->last_cmd[addr]);
1299 chip->single_cmd = 1;
1300 bus->response_reset = 0;
1301 /* release CORB/RIRB */
1302 azx_free_cmd_io(chip);
1303 /* disable unsolicited responses */
1304 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
1305 return -1;
1306}
1307
1308/*
1309 * Use the single immediate command instead of CORB/RIRB for simplicity
1310 *
1311 * Note: according to Intel, this is not preferred use. The command was
1312 * intended for the BIOS only, and may get confused with unsolicited
1313 * responses. So, we shouldn't use it for normal operation from the
1314 * driver.
1315 * I left the codes, however, for debugging/testing purposes.
1316 */
1317
1318/* receive a response */
1319static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1320{
1321 int timeout = 50;
1322
1323 while (timeout--) {
1324 /* check IRV busy bit */
1325 if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
1326 /* reuse rirb.res as the response return value */
1327 chip->rirb.res[addr] = azx_readl(chip, IR);
1328 return 0;
1329 }
1330 udelay(1);
1331 }
1332 if (printk_ratelimit())
1333 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1334 azx_readw(chip, IRS));
1335 chip->rirb.res[addr] = -1;
1336 return -EIO;
1337}
1338
/* send a command over the immediate-command interface
 *
 * Waits up to ~50us for the ICB busy bit to clear, then writes the verb
 * and sets the busy bit to kick off the transfer, finally waiting for
 * the response.  Returns 0 on success or -EIO on timeout.
 */
static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
{
	struct azx *chip = bus->private_data;
	unsigned int addr = azx_command_addr(val);
	int timeout = 50;

	bus->rirb_error = 0;
	while (timeout--) {
		/* check ICB busy bit */
		if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
			/* Clear IRV valid bit (write-1-to-clear) before
			 * issuing the new command
			 */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   ICH6_IRS_VALID);
			azx_writel(chip, IC, val);
			/* setting the busy bit starts the transfer */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   ICH6_IRS_BUSY);
			return azx_single_wait_for_response(chip, addr);
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev,
			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
			azx_readw(chip, IRS), val);
	return -EIO;
}
1366
1367/* receive a response */
1368static unsigned int azx_single_get_response(struct hda_bus *bus,
1369 unsigned int addr)
1370{
1371 struct azx *chip = bus->private_data;
1372 return chip->rirb.res[addr];
1373}
1374
1375/*
1376 * The below are the main callbacks from hda_codec.
1377 *
1378 * They are just the skeleton to call sub-callbacks according to the
1379 * current setting of chip->single_cmd.
1380 */
1381
1382/* send a command */
1383int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1384{
1385 struct azx *chip = bus->private_data;
1386
1387 if (chip->disabled)
1388 return 0;
1389 chip->last_cmd[azx_command_addr(val)] = val;
1390 if (chip->single_cmd)
1391 return azx_single_send_cmd(bus, val);
1392 else
1393 return azx_corb_send_cmd(bus, val);
1394}
1395EXPORT_SYMBOL_GPL(azx_send_cmd);
1396
1397/* get a response */
1398unsigned int azx_get_response(struct hda_bus *bus,
1399 unsigned int addr)
1400{
1401 struct azx *chip = bus->private_data;
1402 if (chip->disabled)
1403 return 0;
1404 if (chip->single_cmd)
1405 return azx_single_get_response(bus, addr);
1406 else
1407 return azx_rirb_get_response(bus, addr);
1408}
1409EXPORT_SYMBOL_GPL(azx_get_response);
1410
2b5fd6c2
DR
1411#ifdef CONFIG_SND_HDA_DSP_LOADER
1412/*
1413 * DSP loading code (e.g. for CA0132)
1414 */
1415
1416/* use the first stream for loading DSP */
1417static struct azx_dev *
1418azx_get_dsp_loader_dev(struct azx *chip)
1419{
1420 return &chip->azx_dev[chip->playback_index_offset];
1421}
1422
/* Prepare the DSP-loader stream: reserve the loader stream device,
 * allocate a DMA buffer of @byte_size in *@bufp, build a single-entry
 * BDL and program the stream registers.
 *
 * On success the stream tag (> 0) is returned and the stream stays
 * locked against normal PCM use until azx_load_dsp_cleanup().  The
 * previous stream state is stashed in chip->saved_azx_dev so it can
 * be restored on cleanup/error.  Returns a negative error code on
 * failure (with full rollback).
 */
int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
			 unsigned int byte_size,
			 struct snd_dma_buffer *bufp)
{
	u32 *bdl;
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);

	/* lock order: dsp_mutex first, then reg_lock */
	dsp_lock(azx_dev);
	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->running || azx_dev->locked) {
		/* stream is busy with normal PCM or another DSP load */
		spin_unlock_irq(&chip->reg_lock);
		err = -EBUSY;
		goto unlock;
	}
	azx_dev->prepared = 0;
	/* snapshot the stream state so cleanup can restore it */
	chip->saved_azx_dev = *azx_dev;
	azx_dev->locked = 1;
	spin_unlock_irq(&chip->reg_lock);

	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
					 byte_size, bufp);
	if (err < 0)
		goto err_alloc;

	/* the whole image is transferred as one period */
	azx_dev->bufsize = byte_size;
	azx_dev->period_bytes = byte_size;
	azx_dev->format_val = format;

	azx_stream_reset(chip, azx_dev);

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	azx_dev->frags = 0;
	bdl = (u32 *)azx_dev->bdl.area;
	err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
	if (err < 0)
		goto error;

	azx_setup_controller(chip, azx_dev);
	dsp_unlock(azx_dev);
	return azx_dev->stream_tag;

 error:
	chip->ops->dma_free_pages(chip, bufp);
 err_alloc:
	spin_lock_irq(&chip->reg_lock);
	/* restore the saved state only if normal PCM has the stream open */
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
 unlock:
	dsp_unlock(azx_dev);
	return err;
}
1483EXPORT_SYMBOL_GPL(azx_load_dsp_prepare);
1484
1485void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1486{
1487 struct azx *chip = bus->private_data;
1488 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1489
1490 if (start)
1491 azx_stream_start(chip, azx_dev);
1492 else
1493 azx_stream_stop(chip, azx_dev);
1494 azx_dev->running = start;
1495}
1496EXPORT_SYMBOL_GPL(azx_load_dsp_trigger);
1497
/* Tear down the DSP-loader stream: clear the stream registers, free the
 * DMA buffer allocated by azx_load_dsp_prepare() and restore the saved
 * stream state so normal PCM use can resume.  A no-op if the buffer was
 * never allocated or the stream is not locked.
 */
void azx_load_dsp_cleanup(struct hda_bus *bus,
			  struct snd_dma_buffer *dmab)
{
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	if (!dmab->area || !azx_dev->locked)
		return;

	dsp_lock(azx_dev);
	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
	azx_dev->bufsize = 0;
	azx_dev->period_bytes = 0;
	azx_dev->format_val = 0;

	chip->ops->dma_free_pages(chip, dmab);
	/* mark the buffer gone so a repeated cleanup is a no-op */
	dmab->area = NULL;

	spin_lock_irq(&chip->reg_lock);
	/* restore the saved state only if normal PCM has the stream open */
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
	dsp_unlock(azx_dev);
}
1526EXPORT_SYMBOL_GPL(azx_load_dsp_cleanup);
1527#endif /* CONFIG_SND_HDA_DSP_LOADER */
1528
67908994
DR
1529int azx_alloc_stream_pages(struct azx *chip)
1530{
1531 int i, err;
1532 struct snd_card *card = chip->card;
1533
1534 for (i = 0; i < chip->num_streams; i++) {
1535 dsp_lock_init(&chip->azx_dev[i]);
1536 /* allocate memory for the BDL for each stream */
1537 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1538 BDL_SIZE,
1539 &chip->azx_dev[i].bdl);
1540 if (err < 0) {
1541 dev_err(card->dev, "cannot allocate BDL\n");
1542 return -ENOMEM;
1543 }
1544 }
1545 /* allocate memory for the position buffer */
1546 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1547 chip->num_streams * 8, &chip->posbuf);
1548 if (err < 0) {
1549 dev_err(card->dev, "cannot allocate posbuf\n");
1550 return -ENOMEM;
1551 }
1552 return 0;
1553}
1554EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1555
1556void azx_free_stream_pages(struct azx *chip)
1557{
1558 int i;
1559 if (chip->azx_dev) {
1560 for (i = 0; i < chip->num_streams; i++)
1561 if (chip->azx_dev[i].bdl.area)
1562 chip->ops->dma_free_pages(
1563 chip, &chip->azx_dev[i].bdl);
1564 }
1565 if (chip->rb.area)
1566 chip->ops->dma_free_pages(chip, &chip->rb);
1567 if (chip->posbuf.area)
1568 chip->ops->dma_free_pages(chip, &chip->posbuf);
1569}
1570EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1571
05e84878
DR
1572MODULE_LICENSE("GPL");
1573MODULE_DESCRIPTION("Common HDA driver funcitons");
This page took 0.090341 seconds and 5 git commands to generate.