ALSA: hda - Add new GPU codec ID 0x10de0070 to snd-hda
sound/pci/hda/hda_controller.c
1/*
2 *
 3 * Implementation of the primary ALSA driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23#include <linux/clocksource.h>
24#include <linux/delay.h>
 25#include <linux/interrupt.h>
26#include <linux/kernel.h>
27#include <linux/module.h>
 28#include <linux/pm_runtime.h>
29#include <linux/slab.h>
30#include <sound/core.h>
31#include <sound/initval.h>
32#include "hda_priv.h"
33#include "hda_controller.h"
34
35#define CREATE_TRACE_POINTS
36#include "hda_intel_trace.h"
37
38/* DSP lock helpers */
39#ifdef CONFIG_SND_HDA_DSP_LOADER
40#define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
41#define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
42#define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
43#define dsp_is_locked(dev) ((dev)->locked)
44#else
45#define dsp_lock_init(dev) do {} while (0)
46#define dsp_lock(dev) do {} while (0)
47#define dsp_unlock(dev) do {} while (0)
48#define dsp_is_locked(dev) 0
49#endif
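/*
 * Without CONFIG_SND_HDA_DSP_LOADER the helpers above compile away to
 * no-ops, so callers can use them unconditionally.
 */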
50
51/*
52 * AZX stream operations.
53 */
54
55/* start a stream */
 56static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
57{
58 /*
59 * Before stream start, initialize parameter
60 */
61 azx_dev->insufficient = 1;
62
63 /* enable SIE */
64 azx_writel(chip, INTCTL,
65 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
66 /* set DMA start and interrupt mask */
67 azx_sd_writeb(chip, azx_dev, SD_CTL,
68 azx_sd_readb(chip, azx_dev, SD_CTL) |
69 SD_CTL_DMA_START | SD_INT_MASK);
70}
71
72/* stop DMA */
73static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
74{
75 azx_sd_writeb(chip, azx_dev, SD_CTL,
76 azx_sd_readb(chip, azx_dev, SD_CTL) &
77 ~(SD_CTL_DMA_START | SD_INT_MASK));
78 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
79}
80
81/* stop a stream */
82void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
83{
84 azx_stream_clear(chip, azx_dev);
85 /* disable SIE */
86 azx_writel(chip, INTCTL,
87 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
88}
89EXPORT_SYMBOL_GPL(azx_stream_stop);
90
91/* reset stream */
 92static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
93{
94 unsigned char val;
95 int timeout;
96
97 azx_stream_clear(chip, azx_dev);
98
99 azx_sd_writeb(chip, azx_dev, SD_CTL,
100 azx_sd_readb(chip, azx_dev, SD_CTL) |
101 SD_CTL_STREAM_RESET);
102 udelay(3);
103 timeout = 300;
104 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
105 SD_CTL_STREAM_RESET) && --timeout)
106 ;
107 val &= ~SD_CTL_STREAM_RESET;
108 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
109 udelay(3);
110
111 timeout = 300;
112 /* waiting for hardware to report that the stream is out of reset */
113 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
114 SD_CTL_STREAM_RESET) && --timeout)
115 ;
116
117 /* reset first position - may not be synced with hw at this time */
118 *azx_dev->posbuf = 0;
119}
120
121/*
122 * set up the SD for streaming
123 */
 124static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
125{
126 unsigned int val;
127 /* make sure the run bit is zero for SD */
128 azx_stream_clear(chip, azx_dev);
129 /* program the stream_tag */
130 val = azx_sd_readl(chip, azx_dev, SD_CTL);
131 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
132 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
133 if (!azx_snoop(chip))
134 val |= SD_CTL_TRAFFIC_PRIO;
135 azx_sd_writel(chip, azx_dev, SD_CTL, val);
136
137 /* program the length of samples in cyclic buffer */
138 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
139
140 /* program the stream format */
 141 /* this value needs to be the same as the one programmed into the codec */
142 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
143
144 /* program the stream LVI (last valid index) of the BDL */
145 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
146
147 /* program the BDL address */
148 /* lower BDL address */
149 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
150 /* upper BDL address */
151 azx_sd_writel(chip, azx_dev, SD_BDLPU,
152 upper_32_bits(azx_dev->bdl.addr));
153
154 /* enable the position buffer */
155 if (chip->position_fix[0] != POS_FIX_LPIB ||
156 chip->position_fix[1] != POS_FIX_LPIB) {
157 if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
158 azx_writel(chip, DPLBASE,
159 (u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
160 }
161
162 /* set the interrupt enable bits in the descriptor control register */
163 azx_sd_writel(chip, azx_dev, SD_CTL,
164 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
165
166 return 0;
167}
168
169/* assign a stream for the PCM */
170static inline struct azx_dev *
171azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
172{
173 int dev, i, nums;
174 struct azx_dev *res = NULL;
175 /* make a non-zero unique key for the substream */
176 int key = (substream->pcm->device << 16) | (substream->number << 2) |
177 (substream->stream + 1);
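 /* the key encodes PCM device, substream number and direction, so a
  * substream is preferably re-assigned the stream it used last time
  */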
178
179 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
180 dev = chip->playback_index_offset;
181 nums = chip->playback_streams;
182 } else {
183 dev = chip->capture_index_offset;
184 nums = chip->capture_streams;
185 }
186 for (i = 0; i < nums; i++, dev++) {
187 struct azx_dev *azx_dev = &chip->azx_dev[dev];
188 dsp_lock(azx_dev);
189 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
190 if (azx_dev->assigned_key == key) {
191 azx_dev->opened = 1;
192 azx_dev->assigned_key = key;
193 dsp_unlock(azx_dev);
194 return azx_dev;
195 }
196 if (!res)
197 res = azx_dev;
198 }
199 dsp_unlock(azx_dev);
200 }
201 if (res) {
202 dsp_lock(res);
203 res->opened = 1;
204 res->assigned_key = key;
205 dsp_unlock(res);
206 }
207 return res;
208}
209
210/* release the assigned stream */
211static inline void azx_release_device(struct azx_dev *azx_dev)
212{
213 azx_dev->opened = 0;
214}
215
216static cycle_t azx_cc_read(const struct cyclecounter *cc)
217{
218 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
219 struct snd_pcm_substream *substream = azx_dev->substream;
220 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
221 struct azx *chip = apcm->chip;
222
223 return azx_readl(chip, WALLCLK);
224}
225
226static void azx_timecounter_init(struct snd_pcm_substream *substream,
227 bool force, cycle_t last)
228{
229 struct azx_dev *azx_dev = get_azx_dev(substream);
230 struct timecounter *tc = &azx_dev->azx_tc;
231 struct cyclecounter *cc = &azx_dev->azx_cc;
232 u64 nsec;
233
234 cc->read = azx_cc_read;
235 cc->mask = CLOCKSOURCE_MASK(32);
236
237 /*
238 * Converting from 24 MHz to ns means applying a 125/3 factor.
239 * To avoid any saturation issues in intermediate operations,
240 * the 125 factor is applied first. The division is applied
241 * last after reading the timecounter value.
242 * Applying the 1/3 factor as part of the multiplication
243 * requires at least 20 bits for a decent precision, however
 244 * overflows occur after about 4 hours or less, not an option.
245 */
246
247 cc->mult = 125; /* saturation after 195 years */
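 /* the 64-bit count overflows after 2^64 / (125 * 24e6) s ~= 6.1e9 s ~= 195 years */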
248 cc->shift = 0;
249
250 nsec = 0; /* audio time is elapsed time since trigger */
251 timecounter_init(tc, cc, nsec);
252 if (force)
253 /*
254 * force timecounter to use predefined value,
255 * used for synchronized starts
256 */
257 tc->cycle_last = last;
258}
259
260static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
261 u64 nsec)
262{
263 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
264 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
265 u64 codec_frames, codec_nsecs;
266
267 if (!hinfo->ops.get_delay)
268 return nsec;
269
270 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
271 codec_nsecs = div_u64(codec_frames * 1000000000LL,
272 substream->runtime->rate);
273
274 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
275 return nsec + codec_nsecs;
276
277 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
278}
279
280/*
281 * set up a BDL entry
282 */
283static int setup_bdle(struct azx *chip,
284 struct snd_dma_buffer *dmab,
285 struct azx_dev *azx_dev, u32 **bdlp,
286 int ofs, int size, int with_ioc)
287{
288 u32 *bdl = *bdlp;
289
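 /*
  * Each BDL entry consists of four 32-bit words: buffer address low,
  * buffer address high, length in bytes, and the IOC
  * (interrupt-on-completion) flag.
  */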
290 while (size > 0) {
291 dma_addr_t addr;
292 int chunk;
293
294 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
295 return -EINVAL;
296
297 addr = snd_sgbuf_get_addr(dmab, ofs);
298 /* program the address field of the BDL entry */
299 bdl[0] = cpu_to_le32((u32)addr);
300 bdl[1] = cpu_to_le32(upper_32_bits(addr));
301 /* program the size field of the BDL entry */
302 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
303 /* one BDLE cannot cross 4K boundary on CTHDA chips */
304 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
305 u32 remain = 0x1000 - (ofs & 0xfff);
306 if (chunk > remain)
307 chunk = remain;
308 }
309 bdl[2] = cpu_to_le32(chunk);
310 /* program the IOC to enable interrupt
311 * only when the whole fragment is processed
312 */
313 size -= chunk;
314 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
315 bdl += 4;
316 azx_dev->frags++;
317 ofs += chunk;
318 }
319 *bdlp = bdl;
320 return ofs;
321}
322
323/*
324 * set up BDL entries
325 */
326static int azx_setup_periods(struct azx *chip,
327 struct snd_pcm_substream *substream,
328 struct azx_dev *azx_dev)
329{
330 u32 *bdl;
331 int i, ofs, periods, period_bytes;
332 int pos_adj = 0;
333
334 /* reset BDL address */
335 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
336 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
337
338 period_bytes = azx_dev->period_bytes;
339 periods = azx_dev->bufsize / period_bytes;
340
341 /* program the initial BDL entries */
342 bdl = (u32 *)azx_dev->bdl.area;
343 ofs = 0;
344 azx_dev->frags = 0;
345
346 if (chip->bdl_pos_adj)
347 pos_adj = chip->bdl_pos_adj[chip->dev_index];
348 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
349 struct snd_pcm_runtime *runtime = substream->runtime;
350 int pos_align = pos_adj;
351 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
352 if (!pos_adj)
353 pos_adj = pos_align;
354 else
355 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
356 pos_align;
357 pos_adj = frames_to_bytes(runtime, pos_adj);
358 if (pos_adj >= period_bytes) {
 359 dev_warn(chip->card->dev, "Too big adjustment %d\n",
360 pos_adj);
361 pos_adj = 0;
362 } else {
363 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
364 azx_dev,
365 &bdl, ofs, pos_adj, true);
366 if (ofs < 0)
367 goto error;
368 }
369 } else
370 pos_adj = 0;
371
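 /*
  * With a non-zero bdl_pos_adj, the short leading fragment set up above
  * shifts every period interrupt slightly past the nominal period
  * boundary; the last period below is shortened by the same amount.
  */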
372 for (i = 0; i < periods; i++) {
373 if (i == periods - 1 && pos_adj)
374 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
375 azx_dev, &bdl, ofs,
376 period_bytes - pos_adj, 0);
377 else
378 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
379 azx_dev, &bdl, ofs,
380 period_bytes,
381 !azx_dev->no_period_wakeup);
382 if (ofs < 0)
383 goto error;
384 }
385 return 0;
386
387 error:
388 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
389 azx_dev->bufsize, period_bytes);
390 return -EINVAL;
391}
392
393/*
394 * PCM ops
395 */
396
397static int azx_pcm_close(struct snd_pcm_substream *substream)
398{
399 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
400 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
401 struct azx *chip = apcm->chip;
402 struct azx_dev *azx_dev = get_azx_dev(substream);
403 unsigned long flags;
404
405 mutex_lock(&chip->open_mutex);
406 spin_lock_irqsave(&chip->reg_lock, flags);
407 azx_dev->substream = NULL;
408 azx_dev->running = 0;
409 spin_unlock_irqrestore(&chip->reg_lock, flags);
410 azx_release_device(azx_dev);
411 hinfo->ops.close(hinfo, apcm->codec, substream);
412 snd_hda_power_down(apcm->codec);
413 mutex_unlock(&chip->open_mutex);
414 return 0;
415}
416
417static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
418 struct snd_pcm_hw_params *hw_params)
419{
420 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
421 struct azx *chip = apcm->chip;
422 int ret;
423
424 dsp_lock(get_azx_dev(substream));
425 if (dsp_is_locked(get_azx_dev(substream))) {
426 ret = -EBUSY;
427 goto unlock;
428 }
429
430 ret = chip->ops->substream_alloc_pages(chip, substream,
431 params_buffer_bytes(hw_params));
432unlock:
433 dsp_unlock(get_azx_dev(substream));
434 return ret;
435}
436
437static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
438{
439 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
440 struct azx_dev *azx_dev = get_azx_dev(substream);
441 struct azx *chip = apcm->chip;
442 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
443 int err;
444
445 /* reset BDL address */
446 dsp_lock(azx_dev);
447 if (!dsp_is_locked(azx_dev)) {
448 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
449 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
450 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
451 azx_dev->bufsize = 0;
452 azx_dev->period_bytes = 0;
453 azx_dev->format_val = 0;
454 }
455
456 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
457
458 err = chip->ops->substream_free_pages(chip, substream);
459 azx_dev->prepared = 0;
460 dsp_unlock(azx_dev);
461 return err;
462}
463
464static int azx_pcm_prepare(struct snd_pcm_substream *substream)
465{
466 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
467 struct azx *chip = apcm->chip;
468 struct azx_dev *azx_dev = get_azx_dev(substream);
469 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
470 struct snd_pcm_runtime *runtime = substream->runtime;
471 unsigned int bufsize, period_bytes, format_val, stream_tag;
472 int err;
473 struct hda_spdif_out *spdif =
474 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
475 unsigned short ctls = spdif ? spdif->ctls : 0;
476
477 dsp_lock(azx_dev);
478 if (dsp_is_locked(azx_dev)) {
479 err = -EBUSY;
480 goto unlock;
481 }
482
483 azx_stream_reset(chip, azx_dev);
484 format_val = snd_hda_calc_stream_format(runtime->rate,
485 runtime->channels,
486 runtime->format,
487 hinfo->maxbps,
488 ctls);
489 if (!format_val) {
490 dev_err(chip->card->dev,
491 "invalid format_val, rate=%d, ch=%d, format=%d\n",
492 runtime->rate, runtime->channels, runtime->format);
493 err = -EINVAL;
494 goto unlock;
495 }
496
497 bufsize = snd_pcm_lib_buffer_bytes(substream);
498 period_bytes = snd_pcm_lib_period_bytes(substream);
499
500 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
501 bufsize, format_val);
502
503 if (bufsize != azx_dev->bufsize ||
504 period_bytes != azx_dev->period_bytes ||
505 format_val != azx_dev->format_val ||
506 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
507 azx_dev->bufsize = bufsize;
508 azx_dev->period_bytes = period_bytes;
509 azx_dev->format_val = format_val;
510 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
511 err = azx_setup_periods(chip, substream, azx_dev);
512 if (err < 0)
513 goto unlock;
514 }
515
516 /* when LPIB delay correction gives a small negative value,
517 * we ignore it; currently set the threshold statically to
518 * 64 frames
519 */
520 if (runtime->period_size > 64)
521 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
522 else
523 azx_dev->delay_negative_threshold = 0;
524
 525 /* wallclk has 24MHz clock source */
526 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
527 runtime->rate) * 1000);
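 /* period duration expressed in 24 MHz wall-clock ticks:
  * period_size * 24e6 / rate, computed in two steps (x24000, then x1000)
  */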
528 azx_setup_controller(chip, azx_dev);
529 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
530 azx_dev->fifo_size =
531 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
532 else
533 azx_dev->fifo_size = 0;
534
535 stream_tag = azx_dev->stream_tag;
536 /* CA-IBG chips need the playback stream starting from 1 */
537 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
538 stream_tag > chip->capture_streams)
539 stream_tag -= chip->capture_streams;
540 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
541 azx_dev->format_val, substream);
542
543 unlock:
544 if (!err)
545 azx_dev->prepared = 1;
546 dsp_unlock(azx_dev);
547 return err;
548}
549
550static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
551{
552 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
553 struct azx *chip = apcm->chip;
554 struct azx_dev *azx_dev;
555 struct snd_pcm_substream *s;
556 int rstart = 0, start, nsync = 0, sbits = 0;
557 int nwait, timeout;
558
559 azx_dev = get_azx_dev(substream);
560 trace_azx_pcm_trigger(chip, azx_dev, cmd);
561
562 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
563 return -EPIPE;
564
565 switch (cmd) {
566 case SNDRV_PCM_TRIGGER_START:
567 rstart = 1;
568 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
569 case SNDRV_PCM_TRIGGER_RESUME:
570 start = 1;
571 break;
572 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
573 case SNDRV_PCM_TRIGGER_SUSPEND:
574 case SNDRV_PCM_TRIGGER_STOP:
575 start = 0;
576 break;
577 default:
578 return -EINVAL;
579 }
580
581 snd_pcm_group_for_each_entry(s, substream) {
582 if (s->pcm->card != substream->pcm->card)
583 continue;
584 azx_dev = get_azx_dev(s);
585 sbits |= 1 << azx_dev->index;
586 nsync++;
587 snd_pcm_trigger_done(s, substream);
588 }
589
590 spin_lock(&chip->reg_lock);
591
592 /* first, set SYNC bits of corresponding streams */
593 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
594 azx_writel(chip, OLD_SSYNC,
595 azx_readl(chip, OLD_SSYNC) | sbits);
596 else
597 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
598
599 snd_pcm_group_for_each_entry(s, substream) {
600 if (s->pcm->card != substream->pcm->card)
601 continue;
602 azx_dev = get_azx_dev(s);
603 if (start) {
604 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
605 if (!rstart)
606 azx_dev->start_wallclk -=
607 azx_dev->period_wallclk;
608 azx_stream_start(chip, azx_dev);
609 } else {
610 azx_stream_stop(chip, azx_dev);
611 }
612 azx_dev->running = start;
613 }
614 spin_unlock(&chip->reg_lock);
615 if (start) {
616 /* wait until all FIFOs get ready */
617 for (timeout = 5000; timeout; timeout--) {
618 nwait = 0;
619 snd_pcm_group_for_each_entry(s, substream) {
620 if (s->pcm->card != substream->pcm->card)
621 continue;
622 azx_dev = get_azx_dev(s);
623 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
624 SD_STS_FIFO_READY))
625 nwait++;
626 }
627 if (!nwait)
628 break;
629 cpu_relax();
630 }
631 } else {
632 /* wait until all RUN bits are cleared */
633 for (timeout = 5000; timeout; timeout--) {
634 nwait = 0;
635 snd_pcm_group_for_each_entry(s, substream) {
636 if (s->pcm->card != substream->pcm->card)
637 continue;
638 azx_dev = get_azx_dev(s);
639 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
640 SD_CTL_DMA_START)
641 nwait++;
642 }
643 if (!nwait)
644 break;
645 cpu_relax();
646 }
647 }
648 spin_lock(&chip->reg_lock);
649 /* reset SYNC bits */
650 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
651 azx_writel(chip, OLD_SSYNC,
652 azx_readl(chip, OLD_SSYNC) & ~sbits);
653 else
654 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
655 if (start) {
656 azx_timecounter_init(substream, 0, 0);
657 if (nsync > 1) {
658 cycle_t cycle_last;
659
660 /* same start cycle for master and group */
661 azx_dev = get_azx_dev(substream);
662 cycle_last = azx_dev->azx_tc.cycle_last;
663
664 snd_pcm_group_for_each_entry(s, substream) {
665 if (s->pcm->card != substream->pcm->card)
666 continue;
667 azx_timecounter_init(s, 1, cycle_last);
668 }
669 }
670 }
671 spin_unlock(&chip->reg_lock);
672 return 0;
673}
674
675/* get the current DMA position with correction on VIA chips */
676static unsigned int azx_via_get_position(struct azx *chip,
677 struct azx_dev *azx_dev)
678{
679 unsigned int link_pos, mini_pos, bound_pos;
680 unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
681 unsigned int fifo_size;
682
683 link_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
684 if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
685 /* Playback, no problem using link position */
686 return link_pos;
687 }
688
689 /* Capture */
 690 /* For newer chipsets,
 691 * use the modulo to get the DMA position, just like on old chipsets
 692 */
693 mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
694 mod_dma_pos %= azx_dev->period_bytes;
695
 696 /* azx_dev->fifo_size doesn't hold the FIFO size of the input stream;
 697 * read it from the base address + offset instead.
 698 */
699 fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);
700
701 if (azx_dev->insufficient) {
 702 /* Link position is never greater than FIFO size */
703 if (link_pos <= fifo_size)
704 return 0;
705
706 azx_dev->insufficient = 0;
707 }
708
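 /*
  * Combine the period boundary derived from the FIFO-corrected link
  * position with the intra-period offset reported by the position buffer.
  */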
709 if (link_pos <= fifo_size)
710 mini_pos = azx_dev->bufsize + link_pos - fifo_size;
711 else
712 mini_pos = link_pos - fifo_size;
713
 714 /* Find nearest previous boundary */
715 mod_mini_pos = mini_pos % azx_dev->period_bytes;
716 mod_link_pos = link_pos % azx_dev->period_bytes;
717 if (mod_link_pos >= fifo_size)
718 bound_pos = link_pos - mod_link_pos;
719 else if (mod_dma_pos >= mod_mini_pos)
720 bound_pos = mini_pos - mod_mini_pos;
721 else {
722 bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
723 if (bound_pos >= azx_dev->bufsize)
724 bound_pos = 0;
725 }
726
727 /* Calculate real DMA position we want */
728 return bound_pos + mod_dma_pos;
729}
730
731unsigned int azx_get_position(struct azx *chip,
732 struct azx_dev *azx_dev,
733 bool with_check)
734{
735 struct snd_pcm_substream *substream = azx_dev->substream;
736 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
737 unsigned int pos;
738 int stream = substream->stream;
739 struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
740 int delay = 0;
741
742 switch (chip->position_fix[stream]) {
743 case POS_FIX_LPIB:
744 /* read LPIB */
745 pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
746 break;
747 case POS_FIX_VIACOMBO:
748 pos = azx_via_get_position(chip, azx_dev);
749 break;
750 default:
751 /* use the position buffer */
752 pos = le32_to_cpu(*azx_dev->posbuf);
753 if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
754 if (!pos || pos == (u32)-1) {
755 dev_info(chip->card->dev,
756 "Invalid position buffer, using LPIB read method instead.\n");
757 chip->position_fix[stream] = POS_FIX_LPIB;
758 pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
759 } else
760 chip->position_fix[stream] = POS_FIX_POSBUF;
761 }
762 break;
763 }
764
765 if (pos >= azx_dev->bufsize)
766 pos = 0;
767
768 /* calculate runtime delay from LPIB */
769 if (substream->runtime &&
770 chip->position_fix[stream] == POS_FIX_POSBUF &&
771 (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
772 unsigned int lpib_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
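 /* the gap between the DMA position and the link position is data
  * still in flight between memory and the link, reported to userspace
  * as additional delay
  */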
773 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
774 delay = pos - lpib_pos;
775 else
776 delay = lpib_pos - pos;
777 if (delay < 0) {
778 if (delay >= azx_dev->delay_negative_threshold)
779 delay = 0;
780 else
781 delay += azx_dev->bufsize;
782 }
783 if (delay >= azx_dev->period_bytes) {
784 dev_info(chip->card->dev,
785 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
786 delay, azx_dev->period_bytes);
787 delay = 0;
788 chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
789 }
790 delay = bytes_to_frames(substream->runtime, delay);
791 }
792
793 if (substream->runtime) {
794 if (hinfo->ops.get_delay)
795 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
796 substream);
797 substream->runtime->delay = delay;
798 }
799
800 trace_azx_get_position(chip, azx_dev, pos, delay);
801 return pos;
802}
803EXPORT_SYMBOL_GPL(azx_get_position);
804
805static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
806{
807 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
808 struct azx *chip = apcm->chip;
809 struct azx_dev *azx_dev = get_azx_dev(substream);
810 return bytes_to_frames(substream->runtime,
811 azx_get_position(chip, azx_dev, false));
812}
813
814static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
815 struct timespec *ts)
816{
817 struct azx_dev *azx_dev = get_azx_dev(substream);
818 u64 nsec;
819
820 nsec = timecounter_read(&azx_dev->azx_tc);
821 nsec = div_u64(nsec, 3); /* can be optimized */
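 /* the cycle counter accumulates WALLCLK ticks * 125; dividing by 3
  * completes the 125/3 (= 1000/24) ns-per-24MHz-tick conversion
  */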
822 nsec = azx_adjust_codec_delay(substream, nsec);
823
824 *ts = ns_to_timespec(nsec);
825
826 return 0;
827}
828
829static struct snd_pcm_hardware azx_pcm_hw = {
830 .info = (SNDRV_PCM_INFO_MMAP |
831 SNDRV_PCM_INFO_INTERLEAVED |
832 SNDRV_PCM_INFO_BLOCK_TRANSFER |
833 SNDRV_PCM_INFO_MMAP_VALID |
834 /* No full-resume yet implemented */
835 /* SNDRV_PCM_INFO_RESUME |*/
836 SNDRV_PCM_INFO_PAUSE |
837 SNDRV_PCM_INFO_SYNC_START |
838 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
839 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
840 .formats = SNDRV_PCM_FMTBIT_S16_LE,
841 .rates = SNDRV_PCM_RATE_48000,
842 .rate_min = 48000,
843 .rate_max = 48000,
844 .channels_min = 2,
845 .channels_max = 2,
846 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
847 .period_bytes_min = 128,
848 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
849 .periods_min = 2,
850 .periods_max = AZX_MAX_FRAG,
851 .fifo_size = 0,
852};
853
854static int azx_pcm_open(struct snd_pcm_substream *substream)
855{
856 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
857 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
858 struct azx *chip = apcm->chip;
859 struct azx_dev *azx_dev;
860 struct snd_pcm_runtime *runtime = substream->runtime;
861 unsigned long flags;
862 int err;
863 int buff_step;
864
865 mutex_lock(&chip->open_mutex);
866 azx_dev = azx_assign_device(chip, substream);
867 if (azx_dev == NULL) {
868 mutex_unlock(&chip->open_mutex);
869 return -EBUSY;
870 }
871 runtime->hw = azx_pcm_hw;
872 runtime->hw.channels_min = hinfo->channels_min;
873 runtime->hw.channels_max = hinfo->channels_max;
874 runtime->hw.formats = hinfo->formats;
875 runtime->hw.rates = hinfo->rates;
876 snd_pcm_limit_hw_rates(runtime);
877 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
878
879 /* avoid wrap-around with wall-clock */
880 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
881 20,
882 178000000);
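 /* the 32-bit WALLCLK register at 24 MHz wraps after 2^32 / 24e6 ~= 178.9 s,
  * hence the 178,000,000 us cap above
  */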
883
884 if (chip->align_buffer_size)
885 /* constrain buffer sizes to be multiple of 128
886 bytes. This is more efficient in terms of memory
887 access but isn't required by the HDA spec and
888 prevents users from specifying exact period/buffer
889 sizes. For example for 44.1kHz, a period size set
890 to 20ms will be rounded to 19.59ms. */
891 buff_step = 128;
892 else
893 /* Don't enforce steps on buffer sizes, still need to
894 be multiple of 4 bytes (HDA spec). Tested on Intel
895 HDA controllers, may not work on all devices where
896 option needs to be disabled */
897 buff_step = 4;
898
899 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
900 buff_step);
901 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
902 buff_step);
903 snd_hda_power_up_d3wait(apcm->codec);
904 err = hinfo->ops.open(hinfo, apcm->codec, substream);
905 if (err < 0) {
906 azx_release_device(azx_dev);
907 snd_hda_power_down(apcm->codec);
908 mutex_unlock(&chip->open_mutex);
909 return err;
910 }
911 snd_pcm_limit_hw_rates(runtime);
912 /* sanity check */
913 if (snd_BUG_ON(!runtime->hw.channels_min) ||
914 snd_BUG_ON(!runtime->hw.channels_max) ||
915 snd_BUG_ON(!runtime->hw.formats) ||
916 snd_BUG_ON(!runtime->hw.rates)) {
917 azx_release_device(azx_dev);
918 hinfo->ops.close(hinfo, apcm->codec, substream);
919 snd_hda_power_down(apcm->codec);
920 mutex_unlock(&chip->open_mutex);
921 return -EINVAL;
922 }
923
924 /* disable WALLCLOCK timestamps for capture streams
925 until we figure out how to handle digital inputs */
926 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
927 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
928
929 spin_lock_irqsave(&chip->reg_lock, flags);
930 azx_dev->substream = substream;
931 azx_dev->running = 0;
932 spin_unlock_irqrestore(&chip->reg_lock, flags);
933
934 runtime->private_data = azx_dev;
935 snd_pcm_set_sync(substream);
936 mutex_unlock(&chip->open_mutex);
937 return 0;
938}
939
940static int azx_pcm_mmap(struct snd_pcm_substream *substream,
941 struct vm_area_struct *area)
942{
943 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
944 struct azx *chip = apcm->chip;
945 if (chip->ops->pcm_mmap_prepare)
946 chip->ops->pcm_mmap_prepare(substream, area);
947 return snd_pcm_lib_default_mmap(substream, area);
948}
949
950static struct snd_pcm_ops azx_pcm_ops = {
951 .open = azx_pcm_open,
952 .close = azx_pcm_close,
953 .ioctl = snd_pcm_lib_ioctl,
954 .hw_params = azx_pcm_hw_params,
955 .hw_free = azx_pcm_hw_free,
956 .prepare = azx_pcm_prepare,
957 .trigger = azx_pcm_trigger,
958 .pointer = azx_pcm_pointer,
959 .wall_clock = azx_get_wallclock_tstamp,
960 .mmap = azx_pcm_mmap,
961 .page = snd_pcm_sgbuf_ops_page,
962};
963
964static void azx_pcm_free(struct snd_pcm *pcm)
965{
966 struct azx_pcm *apcm = pcm->private_data;
967 if (apcm) {
968 list_del(&apcm->list);
969 kfree(apcm);
970 }
971}
972
973#define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
974
975static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
976 struct hda_pcm *cpcm)
977{
978 struct azx *chip = bus->private_data;
979 struct snd_pcm *pcm;
980 struct azx_pcm *apcm;
981 int pcm_dev = cpcm->device;
982 unsigned int size;
983 int s, err;
984
985 list_for_each_entry(apcm, &chip->pcm_list, list) {
986 if (apcm->pcm->device == pcm_dev) {
987 dev_err(chip->card->dev, "PCM %d already exists\n",
988 pcm_dev);
989 return -EBUSY;
990 }
991 }
992 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
993 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
994 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
995 &pcm);
996 if (err < 0)
997 return err;
998 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
999 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
1000 if (apcm == NULL)
1001 return -ENOMEM;
1002 apcm->chip = chip;
1003 apcm->pcm = pcm;
1004 apcm->codec = codec;
1005 pcm->private_data = apcm;
1006 pcm->private_free = azx_pcm_free;
1007 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
1008 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
1009 list_add_tail(&apcm->list, &chip->pcm_list);
1010 cpcm->pcm = pcm;
1011 for (s = 0; s < 2; s++) {
1012 apcm->hinfo[s] = &cpcm->stream[s];
1013 if (cpcm->stream[s].substreams)
1014 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
1015 }
1016 /* buffer pre-allocation */
1017 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
1018 if (size > MAX_PREALLOC_SIZE)
1019 size = MAX_PREALLOC_SIZE;
1020 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
1021 chip->card->dev,
1022 size, MAX_PREALLOC_SIZE);
1023 /* link to codec */
1024 pcm->dev = &codec->dev;
1025 return 0;
1026}
 1027
1028/*
1029 * CORB / RIRB interface
1030 */
 1031static int azx_alloc_cmd_io(struct azx *chip)
1032{
1033 int err;
1034
 1035 /* single page (at least 4096 bytes) must suffice for both ring buffers */
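 /* the CORB (up to 256 4-byte commands) occupies the start of this page,
  * the RIRB (256 8-byte responses) the half at offset 2048; see
  * azx_init_cmd_io() below
  */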
1036 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1037 PAGE_SIZE, &chip->rb);
1038 if (err < 0)
1039 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
1040 return err;
1041}
1042EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
1043
 1044static void azx_init_cmd_io(struct azx *chip)
1045{
1046 int timeout;
1047
1048 spin_lock_irq(&chip->reg_lock);
1049 /* CORB set up */
1050 chip->corb.addr = chip->rb.addr;
1051 chip->corb.buf = (u32 *)chip->rb.area;
1052 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
1053 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
1054
 1055 /* set the corb size to 256 entries (ULI requires this explicitly) */
1056 azx_writeb(chip, CORBSIZE, 0x02);
1057 /* set the corb write pointer to 0 */
1058 azx_writew(chip, CORBWP, 0);
1059
1060 /* reset the corb hw read pointer */
1061 azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
1062 if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
1063 for (timeout = 1000; timeout > 0; timeout--) {
1064 if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
1065 break;
1066 udelay(1);
1067 }
1068 if (timeout <= 0)
1069 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1070 azx_readw(chip, CORBRP));
1071
1072 azx_writew(chip, CORBRP, 0);
1073 for (timeout = 1000; timeout > 0; timeout--) {
1074 if (azx_readw(chip, CORBRP) == 0)
1075 break;
1076 udelay(1);
1077 }
1078 if (timeout <= 0)
1079 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1080 azx_readw(chip, CORBRP));
 1081 }
1082
1083 /* enable corb dma */
1084 azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
1085
1086 /* RIRB set up */
1087 chip->rirb.addr = chip->rb.addr + 2048;
1088 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1089 chip->rirb.wp = chip->rirb.rp = 0;
1090 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1091 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1092 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1093
 1094 /* set the rirb size to 256 entries (ULI requires this explicitly) */
1095 azx_writeb(chip, RIRBSIZE, 0x02);
1096 /* reset the rirb hw write pointer */
1097 azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
1098 /* set N=1, get RIRB response interrupt for new entry */
1099 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1100 azx_writew(chip, RINTCNT, 0xc0);
1101 else
1102 azx_writew(chip, RINTCNT, 1);
1103 /* enable rirb dma and response irq */
1104 azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
1105 spin_unlock_irq(&chip->reg_lock);
1106}
1107EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1108
 1109static void azx_free_cmd_io(struct azx *chip)
1110{
1111 spin_lock_irq(&chip->reg_lock);
1112 /* disable ringbuffer DMAs */
1113 azx_writeb(chip, RIRBCTL, 0);
1114 azx_writeb(chip, CORBCTL, 0);
1115 spin_unlock_irq(&chip->reg_lock);
1116}
1117EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1118
1119static unsigned int azx_command_addr(u32 cmd)
1120{
1121 unsigned int addr = cmd >> 28;
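 /* the codec address is carried in the top four bits of the verb */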
1122
1123 if (addr >= AZX_MAX_CODECS) {
1124 snd_BUG();
1125 addr = 0;
1126 }
1127
1128 return addr;
1129}
1130
1131/* send a command */
1132static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1133{
1134 struct azx *chip = bus->private_data;
1135 unsigned int addr = azx_command_addr(val);
1136 unsigned int wp, rp;
1137
1138 spin_lock_irq(&chip->reg_lock);
1139
1140 /* add command to corb */
1141 wp = azx_readw(chip, CORBWP);
1142 if (wp == 0xffff) {
1143 /* something wrong, controller likely turned to D3 */
1144 spin_unlock_irq(&chip->reg_lock);
1145 return -EIO;
1146 }
1147 wp++;
1148 wp %= ICH6_MAX_CORB_ENTRIES;
1149
1150 rp = azx_readw(chip, CORBRP);
1151 if (wp == rp) {
1152 /* oops, it's full */
1153 spin_unlock_irq(&chip->reg_lock);
1154 return -EAGAIN;
1155 }
1156
1157 chip->rirb.cmds[addr]++;
1158 chip->corb.buf[wp] = cpu_to_le32(val);
1159 azx_writew(chip, CORBWP, wp);
1160
1161 spin_unlock_irq(&chip->reg_lock);
1162
1163 return 0;
1164}
1165
1166#define ICH6_RIRB_EX_UNSOL_EV (1<<4)
1167
1168/* retrieve RIRB entry - called from interrupt handler */
 1169static void azx_update_rirb(struct azx *chip)
1170{
1171 unsigned int rp, wp;
1172 unsigned int addr;
1173 u32 res, res_ex;
1174
1175 wp = azx_readw(chip, RIRBWP);
1176 if (wp == 0xffff) {
1177 /* something wrong, controller likely turned to D3 */
1178 return;
1179 }
1180
1181 if (wp == chip->rirb.wp)
1182 return;
1183 chip->rirb.wp = wp;
1184
1185 while (chip->rirb.rp != wp) {
1186 chip->rirb.rp++;
1187 chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;
1188
1189 rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
1190 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1191 res = le32_to_cpu(chip->rirb.buf[rp]);
1192 addr = res_ex & 0xf;
1193 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1194 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1195 res, res_ex,
1196 chip->rirb.rp, wp);
1197 snd_BUG();
1198 }
1199 else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
1200 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1201 else if (chip->rirb.cmds[addr]) {
1202 chip->rirb.res[addr] = res;
1203 smp_wmb();
1204 chip->rirb.cmds[addr]--;
1205 } else if (printk_ratelimit()) {
1206 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1207 res, res_ex,
1208 chip->last_cmd[addr]);
1209 }
1210 }
1211}
1212
1213/* receive a response */
1214static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1215 unsigned int addr)
1216{
1217 struct azx *chip = bus->private_data;
1218 unsigned long timeout;
1219 unsigned long loopcounter;
1220 int do_poll = 0;
1221
1222 again:
1223 timeout = jiffies + msecs_to_jiffies(1000);
1224
1225 for (loopcounter = 0;; loopcounter++) {
1226 if (chip->polling_mode || do_poll) {
1227 spin_lock_irq(&chip->reg_lock);
1228 azx_update_rirb(chip);
1229 spin_unlock_irq(&chip->reg_lock);
1230 }
1231 if (!chip->rirb.cmds[addr]) {
1232 smp_rmb();
1233 bus->rirb_error = 0;
1234
1235 if (!do_poll)
1236 chip->poll_count = 0;
1237 return chip->rirb.res[addr]; /* the last value */
1238 }
1239 if (time_after(jiffies, timeout))
1240 break;
1241 if (bus->needs_damn_long_delay || loopcounter > 3000)
1242 msleep(2); /* temporary workaround */
1243 else {
1244 udelay(10);
1245 cond_resched();
1246 }
1247 }
1248
1249 if (!bus->no_response_fallback)
1250 return -1;
1251
1252 if (!chip->polling_mode && chip->poll_count < 2) {
1253 dev_dbg(chip->card->dev,
1254 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1255 chip->last_cmd[addr]);
1256 do_poll = 1;
1257 chip->poll_count++;
1258 goto again;
1259 }
1260
1261
1262 if (!chip->polling_mode) {
1263 dev_warn(chip->card->dev,
1264 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1265 chip->last_cmd[addr]);
1266 chip->polling_mode = 1;
1267 goto again;
1268 }
1269
1270 if (chip->msi) {
1271 dev_warn(chip->card->dev,
1272 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1273 chip->last_cmd[addr]);
1274 if (chip->ops->disable_msi_reset_irq(chip) &&
1275 chip->ops->disable_msi_reset_irq(chip) < 0) {
1276 bus->rirb_error = 1;
1277 return -1;
1278 }
1279 goto again;
1280 }
1281
1282 if (chip->probing) {
1283 /* If this critical timeout happens during the codec probing
1284 * phase, this is likely an access to a non-existing codec
1285 * slot. Better to return an error and reset the system.
1286 */
1287 return -1;
1288 }
1289
1290 /* a fatal communication error; need either to reset or to fallback
1291 * to the single_cmd mode
1292 */
1293 bus->rirb_error = 1;
1294 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1295 bus->response_reset = 1;
1296 return -1; /* give a chance to retry */
1297 }
1298
1299 dev_err(chip->card->dev,
1300 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1301 chip->last_cmd[addr]);
1302 chip->single_cmd = 1;
1303 bus->response_reset = 0;
1304 /* release CORB/RIRB */
1305 azx_free_cmd_io(chip);
1306 /* disable unsolicited responses */
1307 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
1308 return -1;
1309}
1310
1311/*
1312 * Use the single immediate command instead of CORB/RIRB for simplicity
1313 *
1314 * Note: according to Intel, this is not preferred use. The command was
1315 * intended for the BIOS only, and may get confused with unsolicited
1316 * responses. So, we shouldn't use it for normal operation from the
1317 * driver.
 1318 * The code is kept here, however, for debugging/testing purposes.
1319 */
1320
1321/* receive a response */
1322static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1323{
1324 int timeout = 50;
1325
1326 while (timeout--) {
1327 /* check IRV busy bit */
1328 if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
1329 /* reuse rirb.res as the response return value */
1330 chip->rirb.res[addr] = azx_readl(chip, IR);
1331 return 0;
1332 }
1333 udelay(1);
1334 }
1335 if (printk_ratelimit())
1336 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1337 azx_readw(chip, IRS));
1338 chip->rirb.res[addr] = -1;
1339 return -EIO;
1340}
1341
1342/* send a command */
1343static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1344{
1345 struct azx *chip = bus->private_data;
1346 unsigned int addr = azx_command_addr(val);
1347 int timeout = 50;
1348
1349 bus->rirb_error = 0;
1350 while (timeout--) {
1351 /* check ICB busy bit */
1352 if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
1353 /* Clear IRV valid bit */
1354 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1355 ICH6_IRS_VALID);
1356 azx_writel(chip, IC, val);
1357 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1358 ICH6_IRS_BUSY);
1359 return azx_single_wait_for_response(chip, addr);
1360 }
1361 udelay(1);
1362 }
1363 if (printk_ratelimit())
1364 dev_dbg(chip->card->dev,
1365 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1366 azx_readw(chip, IRS), val);
1367 return -EIO;
1368}
1369
1370/* receive a response */
1371static unsigned int azx_single_get_response(struct hda_bus *bus,
1372 unsigned int addr)
1373{
1374 struct azx *chip = bus->private_data;
1375 return chip->rirb.res[addr];
1376}
1377
1378/*
1379 * The below are the main callbacks from hda_codec.
1380 *
1381 * They are just the skeleton to call sub-callbacks according to the
1382 * current setting of chip->single_cmd.
1383 */
1384
1385/* send a command */
 1386static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1387{
1388 struct azx *chip = bus->private_data;
1389
1390 if (chip->disabled)
1391 return 0;
1392 chip->last_cmd[azx_command_addr(val)] = val;
1393 if (chip->single_cmd)
1394 return azx_single_send_cmd(bus, val);
1395 else
1396 return azx_corb_send_cmd(bus, val);
1397}
1398EXPORT_SYMBOL_GPL(azx_send_cmd);
1399
1400/* get a response */
 1401static unsigned int azx_get_response(struct hda_bus *bus,
1402 unsigned int addr)
1403{
1404 struct azx *chip = bus->private_data;
1405 if (chip->disabled)
1406 return 0;
1407 if (chip->single_cmd)
1408 return azx_single_get_response(bus, addr);
1409 else
1410 return azx_rirb_get_response(bus, addr);
1411}
1412EXPORT_SYMBOL_GPL(azx_get_response);
1413
1414#ifdef CONFIG_SND_HDA_DSP_LOADER
1415/*
1416 * DSP loading code (e.g. for CA0132)
1417 */
1418
1419/* use the first stream for loading DSP */
1420static struct azx_dev *
1421azx_get_dsp_loader_dev(struct azx *chip)
1422{
1423 return &chip->azx_dev[chip->playback_index_offset];
1424}
1425
1426static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1427 unsigned int byte_size,
1428 struct snd_dma_buffer *bufp)
1429{
1430 u32 *bdl;
1431 struct azx *chip = bus->private_data;
1432 struct azx_dev *azx_dev;
1433 int err;
1434
1435 azx_dev = azx_get_dsp_loader_dev(chip);
1436
1437 dsp_lock(azx_dev);
1438 spin_lock_irq(&chip->reg_lock);
1439 if (azx_dev->running || azx_dev->locked) {
1440 spin_unlock_irq(&chip->reg_lock);
1441 err = -EBUSY;
1442 goto unlock;
1443 }
1444 azx_dev->prepared = 0;
1445 chip->saved_azx_dev = *azx_dev;
1446 azx_dev->locked = 1;
1447 spin_unlock_irq(&chip->reg_lock);
1448
1449 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1450 byte_size, bufp);
1451 if (err < 0)
1452 goto err_alloc;
1453
1454 azx_dev->bufsize = byte_size;
1455 azx_dev->period_bytes = byte_size;
1456 azx_dev->format_val = format;
1457
1458 azx_stream_reset(chip, azx_dev);
1459
1460 /* reset BDL address */
1461 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1462 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1463
1464 azx_dev->frags = 0;
1465 bdl = (u32 *)azx_dev->bdl.area;
1466 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1467 if (err < 0)
1468 goto error;
1469
1470 azx_setup_controller(chip, azx_dev);
1471 dsp_unlock(azx_dev);
1472 return azx_dev->stream_tag;
1473
1474 error:
1475 chip->ops->dma_free_pages(chip, bufp);
1476 err_alloc:
1477 spin_lock_irq(&chip->reg_lock);
1478 if (azx_dev->opened)
1479 *azx_dev = chip->saved_azx_dev;
1480 azx_dev->locked = 0;
1481 spin_unlock_irq(&chip->reg_lock);
1482 unlock:
1483 dsp_unlock(azx_dev);
1484 return err;
1485}
 1486
 1487static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1488{
1489 struct azx *chip = bus->private_data;
1490 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1491
1492 if (start)
1493 azx_stream_start(chip, azx_dev);
1494 else
1495 azx_stream_stop(chip, azx_dev);
1496 azx_dev->running = start;
1497}
 1498
1499static void azx_load_dsp_cleanup(struct hda_bus *bus,
1500 struct snd_dma_buffer *dmab)
1501{
1502 struct azx *chip = bus->private_data;
1503 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1504
1505 if (!dmab->area || !azx_dev->locked)
1506 return;
1507
1508 dsp_lock(azx_dev);
1509 /* reset BDL address */
1510 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1511 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1512 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1513 azx_dev->bufsize = 0;
1514 azx_dev->period_bytes = 0;
1515 azx_dev->format_val = 0;
1516
1517 chip->ops->dma_free_pages(chip, dmab);
1518 dmab->area = NULL;
1519
1520 spin_lock_irq(&chip->reg_lock);
1521 if (azx_dev->opened)
1522 *azx_dev = chip->saved_azx_dev;
1523 azx_dev->locked = 0;
1524 spin_unlock_irq(&chip->reg_lock);
1525 dsp_unlock(azx_dev);
1526}
1527#endif /* CONFIG_SND_HDA_DSP_LOADER */
1528
1529int azx_alloc_stream_pages(struct azx *chip)
1530{
1531 int i, err;
1532 struct snd_card *card = chip->card;
1533
1534 for (i = 0; i < chip->num_streams; i++) {
1535 dsp_lock_init(&chip->azx_dev[i]);
1536 /* allocate memory for the BDL for each stream */
1537 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1538 BDL_SIZE,
1539 &chip->azx_dev[i].bdl);
1540 if (err < 0) {
1541 dev_err(card->dev, "cannot allocate BDL\n");
1542 return -ENOMEM;
1543 }
1544 }
1545 /* allocate memory for the position buffer */
1546 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1547 chip->num_streams * 8, &chip->posbuf);
1548 if (err < 0) {
1549 dev_err(card->dev, "cannot allocate posbuf\n");
1550 return -ENOMEM;
1551 }
1552
1553 /* allocate CORB/RIRB */
1554 err = azx_alloc_cmd_io(chip);
1555 if (err < 0)
1556 return err;
1557 return 0;
1558}
1559EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1560
1561void azx_free_stream_pages(struct azx *chip)
1562{
1563 int i;
1564 if (chip->azx_dev) {
1565 for (i = 0; i < chip->num_streams; i++)
1566 if (chip->azx_dev[i].bdl.area)
1567 chip->ops->dma_free_pages(
1568 chip, &chip->azx_dev[i].bdl);
1569 }
1570 if (chip->rb.area)
1571 chip->ops->dma_free_pages(chip, &chip->rb);
1572 if (chip->posbuf.area)
1573 chip->ops->dma_free_pages(chip, &chip->posbuf);
1574}
1575EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1576
1577/*
1578 * Lowlevel interface
1579 */
1580
1581/* enter link reset */
1582void azx_enter_link_reset(struct azx *chip)
1583{
1584 unsigned long timeout;
1585
1586 /* reset controller */
1587 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
1588
1589 timeout = jiffies + msecs_to_jiffies(100);
1590 while ((azx_readb(chip, GCTL) & ICH6_GCTL_RESET) &&
1591 time_before(jiffies, timeout))
1592 usleep_range(500, 1000);
1593}
1594EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1595
1596/* exit link reset */
1597static void azx_exit_link_reset(struct azx *chip)
1598{
1599 unsigned long timeout;
1600
1601 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
1602
1603 timeout = jiffies + msecs_to_jiffies(100);
1604 while (!azx_readb(chip, GCTL) &&
1605 time_before(jiffies, timeout))
1606 usleep_range(500, 1000);
1607}
1608
1609/* reset codec link */
 1610static int azx_reset(struct azx *chip, bool full_reset)
1611{
1612 if (!full_reset)
1613 goto __skip;
1614
1615 /* clear STATESTS */
1616 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1617
1618 /* reset controller */
1619 azx_enter_link_reset(chip);
1620
1621 /* delay for >= 100us for codec PLL to settle per spec
1622 * Rev 0.9 section 5.5.1
1623 */
1624 usleep_range(500, 1000);
1625
1626 /* Bring controller out of reset */
1627 azx_exit_link_reset(chip);
1628
1629 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1630 usleep_range(1000, 1200);
1631
1632 __skip:
1633 /* check to see if controller is ready */
1634 if (!azx_readb(chip, GCTL)) {
1635 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1636 return -EBUSY;
1637 }
1638
1639 /* Accept unsolicited responses */
1640 if (!chip->single_cmd)
1641 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1642 ICH6_GCTL_UNSOL);
1643
1644 /* detect codecs */
1645 if (!chip->codec_mask) {
1646 chip->codec_mask = azx_readw(chip, STATESTS);
1647 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1648 chip->codec_mask);
1649 }
1650
1651 return 0;
1652}
1653
1654/* enable interrupts */
1655static void azx_int_enable(struct azx *chip)
1656{
1657 /* enable controller CIE and GIE */
1658 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1659 ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
1660}
1661
1662/* disable interrupts */
1663static void azx_int_disable(struct azx *chip)
1664{
1665 int i;
1666
1667 /* disable interrupts in stream descriptor */
1668 for (i = 0; i < chip->num_streams; i++) {
1669 struct azx_dev *azx_dev = &chip->azx_dev[i];
1670 azx_sd_writeb(chip, azx_dev, SD_CTL,
1671 azx_sd_readb(chip, azx_dev, SD_CTL) &
1672 ~SD_INT_MASK);
1673 }
1674
1675 /* disable SIE for all streams */
1676 azx_writeb(chip, INTCTL, 0);
1677
1678 /* disable controller CIE and GIE */
1679 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1680 ~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
1681}
1682
1683/* clear interrupts */
1684static void azx_int_clear(struct azx *chip)
1685{
1686 int i;
1687
1688 /* clear stream status */
1689 for (i = 0; i < chip->num_streams; i++) {
1690 struct azx_dev *azx_dev = &chip->azx_dev[i];
1691 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1692 }
1693
1694 /* clear STATESTS */
1695 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1696
1697 /* clear rirb status */
1698 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1699
1700 /* clear int status */
1701 azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
1702}
1703
1704/*
1705 * reset and start the controller registers
1706 */
 1707void azx_init_chip(struct azx *chip, bool full_reset)
1708{
1709 if (chip->initialized)
1710 return;
1711
1712 /* reset controller */
1713 azx_reset(chip, full_reset);
1714
1715 /* initialize interrupts */
1716 azx_int_clear(chip);
1717 azx_int_enable(chip);
1718
1719 /* initialize the codec command I/O */
1720 if (!chip->single_cmd)
1721 azx_init_cmd_io(chip);
1722
1723 /* program the position buffer */
1724 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1725 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1726
1727 chip->initialized = 1;
1728}
1729EXPORT_SYMBOL_GPL(azx_init_chip);
1730
1731void azx_stop_chip(struct azx *chip)
1732{
1733 if (!chip->initialized)
1734 return;
1735
1736 /* disable interrupts */
1737 azx_int_disable(chip);
1738 azx_int_clear(chip);
1739
1740 /* disable CORB/RIRB */
1741 azx_free_cmd_io(chip);
1742
1743 /* disable position buffer */
1744 azx_writel(chip, DPLBASE, 0);
1745 azx_writel(chip, DPUBASE, 0);
1746
1747 chip->initialized = 0;
1748}
 1749EXPORT_SYMBOL_GPL(azx_stop_chip);
 1750
1751/*
1752 * interrupt handler
1753 */
1754irqreturn_t azx_interrupt(int irq, void *dev_id)
1755{
1756 struct azx *chip = dev_id;
1757 struct azx_dev *azx_dev;
1758 u32 status;
1759 u8 sd_status;
1760 int i;
1761
1762#ifdef CONFIG_PM_RUNTIME
1763 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
 1764 if (!pm_runtime_active(chip->card->dev))
1765 return IRQ_NONE;
1766#endif
1767
1768 spin_lock(&chip->reg_lock);
1769
1770 if (chip->disabled) {
1771 spin_unlock(&chip->reg_lock);
1772 return IRQ_NONE;
1773 }
1774
1775 status = azx_readl(chip, INTSTS);
1776 if (status == 0 || status == 0xffffffff) {
1777 spin_unlock(&chip->reg_lock);
1778 return IRQ_NONE;
1779 }
1780
1781 for (i = 0; i < chip->num_streams; i++) {
1782 azx_dev = &chip->azx_dev[i];
1783 if (status & azx_dev->sd_int_sta_mask) {
1784 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1785 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1786 if (!azx_dev->substream || !azx_dev->running ||
1787 !(sd_status & SD_INT_COMPLETE))
1788 continue;
1789 /* check whether this IRQ is really acceptable */
1790 if (!chip->ops->position_check ||
1791 chip->ops->position_check(chip, azx_dev)) {
1792 spin_unlock(&chip->reg_lock);
1793 snd_pcm_period_elapsed(azx_dev->substream);
1794 spin_lock(&chip->reg_lock);
1795 }
1796 }
1797 }
1798
1799 /* clear rirb int */
1800 status = azx_readb(chip, RIRBSTS);
1801 if (status & RIRB_INT_MASK) {
1802 if (status & RIRB_INT_RESPONSE) {
1803 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1804 udelay(80);
1805 azx_update_rirb(chip);
1806 }
1807 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1808 }
1809
1810 spin_unlock(&chip->reg_lock);
1811
1812 return IRQ_HANDLED;
1813}
1814EXPORT_SYMBOL_GPL(azx_interrupt);
1815
1816/*
 1817 * Codec interface
1818 */
1819
1820/*
1821 * Probe the given codec address
1822 */
1823static int probe_codec(struct azx *chip, int addr)
1824{
1825 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1826 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1827 unsigned int res;
1828
1829 mutex_lock(&chip->bus->cmd_mutex);
1830 chip->probing = 1;
1831 azx_send_cmd(chip->bus, cmd);
1832 res = azx_get_response(chip->bus, addr);
1833 chip->probing = 0;
1834 mutex_unlock(&chip->bus->cmd_mutex);
1835 if (res == -1)
1836 return -EIO;
1837 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1838 return 0;
1839}
1840
1841static void azx_bus_reset(struct hda_bus *bus)
1842{
1843 struct azx *chip = bus->private_data;
1844
1845 bus->in_reset = 1;
1846 azx_stop_chip(chip);
 1847 azx_init_chip(chip, true);
154867cf
DR
1848#ifdef CONFIG_PM
1849 if (chip->initialized) {
1850 struct azx_pcm *p;
1851 list_for_each_entry(p, &chip->pcm_list, list)
1852 snd_pcm_suspend_all(p->pcm);
1853 snd_hda_suspend(chip->bus);
1854 snd_hda_resume(chip->bus);
1855 }
1856#endif
1857 bus->in_reset = 0;
1858}
1859
1860#ifdef CONFIG_PM
1861/* power-up/down the controller */
1862static void azx_power_notify(struct hda_bus *bus, bool power_up)
1863{
1864 struct azx *chip = bus->private_data;
1865
1866 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1867 return;
1868
1869 if (power_up)
1870 pm_runtime_get_sync(chip->card->dev);
1871 else
1872 pm_runtime_put_sync(chip->card->dev);
1873}
1874#endif
1875
1876static int get_jackpoll_interval(struct azx *chip)
1877{
1878 int i;
1879 unsigned int j;
1880
1881 if (!chip->jackpoll_ms)
1882 return 0;
1883
1884 i = chip->jackpoll_ms[chip->dev_index];
1885 if (i == 0)
1886 return 0;
1887 if (i < 50 || i > 60000)
1888 j = 0;
1889 else
1890 j = msecs_to_jiffies(i);
1891 if (j == 0)
1892 dev_warn(chip->card->dev,
1893 "jackpoll_ms value out of range: %d\n", i);
1894 return j;
1895}
1896
1897/* Codec initialization */
1898int azx_codec_create(struct azx *chip, const char *model,
1899 unsigned int max_slots,
1900 int *power_save_to)
1901{
1902 struct hda_bus_template bus_temp;
1903 int c, codecs, err;
1904
1905 memset(&bus_temp, 0, sizeof(bus_temp));
1906 bus_temp.private_data = chip;
1907 bus_temp.modelname = model;
1908 bus_temp.pci = chip->pci;
1909 bus_temp.ops.command = azx_send_cmd;
1910 bus_temp.ops.get_response = azx_get_response;
1911 bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1912 bus_temp.ops.bus_reset = azx_bus_reset;
1913#ifdef CONFIG_PM
1914 bus_temp.power_save = power_save_to;
1915 bus_temp.ops.pm_notify = azx_power_notify;
1916#endif
1917#ifdef CONFIG_SND_HDA_DSP_LOADER
1918 bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1919 bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1920 bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1921#endif
1922
1923 err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
1924 if (err < 0)
1925 return err;
1926
1927 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1928 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1929 chip->bus->needs_damn_long_delay = 1;
1930 }
1931
1932 codecs = 0;
1933 if (!max_slots)
1934 max_slots = AZX_DEFAULT_CODECS;
1935
1936 /* First try to probe all given codec slots */
1937 for (c = 0; c < max_slots; c++) {
1938 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1939 if (probe_codec(chip, c) < 0) {
 1940 /* Some BIOSen report codec addresses
 1941 * that don't actually exist
 1942 */
1943 dev_warn(chip->card->dev,
1944 "Codec #%d probe error; disabling it...\n", c);
1945 chip->codec_mask &= ~(1 << c);
 1946 /* Worse, accessing a non-existing
 1947 * codec often screws up the controller chip
 1948 * and disturbs further communication.
 1949 * Thus if an error occurs during probing,
 1950 * it is better to reset the controller chip
 1951 * to get back to a sane state.
 1952 */
1953 azx_stop_chip(chip);
 1954 azx_init_chip(chip, true);
1955 }
1956 }
1957 }
1958
 1959 /* AMD chipsets often cause communication stalls upon certain
 1960 * sequences such as pin detection. Forcing synced
 1961 * access seems to work around the stall. Grrr...
 1962 */
1963 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1964 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1965 chip->bus->sync_write = 1;
1966 chip->bus->allow_bus_reset = 1;
1967 }
1968
1969 /* Then create codec instances */
1970 for (c = 0; c < max_slots; c++) {
1971 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1972 struct hda_codec *codec;
1973 err = snd_hda_codec_new(chip->bus, c, &codec);
1974 if (err < 0)
1975 continue;
1976 codec->jackpoll_interval = get_jackpoll_interval(chip);
1977 codec->beep_mode = chip->beep_mode;
1978 codecs++;
1979 }
1980 }
1981 if (!codecs) {
1982 dev_err(chip->card->dev, "no codecs initialized\n");
1983 return -ENXIO;
1984 }
1985 return 0;
1986}
1987EXPORT_SYMBOL_GPL(azx_codec_create);
1988
1989/* configure each codec instance */
1990int azx_codec_configure(struct azx *chip)
1991{
1992 struct hda_codec *codec;
1993 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1994 snd_hda_codec_configure(codec);
1995 }
1996 return 0;
1997}
1998EXPORT_SYMBOL_GPL(azx_codec_configure);
1999
2000/* mixer creation - all stuff is implemented in hda module */
2001int azx_mixer_create(struct azx *chip)
2002{
2003 return snd_hda_build_controls(chip->bus);
2004}
2005EXPORT_SYMBOL_GPL(azx_mixer_create);
2006
2007
2008/* initialize SD streams */
2009int azx_init_stream(struct azx *chip)
2010{
2011 int i;
2012
 2013 /* initialize each stream (aka device):
 2014 * assign the starting bdl address to each stream (device)
 2015 * and initialize its register offsets and stream tag
 2016 */
2017 for (i = 0; i < chip->num_streams; i++) {
2018 struct azx_dev *azx_dev = &chip->azx_dev[i];
2019 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
2020 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
2021 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
2022 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
2023 azx_dev->sd_int_sta_mask = 1 << i;
2024 /* stream tag: must be non-zero and unique */
2025 azx_dev->index = i;
2026 azx_dev->stream_tag = i + 1;
2027 }
2028
2029 return 0;
2030}
2031EXPORT_SYMBOL_GPL(azx_init_stream);
2032
2033MODULE_LICENSE("GPL");
 2034MODULE_DESCRIPTION("Common HDA driver functions");