[deliverable/linux.git] / sound / pci / hda / hda_controller.c
1 /*
2 *
3 * Implementation of primary alsa driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
32 #include "hda_priv.h"
33 #include "hda_controller.h"
34
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
37
38 /* DSP lock helpers */
39 #ifdef CONFIG_SND_HDA_DSP_LOADER
40 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
41 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
42 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
43 #define dsp_is_locked(dev) ((dev)->locked)
44 #else
45 #define dsp_lock_init(dev) do {} while (0)
46 #define dsp_lock(dev) do {} while (0)
47 #define dsp_unlock(dev) do {} while (0)
48 #define dsp_is_locked(dev) 0
49 #endif
50
51 /*
52 * AZX stream operations.
53 */
54
55 /* start a stream */
56 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
57 {
58 /*
59 * Before stream start, initialize parameter
60 */
61 azx_dev->insufficient = 1;
62
63 /* enable SIE */
64 azx_writel(chip, INTCTL,
65 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
66 /* set DMA start and interrupt mask */
67 azx_sd_writeb(chip, azx_dev, SD_CTL,
68 azx_sd_readb(chip, azx_dev, SD_CTL) |
69 SD_CTL_DMA_START | SD_INT_MASK);
70 }
71
72 /* stop DMA */
73 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
74 {
75 azx_sd_writeb(chip, azx_dev, SD_CTL,
76 azx_sd_readb(chip, azx_dev, SD_CTL) &
77 ~(SD_CTL_DMA_START | SD_INT_MASK));
78 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
79 }
80
81 /* stop a stream */
82 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
83 {
84 azx_stream_clear(chip, azx_dev);
85 /* disable SIE */
86 azx_writel(chip, INTCTL,
87 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
88 }
89 EXPORT_SYMBOL_GPL(azx_stream_stop);
90
91 /* reset stream */
92 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
93 {
94 unsigned char val;
95 int timeout;
96
97 azx_stream_clear(chip, azx_dev);
98
99 azx_sd_writeb(chip, azx_dev, SD_CTL,
100 azx_sd_readb(chip, azx_dev, SD_CTL) |
101 SD_CTL_STREAM_RESET);
102 udelay(3);
103 timeout = 300;
104 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
105 SD_CTL_STREAM_RESET) && --timeout)
106 ;
107 val &= ~SD_CTL_STREAM_RESET;
108 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
109 udelay(3);
110
111 timeout = 300;
112 /* waiting for hardware to report that the stream is out of reset */
113 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
114 SD_CTL_STREAM_RESET) && --timeout)
115 ;
116
117 /* reset first position - may not be synced with hw at this time */
118 *azx_dev->posbuf = 0;
119 }
120
121 /*
122 * set up the SD for streaming
123 */
124 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
125 {
126 unsigned int val;
127 /* make sure the run bit is zero for SD */
128 azx_stream_clear(chip, azx_dev);
129 /* program the stream_tag */
130 val = azx_sd_readl(chip, azx_dev, SD_CTL);
131 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
132 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
133 if (!azx_snoop(chip))
134 val |= SD_CTL_TRAFFIC_PRIO;
135 azx_sd_writel(chip, azx_dev, SD_CTL, val);
136
137 /* program the length of samples in cyclic buffer */
138 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
139
140 /* program the stream format */
141 /* this value needs to be the same as the one programmed */
142 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
143
144 /* program the stream LVI (last valid index) of the BDL */
145 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
146
147 /* program the BDL address */
148 /* lower BDL address */
149 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
150 /* upper BDL address */
151 azx_sd_writel(chip, azx_dev, SD_BDLPU,
152 upper_32_bits(azx_dev->bdl.addr));
153
154 /* enable the position buffer */
155 if (chip->position_fix[0] != POS_FIX_LPIB ||
156 chip->position_fix[1] != POS_FIX_LPIB) {
157 if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
158 azx_writel(chip, DPLBASE,
159 (u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
160 }
161
162 /* set the interrupt enable bits in the descriptor control register */
163 azx_sd_writel(chip, azx_dev, SD_CTL,
164 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
165
166 return 0;
167 }
168
169 /* assign a stream for the PCM */
170 static inline struct azx_dev *
171 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
172 {
173 int dev, i, nums;
174 struct azx_dev *res = NULL;
175 /* make a non-zero unique key for the substream */
176 int key = (substream->pcm->device << 16) | (substream->number << 2) |
177 (substream->stream + 1);
178
179 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
180 dev = chip->playback_index_offset;
181 nums = chip->playback_streams;
182 } else {
183 dev = chip->capture_index_offset;
184 nums = chip->capture_streams;
185 }
186 for (i = 0; i < nums; i++, dev++) {
187 struct azx_dev *azx_dev = &chip->azx_dev[dev];
188 dsp_lock(azx_dev);
189 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
190 res = azx_dev;
191 if (res->assigned_key == key) {
192 res->opened = 1;
193 res->assigned_key = key;
194 dsp_unlock(azx_dev);
195 return azx_dev;
196 }
197 }
198 dsp_unlock(azx_dev);
199 }
200 if (res) {
201 dsp_lock(res);
202 res->opened = 1;
203 res->assigned_key = key;
204 dsp_unlock(res);
205 }
206 return res;
207 }
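/*
 * Illustration (not part of the driver): how the reuse key above packs its
 * fields, so a substream preferentially gets back the stream it used last
 * time. A minimal user-space sketch; the example values are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int device = 0, number = 0, stream = 0;		/* 0 = playback */
	int key = (device << 16) | (number << 2) | (stream + 1);

	printf("key = 0x%05x\n", key);			/* 0x00001, always non-zero */
	return 0;
}
#endif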
208
209 /* release the assigned stream */
210 static inline void azx_release_device(struct azx_dev *azx_dev)
211 {
212 azx_dev->opened = 0;
213 }
214
215 static cycle_t azx_cc_read(const struct cyclecounter *cc)
216 {
217 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
218 struct snd_pcm_substream *substream = azx_dev->substream;
219 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
220 struct azx *chip = apcm->chip;
221
222 return azx_readl(chip, WALLCLK);
223 }
224
225 static void azx_timecounter_init(struct snd_pcm_substream *substream,
226 bool force, cycle_t last)
227 {
228 struct azx_dev *azx_dev = get_azx_dev(substream);
229 struct timecounter *tc = &azx_dev->azx_tc;
230 struct cyclecounter *cc = &azx_dev->azx_cc;
231 u64 nsec;
232
233 cc->read = azx_cc_read;
234 cc->mask = CLOCKSOURCE_MASK(32);
235
236 /*
237 * Converting from 24 MHz to ns means applying a 125/3 factor.
238 * To avoid any saturation issues in intermediate operations,
239 * the 125 factor is applied first. The division is applied
240 * last after reading the timecounter value.
241 * Applying the 1/3 factor as part of the multiplication
242 * requires at least 20 bits for decent precision; however,
243 * overflows would occur after about 4 hours or less, which is not an option.
244 */
245
246 cc->mult = 125; /* saturation after 195 years */
247 cc->shift = 0;
248
249 nsec = 0; /* audio time is elapsed time since trigger */
250 timecounter_init(tc, cc, nsec);
251 if (force)
252 /*
253 * force timecounter to use predefined value,
254 * used for synchronized starts
255 */
256 tc->cycle_last = last;
257 }
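/*
 * Illustration (not part of the driver): the 24 MHz -> ns conversion is
 * split into the *125 applied by the cyclecounter above and the /3 applied
 * later when the timestamp is read (see azx_get_wallclock_tstamp()).
 * A minimal user-space sketch; the cycle count is made up.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycles = 24000000;		/* one second worth of 24 MHz ticks */
	uint64_t scaled = cycles * 125;		/* what the timecounter accumulates */
	uint64_t nsec = scaled / 3;		/* the deferred 1/3 factor */

	printf("%llu ns\n", (unsigned long long)nsec);	/* prints 1000000000 */
	return 0;
}
#endif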
258
259 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
260 u64 nsec)
261 {
262 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
263 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
264 u64 codec_frames, codec_nsecs;
265
266 if (!hinfo->ops.get_delay)
267 return nsec;
268
269 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
270 codec_nsecs = div_u64(codec_frames * 1000000000LL,
271 substream->runtime->rate);
272
273 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
274 return nsec + codec_nsecs;
275
276 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
277 }
278
279 /*
280 * set up a BDL entry
281 */
282 static int setup_bdle(struct azx *chip,
283 struct snd_dma_buffer *dmab,
284 struct azx_dev *azx_dev, u32 **bdlp,
285 int ofs, int size, int with_ioc)
286 {
287 u32 *bdl = *bdlp;
288
289 while (size > 0) {
290 dma_addr_t addr;
291 int chunk;
292
293 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
294 return -EINVAL;
295
296 addr = snd_sgbuf_get_addr(dmab, ofs);
297 /* program the address field of the BDL entry */
298 bdl[0] = cpu_to_le32((u32)addr);
299 bdl[1] = cpu_to_le32(upper_32_bits(addr));
300 /* program the size field of the BDL entry */
301 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
302 /* one BDLE cannot cross 4K boundary on CTHDA chips */
303 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
304 u32 remain = 0x1000 - (ofs & 0xfff);
305 if (chunk > remain)
306 chunk = remain;
307 }
308 bdl[2] = cpu_to_le32(chunk);
309 /* program the IOC to enable interrupt
310 * only when the whole fragment is processed
311 */
312 size -= chunk;
313 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
314 bdl += 4;
315 azx_dev->frags++;
316 ofs += chunk;
317 }
318 *bdlp = bdl;
319 return ofs;
320 }
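/*
 * Illustration (not part of the driver): the four little-endian 32-bit
 * words that setup_bdle() writes per buffer descriptor list entry.
 * A sketch of an equivalent layout, assuming the standard HDA BDLE format.
 */
#if 0
#include <stdint.h>

struct bdle_sketch {
	uint32_t addr_lo;	/* bdl[0]: lower 32 bits of the buffer address */
	uint32_t addr_hi;	/* bdl[1]: upper 32 bits of the buffer address */
	uint32_t length;	/* bdl[2]: chunk size in bytes */
	uint32_t ioc;		/* bdl[3]: bit 0 = interrupt on completion */
};
#endif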
321
322 /*
323 * set up BDL entries
324 */
325 static int azx_setup_periods(struct azx *chip,
326 struct snd_pcm_substream *substream,
327 struct azx_dev *azx_dev)
328 {
329 u32 *bdl;
330 int i, ofs, periods, period_bytes;
331 int pos_adj = 0;
332
333 /* reset BDL address */
334 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
335 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
336
337 period_bytes = azx_dev->period_bytes;
338 periods = azx_dev->bufsize / period_bytes;
339
340 /* program the initial BDL entries */
341 bdl = (u32 *)azx_dev->bdl.area;
342 ofs = 0;
343 azx_dev->frags = 0;
344
345 if (chip->bdl_pos_adj)
346 pos_adj = chip->bdl_pos_adj[chip->dev_index];
347 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
348 struct snd_pcm_runtime *runtime = substream->runtime;
349 int pos_align = pos_adj;
350 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
351 if (!pos_adj)
352 pos_adj = pos_align;
353 else
354 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
355 pos_align;
356 pos_adj = frames_to_bytes(runtime, pos_adj);
357 if (pos_adj >= period_bytes) {
358 dev_warn(chip->card->dev, "Too big adjustment %d\n",
359 pos_adj);
360 pos_adj = 0;
361 } else {
362 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
363 azx_dev,
364 &bdl, ofs, pos_adj, true);
365 if (ofs < 0)
366 goto error;
367 }
368 } else
369 pos_adj = 0;
370
371 for (i = 0; i < periods; i++) {
372 if (i == periods - 1 && pos_adj)
373 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
374 azx_dev, &bdl, ofs,
375 period_bytes - pos_adj, 0);
376 else
377 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
378 azx_dev, &bdl, ofs,
379 period_bytes,
380 !azx_dev->no_period_wakeup);
381 if (ofs < 0)
382 goto error;
383 }
384 return 0;
385
386 error:
387 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
388 azx_dev->bufsize, period_bytes);
389 return -EINVAL;
390 }
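/*
 * Illustration (not part of the driver): one hypothetical run of the
 * pos_adj scaling above. bdl_pos_adj is given in frames at 48 kHz, scaled
 * to the stream rate, rounded up to the original alignment, then converted
 * to bytes (here assuming 2-channel S16, i.e. 4 bytes per frame).
 */
#if 0
#include <stdio.h>

int main(void)
{
	int pos_adj = 32, pos_align = 32, rate = 44100, bytes_per_frame = 4;

	pos_adj = (pos_adj * rate + 47999) / 48000;	/* 30 frames at 44.1 kHz */
	if (!pos_adj)
		pos_adj = pos_align;
	else
		pos_adj = ((pos_adj + pos_align - 1) / pos_align) * pos_align;	/* 32 */

	printf("%d bytes\n", pos_adj * bytes_per_frame);	/* prints 128 */
	return 0;
}
#endif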
391
392 /*
393 * PCM ops
394 */
395
396 static int azx_pcm_close(struct snd_pcm_substream *substream)
397 {
398 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
399 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
400 struct azx *chip = apcm->chip;
401 struct azx_dev *azx_dev = get_azx_dev(substream);
402 unsigned long flags;
403
404 mutex_lock(&chip->open_mutex);
405 spin_lock_irqsave(&chip->reg_lock, flags);
406 azx_dev->substream = NULL;
407 azx_dev->running = 0;
408 spin_unlock_irqrestore(&chip->reg_lock, flags);
409 azx_release_device(azx_dev);
410 hinfo->ops.close(hinfo, apcm->codec, substream);
411 snd_hda_power_down(apcm->codec);
412 mutex_unlock(&chip->open_mutex);
413 return 0;
414 }
415
416 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
417 struct snd_pcm_hw_params *hw_params)
418 {
419 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
420 struct azx *chip = apcm->chip;
421 int ret;
422
423 dsp_lock(get_azx_dev(substream));
424 if (dsp_is_locked(get_azx_dev(substream))) {
425 ret = -EBUSY;
426 goto unlock;
427 }
428
429 ret = chip->ops->substream_alloc_pages(chip, substream,
430 params_buffer_bytes(hw_params));
431 unlock:
432 dsp_unlock(get_azx_dev(substream));
433 return ret;
434 }
435
436 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
437 {
438 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
439 struct azx_dev *azx_dev = get_azx_dev(substream);
440 struct azx *chip = apcm->chip;
441 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
442 int err;
443
444 /* reset BDL address */
445 dsp_lock(azx_dev);
446 if (!dsp_is_locked(azx_dev)) {
447 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
448 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
449 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
450 azx_dev->bufsize = 0;
451 azx_dev->period_bytes = 0;
452 azx_dev->format_val = 0;
453 }
454
455 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
456
457 err = chip->ops->substream_free_pages(chip, substream);
458 azx_dev->prepared = 0;
459 dsp_unlock(azx_dev);
460 return err;
461 }
462
463 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
464 {
465 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
466 struct azx *chip = apcm->chip;
467 struct azx_dev *azx_dev = get_azx_dev(substream);
468 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
469 struct snd_pcm_runtime *runtime = substream->runtime;
470 unsigned int bufsize, period_bytes, format_val, stream_tag;
471 int err;
472 struct hda_spdif_out *spdif =
473 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
474 unsigned short ctls = spdif ? spdif->ctls : 0;
475
476 dsp_lock(azx_dev);
477 if (dsp_is_locked(azx_dev)) {
478 err = -EBUSY;
479 goto unlock;
480 }
481
482 azx_stream_reset(chip, azx_dev);
483 format_val = snd_hda_calc_stream_format(runtime->rate,
484 runtime->channels,
485 runtime->format,
486 hinfo->maxbps,
487 ctls);
488 if (!format_val) {
489 dev_err(chip->card->dev,
490 "invalid format_val, rate=%d, ch=%d, format=%d\n",
491 runtime->rate, runtime->channels, runtime->format);
492 err = -EINVAL;
493 goto unlock;
494 }
495
496 bufsize = snd_pcm_lib_buffer_bytes(substream);
497 period_bytes = snd_pcm_lib_period_bytes(substream);
498
499 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
500 bufsize, format_val);
501
502 if (bufsize != azx_dev->bufsize ||
503 period_bytes != azx_dev->period_bytes ||
504 format_val != azx_dev->format_val ||
505 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
506 azx_dev->bufsize = bufsize;
507 azx_dev->period_bytes = period_bytes;
508 azx_dev->format_val = format_val;
509 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
510 err = azx_setup_periods(chip, substream, azx_dev);
511 if (err < 0)
512 goto unlock;
513 }
514
515 /* when LPIB delay correction gives a small negative value,
516 * we ignore it; currently set the threshold statically to
517 * 64 frames
518 */
519 if (runtime->period_size > 64)
520 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
521 else
522 azx_dev->delay_negative_threshold = 0;
523
524 /* wallclk has a 24 MHz clock source */
525 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
526 runtime->rate) * 1000);
527 azx_setup_controller(chip, azx_dev);
528 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
529 azx_dev->fifo_size =
530 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
531 else
532 azx_dev->fifo_size = 0;
533
534 stream_tag = azx_dev->stream_tag;
535 /* CA-IBG chips need the playback stream starting from 1 */
536 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
537 stream_tag > chip->capture_streams)
538 stream_tag -= chip->capture_streams;
539 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
540 azx_dev->format_val, substream);
541
542 unlock:
543 if (!err)
544 azx_dev->prepared = 1;
545 dsp_unlock(azx_dev);
546 return err;
547 }
548
549 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
550 {
551 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
552 struct azx *chip = apcm->chip;
553 struct azx_dev *azx_dev;
554 struct snd_pcm_substream *s;
555 int rstart = 0, start, nsync = 0, sbits = 0;
556 int nwait, timeout;
557
558 azx_dev = get_azx_dev(substream);
559 trace_azx_pcm_trigger(chip, azx_dev, cmd);
560
561 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
562 return -EPIPE;
563
564 switch (cmd) {
565 case SNDRV_PCM_TRIGGER_START:
566 rstart = 1;
567 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
568 case SNDRV_PCM_TRIGGER_RESUME:
569 start = 1;
570 break;
571 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
572 case SNDRV_PCM_TRIGGER_SUSPEND:
573 case SNDRV_PCM_TRIGGER_STOP:
574 start = 0;
575 break;
576 default:
577 return -EINVAL;
578 }
579
580 snd_pcm_group_for_each_entry(s, substream) {
581 if (s->pcm->card != substream->pcm->card)
582 continue;
583 azx_dev = get_azx_dev(s);
584 sbits |= 1 << azx_dev->index;
585 nsync++;
586 snd_pcm_trigger_done(s, substream);
587 }
588
589 spin_lock(&chip->reg_lock);
590
591 /* first, set SYNC bits of corresponding streams */
592 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
593 azx_writel(chip, OLD_SSYNC,
594 azx_readl(chip, OLD_SSYNC) | sbits);
595 else
596 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
597
598 snd_pcm_group_for_each_entry(s, substream) {
599 if (s->pcm->card != substream->pcm->card)
600 continue;
601 azx_dev = get_azx_dev(s);
602 if (start) {
603 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
604 if (!rstart)
605 azx_dev->start_wallclk -=
606 azx_dev->period_wallclk;
607 azx_stream_start(chip, azx_dev);
608 } else {
609 azx_stream_stop(chip, azx_dev);
610 }
611 azx_dev->running = start;
612 }
613 spin_unlock(&chip->reg_lock);
614 if (start) {
615 /* wait until all FIFOs get ready */
616 for (timeout = 5000; timeout; timeout--) {
617 nwait = 0;
618 snd_pcm_group_for_each_entry(s, substream) {
619 if (s->pcm->card != substream->pcm->card)
620 continue;
621 azx_dev = get_azx_dev(s);
622 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
623 SD_STS_FIFO_READY))
624 nwait++;
625 }
626 if (!nwait)
627 break;
628 cpu_relax();
629 }
630 } else {
631 /* wait until all RUN bits are cleared */
632 for (timeout = 5000; timeout; timeout--) {
633 nwait = 0;
634 snd_pcm_group_for_each_entry(s, substream) {
635 if (s->pcm->card != substream->pcm->card)
636 continue;
637 azx_dev = get_azx_dev(s);
638 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
639 SD_CTL_DMA_START)
640 nwait++;
641 }
642 if (!nwait)
643 break;
644 cpu_relax();
645 }
646 }
647 spin_lock(&chip->reg_lock);
648 /* reset SYNC bits */
649 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
650 azx_writel(chip, OLD_SSYNC,
651 azx_readl(chip, OLD_SSYNC) & ~sbits);
652 else
653 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
654 if (start) {
655 azx_timecounter_init(substream, 0, 0);
656 if (nsync > 1) {
657 cycle_t cycle_last;
658
659 /* same start cycle for master and group */
660 azx_dev = get_azx_dev(substream);
661 cycle_last = azx_dev->azx_tc.cycle_last;
662
663 snd_pcm_group_for_each_entry(s, substream) {
664 if (s->pcm->card != substream->pcm->card)
665 continue;
666 azx_timecounter_init(s, 1, cycle_last);
667 }
668 }
669 }
670 spin_unlock(&chip->reg_lock);
671 return 0;
672 }
673
674 /* get the current DMA position with correction on VIA chips */
675 static unsigned int azx_via_get_position(struct azx *chip,
676 struct azx_dev *azx_dev)
677 {
678 unsigned int link_pos, mini_pos, bound_pos;
679 unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
680 unsigned int fifo_size;
681
682 link_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
683 if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
684 /* Playback, no problem using link position */
685 return link_pos;
686 }
687
688 /* Capture */
689 /* For newer chipsets,
690 * use modulo to get the DMA position just like the old chipsets
691 */
692 mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
693 mod_dma_pos %= azx_dev->period_bytes;
694
695 /* azx_dev->fifo_size doesn't hold the FIFO size of the input stream.
696 * Read it from base address + offset instead.
697 */
698 fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);
699
700 if (azx_dev->insufficient) {
701 /* Link position is never greater than FIFO size */
702 if (link_pos <= fifo_size)
703 return 0;
704
705 azx_dev->insufficient = 0;
706 }
707
708 if (link_pos <= fifo_size)
709 mini_pos = azx_dev->bufsize + link_pos - fifo_size;
710 else
711 mini_pos = link_pos - fifo_size;
712
713 /* Find nearest previous boundary */
714 mod_mini_pos = mini_pos % azx_dev->period_bytes;
715 mod_link_pos = link_pos % azx_dev->period_bytes;
716 if (mod_link_pos >= fifo_size)
717 bound_pos = link_pos - mod_link_pos;
718 else if (mod_dma_pos >= mod_mini_pos)
719 bound_pos = mini_pos - mod_mini_pos;
720 else {
721 bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
722 if (bound_pos >= azx_dev->bufsize)
723 bound_pos = 0;
724 }
725
726 /* Calculate real DMA position we want */
727 return bound_pos + mod_dma_pos;
728 }
729
730 unsigned int azx_get_position(struct azx *chip,
731 struct azx_dev *azx_dev,
732 bool with_check)
733 {
734 struct snd_pcm_substream *substream = azx_dev->substream;
735 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
736 unsigned int pos;
737 int stream = substream->stream;
738 struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
739 int delay = 0;
740
741 switch (chip->position_fix[stream]) {
742 case POS_FIX_LPIB:
743 /* read LPIB */
744 pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
745 break;
746 case POS_FIX_VIACOMBO:
747 pos = azx_via_get_position(chip, azx_dev);
748 break;
749 default:
750 /* use the position buffer */
751 pos = le32_to_cpu(*azx_dev->posbuf);
752 if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
753 if (!pos || pos == (u32)-1) {
754 dev_info(chip->card->dev,
755 "Invalid position buffer, using LPIB read method instead.\n");
756 chip->position_fix[stream] = POS_FIX_LPIB;
757 pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
758 } else
759 chip->position_fix[stream] = POS_FIX_POSBUF;
760 }
761 break;
762 }
763
764 if (pos >= azx_dev->bufsize)
765 pos = 0;
766
767 /* calculate runtime delay from LPIB */
768 if (substream->runtime &&
769 chip->position_fix[stream] == POS_FIX_POSBUF &&
770 (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
771 unsigned int lpib_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
772 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
773 delay = pos - lpib_pos;
774 else
775 delay = lpib_pos - pos;
776 if (delay < 0) {
777 if (delay >= azx_dev->delay_negative_threshold)
778 delay = 0;
779 else
780 delay += azx_dev->bufsize;
781 }
782 if (delay >= azx_dev->period_bytes) {
783 dev_info(chip->card->dev,
784 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
785 delay, azx_dev->period_bytes);
786 delay = 0;
787 chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
788 }
789 delay = bytes_to_frames(substream->runtime, delay);
790 }
791
792 if (substream->runtime) {
793 if (hinfo->ops.get_delay)
794 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
795 substream);
796 substream->runtime->delay = delay;
797 }
798
799 trace_azx_get_position(chip, azx_dev, pos, delay);
800 return pos;
801 }
802 EXPORT_SYMBOL_GPL(azx_get_position);
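/*
 * Illustration (not part of the driver): the LPIB-based delay estimate in
 * azx_get_position() for a hypothetical playback case. The position buffer
 * reports what the DMA engine has fetched, LPIB what the link has sent out;
 * the gap is data still queued in the controller. Values are made up and
 * assume 2-channel S16 (4 bytes per frame).
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int pos = 4096, lpib_pos = 3584;	/* bytes */
	unsigned int bytes_per_frame = 4;
	int delay = pos - lpib_pos;			/* playback: posbuf leads LPIB */

	printf("delay = %d frames\n", delay / bytes_per_frame);	/* prints 128 */
	return 0;
}
#endif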
803
804 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
805 {
806 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
807 struct azx *chip = apcm->chip;
808 struct azx_dev *azx_dev = get_azx_dev(substream);
809 return bytes_to_frames(substream->runtime,
810 azx_get_position(chip, azx_dev, false));
811 }
812
813 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
814 struct timespec *ts)
815 {
816 struct azx_dev *azx_dev = get_azx_dev(substream);
817 u64 nsec;
818
819 nsec = timecounter_read(&azx_dev->azx_tc);
820 nsec = div_u64(nsec, 3); /* can be optimized */
821 nsec = azx_adjust_codec_delay(substream, nsec);
822
823 *ts = ns_to_timespec(nsec);
824
825 return 0;
826 }
827
828 static struct snd_pcm_hardware azx_pcm_hw = {
829 .info = (SNDRV_PCM_INFO_MMAP |
830 SNDRV_PCM_INFO_INTERLEAVED |
831 SNDRV_PCM_INFO_BLOCK_TRANSFER |
832 SNDRV_PCM_INFO_MMAP_VALID |
833 /* No full-resume yet implemented */
834 /* SNDRV_PCM_INFO_RESUME |*/
835 SNDRV_PCM_INFO_PAUSE |
836 SNDRV_PCM_INFO_SYNC_START |
837 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
838 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
839 .formats = SNDRV_PCM_FMTBIT_S16_LE,
840 .rates = SNDRV_PCM_RATE_48000,
841 .rate_min = 48000,
842 .rate_max = 48000,
843 .channels_min = 2,
844 .channels_max = 2,
845 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
846 .period_bytes_min = 128,
847 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
848 .periods_min = 2,
849 .periods_max = AZX_MAX_FRAG,
850 .fifo_size = 0,
851 };
852
853 static int azx_pcm_open(struct snd_pcm_substream *substream)
854 {
855 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
856 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
857 struct azx *chip = apcm->chip;
858 struct azx_dev *azx_dev;
859 struct snd_pcm_runtime *runtime = substream->runtime;
860 unsigned long flags;
861 int err;
862 int buff_step;
863
864 mutex_lock(&chip->open_mutex);
865 azx_dev = azx_assign_device(chip, substream);
866 if (azx_dev == NULL) {
867 mutex_unlock(&chip->open_mutex);
868 return -EBUSY;
869 }
870 runtime->hw = azx_pcm_hw;
871 runtime->hw.channels_min = hinfo->channels_min;
872 runtime->hw.channels_max = hinfo->channels_max;
873 runtime->hw.formats = hinfo->formats;
874 runtime->hw.rates = hinfo->rates;
875 snd_pcm_limit_hw_rates(runtime);
876 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
877
878 /* avoid wrap-around with wall-clock */
879 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
880 20,
881 178000000);
882
883 if (chip->align_buffer_size)
884 /* constrain buffer sizes to be multiple of 128
885 bytes. This is more efficient in terms of memory
886 access but isn't required by the HDA spec and
887 prevents users from specifying exact period/buffer
888 sizes. For example for 44.1kHz, a period size set
889 to 20ms will be rounded to 19.59ms. */
890 buff_step = 128;
891 else
892 /* Don't enforce steps on buffer sizes, still need to
893 be multiple of 4 bytes (HDA spec). Tested on Intel
894 HDA controllers; may not work on all devices, in which
895 case this option needs to be disabled */
896 buff_step = 4;
897
898 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
899 buff_step);
900 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
901 buff_step);
902 snd_hda_power_up_d3wait(apcm->codec);
903 err = hinfo->ops.open(hinfo, apcm->codec, substream);
904 if (err < 0) {
905 azx_release_device(azx_dev);
906 snd_hda_power_down(apcm->codec);
907 mutex_unlock(&chip->open_mutex);
908 return err;
909 }
910 snd_pcm_limit_hw_rates(runtime);
911 /* sanity check */
912 if (snd_BUG_ON(!runtime->hw.channels_min) ||
913 snd_BUG_ON(!runtime->hw.channels_max) ||
914 snd_BUG_ON(!runtime->hw.formats) ||
915 snd_BUG_ON(!runtime->hw.rates)) {
916 azx_release_device(azx_dev);
917 hinfo->ops.close(hinfo, apcm->codec, substream);
918 snd_hda_power_down(apcm->codec);
919 mutex_unlock(&chip->open_mutex);
920 return -EINVAL;
921 }
922
923 /* disable WALLCLOCK timestamps for capture streams
924 until we figure out how to handle digital inputs */
925 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
926 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
927
928 spin_lock_irqsave(&chip->reg_lock, flags);
929 azx_dev->substream = substream;
930 azx_dev->running = 0;
931 spin_unlock_irqrestore(&chip->reg_lock, flags);
932
933 runtime->private_data = azx_dev;
934 snd_pcm_set_sync(substream);
935 mutex_unlock(&chip->open_mutex);
936 return 0;
937 }
938
939 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
940 struct vm_area_struct *area)
941 {
942 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
943 struct azx *chip = apcm->chip;
944 if (chip->ops->pcm_mmap_prepare)
945 chip->ops->pcm_mmap_prepare(substream, area);
946 return snd_pcm_lib_default_mmap(substream, area);
947 }
948
949 static struct snd_pcm_ops azx_pcm_ops = {
950 .open = azx_pcm_open,
951 .close = azx_pcm_close,
952 .ioctl = snd_pcm_lib_ioctl,
953 .hw_params = azx_pcm_hw_params,
954 .hw_free = azx_pcm_hw_free,
955 .prepare = azx_pcm_prepare,
956 .trigger = azx_pcm_trigger,
957 .pointer = azx_pcm_pointer,
958 .wall_clock = azx_get_wallclock_tstamp,
959 .mmap = azx_pcm_mmap,
960 .page = snd_pcm_sgbuf_ops_page,
961 };
962
963 static void azx_pcm_free(struct snd_pcm *pcm)
964 {
965 struct azx_pcm *apcm = pcm->private_data;
966 if (apcm) {
967 list_del(&apcm->list);
968 kfree(apcm);
969 }
970 }
971
972 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
973
974 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
975 struct hda_pcm *cpcm)
976 {
977 struct azx *chip = bus->private_data;
978 struct snd_pcm *pcm;
979 struct azx_pcm *apcm;
980 int pcm_dev = cpcm->device;
981 unsigned int size;
982 int s, err;
983
984 list_for_each_entry(apcm, &chip->pcm_list, list) {
985 if (apcm->pcm->device == pcm_dev) {
986 dev_err(chip->card->dev, "PCM %d already exists\n",
987 pcm_dev);
988 return -EBUSY;
989 }
990 }
991 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
992 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
993 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
994 &pcm);
995 if (err < 0)
996 return err;
997 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
998 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
999 if (apcm == NULL)
1000 return -ENOMEM;
1001 apcm->chip = chip;
1002 apcm->pcm = pcm;
1003 apcm->codec = codec;
1004 pcm->private_data = apcm;
1005 pcm->private_free = azx_pcm_free;
1006 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
1007 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
1008 list_add_tail(&apcm->list, &chip->pcm_list);
1009 cpcm->pcm = pcm;
1010 for (s = 0; s < 2; s++) {
1011 apcm->hinfo[s] = &cpcm->stream[s];
1012 if (cpcm->stream[s].substreams)
1013 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
1014 }
1015 /* buffer pre-allocation */
1016 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
1017 if (size > MAX_PREALLOC_SIZE)
1018 size = MAX_PREALLOC_SIZE;
1019 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
1020 chip->card->dev,
1021 size, MAX_PREALLOC_SIZE);
1022 /* link to codec */
1023 pcm->dev = &codec->dev;
1024 return 0;
1025 }
1026
1027 /*
1028 * CORB / RIRB interface
1029 */
1030 static int azx_alloc_cmd_io(struct azx *chip)
1031 {
1032 int err;
1033
1034 /* single page (at least 4096 bytes) must suffice for both ring buffers */
1035 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1036 PAGE_SIZE, &chip->rb);
1037 if (err < 0)
1038 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
1039 return err;
1040 }
1041 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
1042
1043 static void azx_init_cmd_io(struct azx *chip)
1044 {
1045 int timeout;
1046
1047 spin_lock_irq(&chip->reg_lock);
1048 /* CORB set up */
1049 chip->corb.addr = chip->rb.addr;
1050 chip->corb.buf = (u32 *)chip->rb.area;
1051 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
1052 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
1053
1054 /* set the corb size to 256 entries (ULI requires this explicitly) */
1055 azx_writeb(chip, CORBSIZE, 0x02);
1056 /* set the corb write pointer to 0 */
1057 azx_writew(chip, CORBWP, 0);
1058
1059 /* reset the corb hw read pointer */
1060 azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
1061 for (timeout = 1000; timeout > 0; timeout--) {
1062 if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
1063 break;
1064 udelay(1);
1065 }
1066 if (timeout <= 0)
1067 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1068 azx_readw(chip, CORBRP));
1069
1070 azx_writew(chip, CORBRP, 0);
1071 for (timeout = 1000; timeout > 0; timeout--) {
1072 if (azx_readw(chip, CORBRP) == 0)
1073 break;
1074 udelay(1);
1075 }
1076 if (timeout <= 0)
1077 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1078 azx_readw(chip, CORBRP));
1079
1080 /* enable corb dma */
1081 azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
1082
1083 /* RIRB set up */
1084 chip->rirb.addr = chip->rb.addr + 2048;
1085 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1086 chip->rirb.wp = chip->rirb.rp = 0;
1087 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1088 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1089 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1090
1091 /* set the rirb size to 256 entries (ULI requires this explicitly) */
1092 azx_writeb(chip, RIRBSIZE, 0x02);
1093 /* reset the rirb hw write pointer */
1094 azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
1095 /* set N=1, get RIRB response interrupt for new entry */
1096 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1097 azx_writew(chip, RINTCNT, 0xc0);
1098 else
1099 azx_writew(chip, RINTCNT, 1);
1100 /* enable rirb dma and response irq */
1101 azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
1102 spin_unlock_irq(&chip->reg_lock);
1103 }
1104 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
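/*
 * Illustration (not part of the driver): how the single page allocated in
 * azx_alloc_cmd_io() is split by azx_init_cmd_io() above. Offsets and entry
 * counts follow the code; this is only a summary sketch.
 *
 *   offset 0    : CORB, 256 entries x 4 bytes = 1024 bytes (verbs to codecs)
 *   offset 2048 : RIRB, 256 entries x 8 bytes = 2048 bytes (responses + ext)
 */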
1105
1106 static void azx_free_cmd_io(struct azx *chip)
1107 {
1108 spin_lock_irq(&chip->reg_lock);
1109 /* disable ringbuffer DMAs */
1110 azx_writeb(chip, RIRBCTL, 0);
1111 azx_writeb(chip, CORBCTL, 0);
1112 spin_unlock_irq(&chip->reg_lock);
1113 }
1114 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1115
1116 static unsigned int azx_command_addr(u32 cmd)
1117 {
1118 unsigned int addr = cmd >> 28;
1119
1120 if (addr >= AZX_MAX_CODECS) {
1121 snd_BUG();
1122 addr = 0;
1123 }
1124
1125 return addr;
1126 }
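/*
 * Illustration (not part of the driver): the verb layout that
 * azx_command_addr() decodes - the codec address lives in bits 31:28, as
 * composed e.g. in probe_codec() below. A minimal user-space sketch; the
 * field values are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 2, nid = 0x00, verb = 0xf00, param = 0x00;
	uint32_t cmd = (addr << 28) | (nid << 20) | (verb << 8) | param;

	printf("cmd = 0x%08x, addr = %u\n", cmd, cmd >> 28);	/* 0x200f0000, 2 */
	return 0;
}
#endif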
1127
1128 /* send a command */
1129 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1130 {
1131 struct azx *chip = bus->private_data;
1132 unsigned int addr = azx_command_addr(val);
1133 unsigned int wp, rp;
1134
1135 spin_lock_irq(&chip->reg_lock);
1136
1137 /* add command to corb */
1138 wp = azx_readw(chip, CORBWP);
1139 if (wp == 0xffff) {
1140 /* something wrong, controller likely turned to D3 */
1141 spin_unlock_irq(&chip->reg_lock);
1142 return -EIO;
1143 }
1144 wp++;
1145 wp %= ICH6_MAX_CORB_ENTRIES;
1146
1147 rp = azx_readw(chip, CORBRP);
1148 if (wp == rp) {
1149 /* oops, it's full */
1150 spin_unlock_irq(&chip->reg_lock);
1151 return -EAGAIN;
1152 }
1153
1154 chip->rirb.cmds[addr]++;
1155 chip->corb.buf[wp] = cpu_to_le32(val);
1156 azx_writew(chip, CORBWP, wp);
1157
1158 spin_unlock_irq(&chip->reg_lock);
1159
1160 return 0;
1161 }
1162
1163 #define ICH6_RIRB_EX_UNSOL_EV (1<<4)
1164
1165 /* retrieve RIRB entry - called from interrupt handler */
1166 static void azx_update_rirb(struct azx *chip)
1167 {
1168 unsigned int rp, wp;
1169 unsigned int addr;
1170 u32 res, res_ex;
1171
1172 wp = azx_readw(chip, RIRBWP);
1173 if (wp == 0xffff) {
1174 /* something wrong, controller likely turned to D3 */
1175 return;
1176 }
1177
1178 if (wp == chip->rirb.wp)
1179 return;
1180 chip->rirb.wp = wp;
1181
1182 while (chip->rirb.rp != wp) {
1183 chip->rirb.rp++;
1184 chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;
1185
1186 rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
1187 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1188 res = le32_to_cpu(chip->rirb.buf[rp]);
1189 addr = res_ex & 0xf;
1190 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1191 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1192 res, res_ex,
1193 chip->rirb.rp, wp);
1194 snd_BUG();
1195 }
1196 else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
1197 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1198 else if (chip->rirb.cmds[addr]) {
1199 chip->rirb.res[addr] = res;
1200 smp_wmb();
1201 chip->rirb.cmds[addr]--;
1202 } else if (printk_ratelimit()) {
1203 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1204 res, res_ex,
1205 chip->last_cmd[addr]);
1206 }
1207 }
1208 }
1209
1210 /* receive a response */
1211 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1212 unsigned int addr)
1213 {
1214 struct azx *chip = bus->private_data;
1215 unsigned long timeout;
1216 unsigned long loopcounter;
1217 int do_poll = 0;
1218
1219 again:
1220 timeout = jiffies + msecs_to_jiffies(1000);
1221
1222 for (loopcounter = 0;; loopcounter++) {
1223 if (chip->polling_mode || do_poll) {
1224 spin_lock_irq(&chip->reg_lock);
1225 azx_update_rirb(chip);
1226 spin_unlock_irq(&chip->reg_lock);
1227 }
1228 if (!chip->rirb.cmds[addr]) {
1229 smp_rmb();
1230 bus->rirb_error = 0;
1231
1232 if (!do_poll)
1233 chip->poll_count = 0;
1234 return chip->rirb.res[addr]; /* the last value */
1235 }
1236 if (time_after(jiffies, timeout))
1237 break;
1238 if (bus->needs_damn_long_delay || loopcounter > 3000)
1239 msleep(2); /* temporary workaround */
1240 else {
1241 udelay(10);
1242 cond_resched();
1243 }
1244 }
1245
1246 if (!bus->no_response_fallback)
1247 return -1;
1248
1249 if (!chip->polling_mode && chip->poll_count < 2) {
1250 dev_dbg(chip->card->dev,
1251 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1252 chip->last_cmd[addr]);
1253 do_poll = 1;
1254 chip->poll_count++;
1255 goto again;
1256 }
1257
1258
1259 if (!chip->polling_mode) {
1260 dev_warn(chip->card->dev,
1261 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1262 chip->last_cmd[addr]);
1263 chip->polling_mode = 1;
1264 goto again;
1265 }
1266
1267 if (chip->msi) {
1268 dev_warn(chip->card->dev,
1269 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1270 chip->last_cmd[addr]);
1271 if (chip->ops->disable_msi_reset_irq &&
1272 chip->ops->disable_msi_reset_irq(chip) < 0) {
1273 bus->rirb_error = 1;
1274 return -1;
1275 }
1276 goto again;
1277 }
1278
1279 if (chip->probing) {
1280 /* If this critical timeout happens during the codec probing
1281 * phase, this is likely an access to a non-existing codec
1282 * slot. Better to return an error and reset the system.
1283 */
1284 return -1;
1285 }
1286
1287 /* a fatal communication error; need either to reset or to fallback
1288 * to the single_cmd mode
1289 */
1290 bus->rirb_error = 1;
1291 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1292 bus->response_reset = 1;
1293 return -1; /* give a chance to retry */
1294 }
1295
1296 dev_err(chip->card->dev,
1297 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1298 chip->last_cmd[addr]);
1299 chip->single_cmd = 1;
1300 bus->response_reset = 0;
1301 /* release CORB/RIRB */
1302 azx_free_cmd_io(chip);
1303 /* disable unsolicited responses */
1304 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
1305 return -1;
1306 }
1307
1308 /*
1309 * Use the single immediate command instead of CORB/RIRB for simplicity
1310 *
1311 * Note: according to Intel, this is not the preferred use. The command was
1312 * intended for the BIOS only, and may get confused with unsolicited
1313 * responses. So, we shouldn't use it for normal operation from the
1314 * driver.
1315 * The code is left here, however, for debugging/testing purposes.
1316 */
1317
1318 /* receive a response */
1319 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1320 {
1321 int timeout = 50;
1322
1323 while (timeout--) {
1324 /* check IRV busy bit */
1325 if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
1326 /* reuse rirb.res as the response return value */
1327 chip->rirb.res[addr] = azx_readl(chip, IR);
1328 return 0;
1329 }
1330 udelay(1);
1331 }
1332 if (printk_ratelimit())
1333 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1334 azx_readw(chip, IRS));
1335 chip->rirb.res[addr] = -1;
1336 return -EIO;
1337 }
1338
1339 /* send a command */
1340 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1341 {
1342 struct azx *chip = bus->private_data;
1343 unsigned int addr = azx_command_addr(val);
1344 int timeout = 50;
1345
1346 bus->rirb_error = 0;
1347 while (timeout--) {
1348 /* check ICB busy bit */
1349 if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
1350 /* Clear IRV valid bit */
1351 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1352 ICH6_IRS_VALID);
1353 azx_writel(chip, IC, val);
1354 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1355 ICH6_IRS_BUSY);
1356 return azx_single_wait_for_response(chip, addr);
1357 }
1358 udelay(1);
1359 }
1360 if (printk_ratelimit())
1361 dev_dbg(chip->card->dev,
1362 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1363 azx_readw(chip, IRS), val);
1364 return -EIO;
1365 }
1366
1367 /* receive a response */
1368 static unsigned int azx_single_get_response(struct hda_bus *bus,
1369 unsigned int addr)
1370 {
1371 struct azx *chip = bus->private_data;
1372 return chip->rirb.res[addr];
1373 }
1374
1375 /*
1376 * The below are the main callbacks from hda_codec.
1377 *
1378 * They are just the skeleton to call sub-callbacks according to the
1379 * current setting of chip->single_cmd.
1380 */
1381
1382 /* send a command */
1383 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1384 {
1385 struct azx *chip = bus->private_data;
1386
1387 if (chip->disabled)
1388 return 0;
1389 chip->last_cmd[azx_command_addr(val)] = val;
1390 if (chip->single_cmd)
1391 return azx_single_send_cmd(bus, val);
1392 else
1393 return azx_corb_send_cmd(bus, val);
1394 }
1395 EXPORT_SYMBOL_GPL(azx_send_cmd);
1396
1397 /* get a response */
1398 static unsigned int azx_get_response(struct hda_bus *bus,
1399 unsigned int addr)
1400 {
1401 struct azx *chip = bus->private_data;
1402 if (chip->disabled)
1403 return 0;
1404 if (chip->single_cmd)
1405 return azx_single_get_response(bus, addr);
1406 else
1407 return azx_rirb_get_response(bus, addr);
1408 }
1409 EXPORT_SYMBOL_GPL(azx_get_response);
1410
1411 #ifdef CONFIG_SND_HDA_DSP_LOADER
1412 /*
1413 * DSP loading code (e.g. for CA0132)
1414 */
1415
1416 /* use the first stream for loading DSP */
1417 static struct azx_dev *
1418 azx_get_dsp_loader_dev(struct azx *chip)
1419 {
1420 return &chip->azx_dev[chip->playback_index_offset];
1421 }
1422
1423 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1424 unsigned int byte_size,
1425 struct snd_dma_buffer *bufp)
1426 {
1427 u32 *bdl;
1428 struct azx *chip = bus->private_data;
1429 struct azx_dev *azx_dev;
1430 int err;
1431
1432 azx_dev = azx_get_dsp_loader_dev(chip);
1433
1434 dsp_lock(azx_dev);
1435 spin_lock_irq(&chip->reg_lock);
1436 if (azx_dev->running || azx_dev->locked) {
1437 spin_unlock_irq(&chip->reg_lock);
1438 err = -EBUSY;
1439 goto unlock;
1440 }
1441 azx_dev->prepared = 0;
1442 chip->saved_azx_dev = *azx_dev;
1443 azx_dev->locked = 1;
1444 spin_unlock_irq(&chip->reg_lock);
1445
1446 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1447 byte_size, bufp);
1448 if (err < 0)
1449 goto err_alloc;
1450
1451 azx_dev->bufsize = byte_size;
1452 azx_dev->period_bytes = byte_size;
1453 azx_dev->format_val = format;
1454
1455 azx_stream_reset(chip, azx_dev);
1456
1457 /* reset BDL address */
1458 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1459 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1460
1461 azx_dev->frags = 0;
1462 bdl = (u32 *)azx_dev->bdl.area;
1463 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1464 if (err < 0)
1465 goto error;
1466
1467 azx_setup_controller(chip, azx_dev);
1468 dsp_unlock(azx_dev);
1469 return azx_dev->stream_tag;
1470
1471 error:
1472 chip->ops->dma_free_pages(chip, bufp);
1473 err_alloc:
1474 spin_lock_irq(&chip->reg_lock);
1475 if (azx_dev->opened)
1476 *azx_dev = chip->saved_azx_dev;
1477 azx_dev->locked = 0;
1478 spin_unlock_irq(&chip->reg_lock);
1479 unlock:
1480 dsp_unlock(azx_dev);
1481 return err;
1482 }
1483
1484 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1485 {
1486 struct azx *chip = bus->private_data;
1487 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1488
1489 if (start)
1490 azx_stream_start(chip, azx_dev);
1491 else
1492 azx_stream_stop(chip, azx_dev);
1493 azx_dev->running = start;
1494 }
1495
1496 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1497 struct snd_dma_buffer *dmab)
1498 {
1499 struct azx *chip = bus->private_data;
1500 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1501
1502 if (!dmab->area || !azx_dev->locked)
1503 return;
1504
1505 dsp_lock(azx_dev);
1506 /* reset BDL address */
1507 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1508 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1509 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1510 azx_dev->bufsize = 0;
1511 azx_dev->period_bytes = 0;
1512 azx_dev->format_val = 0;
1513
1514 chip->ops->dma_free_pages(chip, dmab);
1515 dmab->area = NULL;
1516
1517 spin_lock_irq(&chip->reg_lock);
1518 if (azx_dev->opened)
1519 *azx_dev = chip->saved_azx_dev;
1520 azx_dev->locked = 0;
1521 spin_unlock_irq(&chip->reg_lock);
1522 dsp_unlock(azx_dev);
1523 }
1524 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1525
1526 int azx_alloc_stream_pages(struct azx *chip)
1527 {
1528 int i, err;
1529 struct snd_card *card = chip->card;
1530
1531 for (i = 0; i < chip->num_streams; i++) {
1532 dsp_lock_init(&chip->azx_dev[i]);
1533 /* allocate memory for the BDL for each stream */
1534 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1535 BDL_SIZE,
1536 &chip->azx_dev[i].bdl);
1537 if (err < 0) {
1538 dev_err(card->dev, "cannot allocate BDL\n");
1539 return -ENOMEM;
1540 }
1541 }
1542 /* allocate memory for the position buffer */
1543 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1544 chip->num_streams * 8, &chip->posbuf);
1545 if (err < 0) {
1546 dev_err(card->dev, "cannot allocate posbuf\n");
1547 return -ENOMEM;
1548 }
1549
1550 /* allocate CORB/RIRB */
1551 err = azx_alloc_cmd_io(chip);
1552 if (err < 0)
1553 return err;
1554 return 0;
1555 }
1556 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1557
1558 void azx_free_stream_pages(struct azx *chip)
1559 {
1560 int i;
1561 if (chip->azx_dev) {
1562 for (i = 0; i < chip->num_streams; i++)
1563 if (chip->azx_dev[i].bdl.area)
1564 chip->ops->dma_free_pages(
1565 chip, &chip->azx_dev[i].bdl);
1566 }
1567 if (chip->rb.area)
1568 chip->ops->dma_free_pages(chip, &chip->rb);
1569 if (chip->posbuf.area)
1570 chip->ops->dma_free_pages(chip, &chip->posbuf);
1571 }
1572 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1573
1574 /*
1575 * Lowlevel interface
1576 */
1577
1578 /* enter link reset */
1579 void azx_enter_link_reset(struct azx *chip)
1580 {
1581 unsigned long timeout;
1582
1583 /* reset controller */
1584 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
1585
1586 timeout = jiffies + msecs_to_jiffies(100);
1587 while ((azx_readb(chip, GCTL) & ICH6_GCTL_RESET) &&
1588 time_before(jiffies, timeout))
1589 usleep_range(500, 1000);
1590 }
1591 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1592
1593 /* exit link reset */
1594 static void azx_exit_link_reset(struct azx *chip)
1595 {
1596 unsigned long timeout;
1597
1598 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
1599
1600 timeout = jiffies + msecs_to_jiffies(100);
1601 while (!azx_readb(chip, GCTL) &&
1602 time_before(jiffies, timeout))
1603 usleep_range(500, 1000);
1604 }
1605
1606 /* reset codec link */
1607 static int azx_reset(struct azx *chip, int full_reset)
1608 {
1609 if (!full_reset)
1610 goto __skip;
1611
1612 /* clear STATESTS */
1613 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1614
1615 /* reset controller */
1616 azx_enter_link_reset(chip);
1617
1618 /* delay for >= 100us for codec PLL to settle per spec
1619 * Rev 0.9 section 5.5.1
1620 */
1621 usleep_range(500, 1000);
1622
1623 /* Bring controller out of reset */
1624 azx_exit_link_reset(chip);
1625
1626 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1627 usleep_range(1000, 1200);
1628
1629 __skip:
1630 /* check to see if controller is ready */
1631 if (!azx_readb(chip, GCTL)) {
1632 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1633 return -EBUSY;
1634 }
1635
1636 /* Accept unsolicited responses */
1637 if (!chip->single_cmd)
1638 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1639 ICH6_GCTL_UNSOL);
1640
1641 /* detect codecs */
1642 if (!chip->codec_mask) {
1643 chip->codec_mask = azx_readw(chip, STATESTS);
1644 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1645 chip->codec_mask);
1646 }
1647
1648 return 0;
1649 }
1650
1651 /* enable interrupts */
1652 static void azx_int_enable(struct azx *chip)
1653 {
1654 /* enable controller CIE and GIE */
1655 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1656 ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
1657 }
1658
1659 /* disable interrupts */
1660 static void azx_int_disable(struct azx *chip)
1661 {
1662 int i;
1663
1664 /* disable interrupts in stream descriptor */
1665 for (i = 0; i < chip->num_streams; i++) {
1666 struct azx_dev *azx_dev = &chip->azx_dev[i];
1667 azx_sd_writeb(chip, azx_dev, SD_CTL,
1668 azx_sd_readb(chip, azx_dev, SD_CTL) &
1669 ~SD_INT_MASK);
1670 }
1671
1672 /* disable SIE for all streams */
1673 azx_writeb(chip, INTCTL, 0);
1674
1675 /* disable controller CIE and GIE */
1676 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1677 ~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
1678 }
1679
1680 /* clear interrupts */
1681 static void azx_int_clear(struct azx *chip)
1682 {
1683 int i;
1684
1685 /* clear stream status */
1686 for (i = 0; i < chip->num_streams; i++) {
1687 struct azx_dev *azx_dev = &chip->azx_dev[i];
1688 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1689 }
1690
1691 /* clear STATESTS */
1692 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1693
1694 /* clear rirb status */
1695 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1696
1697 /* clear int status */
1698 azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
1699 }
1700
1701 /*
1702 * reset and start the controller registers
1703 */
1704 void azx_init_chip(struct azx *chip, int full_reset)
1705 {
1706 if (chip->initialized)
1707 return;
1708
1709 /* reset controller */
1710 azx_reset(chip, full_reset);
1711
1712 /* initialize interrupts */
1713 azx_int_clear(chip);
1714 azx_int_enable(chip);
1715
1716 /* initialize the codec command I/O */
1717 if (!chip->single_cmd)
1718 azx_init_cmd_io(chip);
1719
1720 /* program the position buffer */
1721 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1722 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1723
1724 chip->initialized = 1;
1725 }
1726 EXPORT_SYMBOL_GPL(azx_init_chip);
1727
1728 void azx_stop_chip(struct azx *chip)
1729 {
1730 if (!chip->initialized)
1731 return;
1732
1733 /* disable interrupts */
1734 azx_int_disable(chip);
1735 azx_int_clear(chip);
1736
1737 /* disable CORB/RIRB */
1738 azx_free_cmd_io(chip);
1739
1740 /* disable position buffer */
1741 azx_writel(chip, DPLBASE, 0);
1742 azx_writel(chip, DPUBASE, 0);
1743
1744 chip->initialized = 0;
1745 }
1746 EXPORT_SYMBOL_GPL(azx_stop_chip);
1747
1748 /*
1749 * interrupt handler
1750 */
1751 irqreturn_t azx_interrupt(int irq, void *dev_id)
1752 {
1753 struct azx *chip = dev_id;
1754 struct azx_dev *azx_dev;
1755 u32 status;
1756 u8 sd_status;
1757 int i;
1758
1759 #ifdef CONFIG_PM_RUNTIME
1760 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1761 if (chip->card->dev->power.runtime_status != RPM_ACTIVE)
1762 return IRQ_NONE;
1763 #endif
1764
1765 spin_lock(&chip->reg_lock);
1766
1767 if (chip->disabled) {
1768 spin_unlock(&chip->reg_lock);
1769 return IRQ_NONE;
1770 }
1771
1772 status = azx_readl(chip, INTSTS);
1773 if (status == 0 || status == 0xffffffff) {
1774 spin_unlock(&chip->reg_lock);
1775 return IRQ_NONE;
1776 }
1777
1778 for (i = 0; i < chip->num_streams; i++) {
1779 azx_dev = &chip->azx_dev[i];
1780 if (status & azx_dev->sd_int_sta_mask) {
1781 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1782 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1783 if (!azx_dev->substream || !azx_dev->running ||
1784 !(sd_status & SD_INT_COMPLETE))
1785 continue;
1786 /* check whether this IRQ is really acceptable */
1787 if (!chip->ops->position_check ||
1788 chip->ops->position_check(chip, azx_dev)) {
1789 spin_unlock(&chip->reg_lock);
1790 snd_pcm_period_elapsed(azx_dev->substream);
1791 spin_lock(&chip->reg_lock);
1792 }
1793 }
1794 }
1795
1796 /* clear rirb int */
1797 status = azx_readb(chip, RIRBSTS);
1798 if (status & RIRB_INT_MASK) {
1799 if (status & RIRB_INT_RESPONSE) {
1800 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1801 udelay(80);
1802 azx_update_rirb(chip);
1803 }
1804 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1805 }
1806
1807 spin_unlock(&chip->reg_lock);
1808
1809 return IRQ_HANDLED;
1810 }
1811 EXPORT_SYMBOL_GPL(azx_interrupt);
1812
1813 /*
1814 * Codec interface
1815 */
1816
1817 /*
1818 * Probe the given codec address
1819 */
1820 static int probe_codec(struct azx *chip, int addr)
1821 {
1822 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1823 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1824 unsigned int res;
1825
1826 mutex_lock(&chip->bus->cmd_mutex);
1827 chip->probing = 1;
1828 azx_send_cmd(chip->bus, cmd);
1829 res = azx_get_response(chip->bus, addr);
1830 chip->probing = 0;
1831 mutex_unlock(&chip->bus->cmd_mutex);
1832 if (res == -1)
1833 return -EIO;
1834 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1835 return 0;
1836 }
1837
1838 static void azx_bus_reset(struct hda_bus *bus)
1839 {
1840 struct azx *chip = bus->private_data;
1841
1842 bus->in_reset = 1;
1843 azx_stop_chip(chip);
1844 azx_init_chip(chip, 1);
1845 #ifdef CONFIG_PM
1846 if (chip->initialized) {
1847 struct azx_pcm *p;
1848 list_for_each_entry(p, &chip->pcm_list, list)
1849 snd_pcm_suspend_all(p->pcm);
1850 snd_hda_suspend(chip->bus);
1851 snd_hda_resume(chip->bus);
1852 }
1853 #endif
1854 bus->in_reset = 0;
1855 }
1856
1857 #ifdef CONFIG_PM
1858 /* power-up/down the controller */
1859 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1860 {
1861 struct azx *chip = bus->private_data;
1862
1863 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1864 return;
1865
1866 if (power_up)
1867 pm_runtime_get_sync(chip->card->dev);
1868 else
1869 pm_runtime_put_sync(chip->card->dev);
1870 }
1871 #endif
1872
1873 static int get_jackpoll_interval(struct azx *chip)
1874 {
1875 int i;
1876 unsigned int j;
1877
1878 if (!chip->jackpoll_ms)
1879 return 0;
1880
1881 i = chip->jackpoll_ms[chip->dev_index];
1882 if (i == 0)
1883 return 0;
1884 if (i < 50 || i > 60000)
1885 j = 0;
1886 else
1887 j = msecs_to_jiffies(i);
1888 if (j == 0)
1889 dev_warn(chip->card->dev,
1890 "jackpoll_ms value out of range: %d\n", i);
1891 return j;
1892 }
1893
1894 /* Codec initialization */
1895 int azx_codec_create(struct azx *chip, const char *model,
1896 unsigned int max_slots,
1897 int *power_save_to)
1898 {
1899 struct hda_bus_template bus_temp;
1900 int c, codecs, err;
1901
1902 memset(&bus_temp, 0, sizeof(bus_temp));
1903 bus_temp.private_data = chip;
1904 bus_temp.modelname = model;
1905 bus_temp.pci = chip->pci;
1906 bus_temp.ops.command = azx_send_cmd;
1907 bus_temp.ops.get_response = azx_get_response;
1908 bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1909 bus_temp.ops.bus_reset = azx_bus_reset;
1910 #ifdef CONFIG_PM
1911 bus_temp.power_save = power_save_to;
1912 bus_temp.ops.pm_notify = azx_power_notify;
1913 #endif
1914 #ifdef CONFIG_SND_HDA_DSP_LOADER
1915 bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1916 bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1917 bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1918 #endif
1919
1920 err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
1921 if (err < 0)
1922 return err;
1923
1924 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1925 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1926 chip->bus->needs_damn_long_delay = 1;
1927 }
1928
1929 codecs = 0;
1930 if (!max_slots)
1931 max_slots = AZX_DEFAULT_CODECS;
1932
1933 /* First try to probe all given codec slots */
1934 for (c = 0; c < max_slots; c++) {
1935 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1936 if (probe_codec(chip, c) < 0) {
1937 /* Some BIOSen give you wrong codec addresses
1938 * that don't exist
1939 */
1940 dev_warn(chip->card->dev,
1941 "Codec #%d probe error; disabling it...\n", c);
1942 chip->codec_mask &= ~(1 << c);
1943 /* Worse, accessing a non-existing
1944 * codec often screws up the controller chip
1945 * and disturbs further communication.
1946 * Thus if an error occurs during probing,
1947 * it is better to reset the controller chip
1948 * to get back to a sane state.
1949 */
1950 azx_stop_chip(chip);
1951 azx_init_chip(chip, 1);
1952 }
1953 }
1954 }
1955
1956 /* AMD chipsets often cause communication stalls upon certain
1957 * sequences like pin-detection. It seems that forcing the synced
1958 * access works around the stall. Grrr...
1959 */
1960 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1961 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1962 chip->bus->sync_write = 1;
1963 chip->bus->allow_bus_reset = 1;
1964 }
1965
1966 /* Then create codec instances */
1967 for (c = 0; c < max_slots; c++) {
1968 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1969 struct hda_codec *codec;
1970 err = snd_hda_codec_new(chip->bus, c, &codec);
1971 if (err < 0)
1972 continue;
1973 codec->jackpoll_interval = get_jackpoll_interval(chip);
1974 codec->beep_mode = chip->beep_mode;
1975 codecs++;
1976 }
1977 }
1978 if (!codecs) {
1979 dev_err(chip->card->dev, "no codecs initialized\n");
1980 return -ENXIO;
1981 }
1982 return 0;
1983 }
1984 EXPORT_SYMBOL_GPL(azx_codec_create);
1985
1986 /* configure each codec instance */
1987 int azx_codec_configure(struct azx *chip)
1988 {
1989 struct hda_codec *codec;
1990 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1991 snd_hda_codec_configure(codec);
1992 }
1993 return 0;
1994 }
1995 EXPORT_SYMBOL_GPL(azx_codec_configure);
1996
1997 /* mixer creation - all stuff is implemented in hda module */
1998 int azx_mixer_create(struct azx *chip)
1999 {
2000 return snd_hda_build_controls(chip->bus);
2001 }
2002 EXPORT_SYMBOL_GPL(azx_mixer_create);
2003
2004
2005 /* initialize SD streams */
2006 int azx_init_stream(struct azx *chip)
2007 {
2008 int i;
2009
2010 /* initialize each stream (aka device)
2011 * assign the starting bdl address to each stream (device)
2012 * and initialize
2013 */
2014 for (i = 0; i < chip->num_streams; i++) {
2015 struct azx_dev *azx_dev = &chip->azx_dev[i];
2016 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
2017 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
2018 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
2019 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
2020 azx_dev->sd_int_sta_mask = 1 << i;
2021 /* stream tag: must be non-zero and unique */
2022 azx_dev->index = i;
2023 azx_dev->stream_tag = i + 1;
2024 }
2025
2026 return 0;
2027 }
2028 EXPORT_SYMBOL_GPL(azx_init_stream);
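/*
 * Illustration (not part of the driver): the per-stream values computed in
 * azx_init_stream() above, spelled out for one hypothetical stream index.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int i = 5;	/* e.g. an output stream on a controller with 4 capture streams */

	printf("sd offset = 0x%x, int mask = 0x%02x, stream tag = %d\n",
	       0x80 + 0x20 * i, 1 << i, i + 1);		/* 0x120, 0x20, 6 */
	return 0;
}
#endif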
2029
2030 MODULE_LICENSE("GPL");
2031 MODULE_DESCRIPTION("Common HDA driver functions");