ALSA: hda - Replace open codes with snd_hdac_stream_set_params()
deliverable/linux.git: sound/pci/hda/hda_controller.c
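For context: this commit replaces the open-coded stream parameter programming in azx_pcm_prepare() with the hdac core helper snd_hdac_stream_set_params(). As a rough sketch of what that helper consolidates (simplified and hedged; the authoritative implementation lives in sound/hda/hdac_stream.c, not in this file), it pulls the buffer and period geometry from the PCM runtime and reprograms the stream descriptor only when something has changed:

/* Simplified sketch of the hdac core helper -- not code from this file. */
int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
                               unsigned int format_val)
{
        struct snd_pcm_substream *substream = azx_dev->substream;
        unsigned int bufsize, period_bytes;
        int err;

        if (!substream)
                return -EINVAL;

        bufsize = snd_pcm_lib_buffer_bytes(substream);
        period_bytes = snd_pcm_lib_period_bytes(substream);

        /* reprogram the BDL and format only when the parameters changed */
        if (bufsize != azx_dev->bufsize ||
            period_bytes != azx_dev->period_bytes ||
            format_val != azx_dev->format_val) {
                azx_dev->bufsize = bufsize;
                azx_dev->period_bytes = period_bytes;
                azx_dev->format_val = format_val;
                err = snd_hdac_stream_setup_periods(azx_dev);
                if (err < 0)
                        return err;
        }
        return 0;
}

With such a helper in place, azx_pcm_prepare() below only computes format_val and calls snd_hdac_stream_set_params() before snd_hdac_stream_setup().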
1 /*
2 *
3 * Implementation of primary alsa driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
32 #include "hda_controller.h"
33
34 #define CREATE_TRACE_POINTS
35 #include "hda_intel_trace.h"
36
37 /* DSP lock helpers */
38 #define dsp_lock(dev) snd_hdac_dsp_lock(azx_stream(dev))
39 #define dsp_unlock(dev) snd_hdac_dsp_unlock(azx_stream(dev))
40 #define dsp_is_locked(dev) snd_hdac_stream_is_locked(azx_stream(dev))
41
42 /* assign a stream for the PCM */
43 static inline struct azx_dev *
44 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
45 {
46 struct hdac_stream *s;
47
48 s = snd_hdac_stream_assign(azx_bus(chip), substream);
49 if (!s)
50 return NULL;
51 return stream_to_azx_dev(s);
52 }
53
54 /* release the assigned stream */
55 static inline void azx_release_device(struct azx_dev *azx_dev)
56 {
57 snd_hdac_stream_release(azx_stream(azx_dev));
58 }
59
60 static inline struct hda_pcm_stream *
61 to_hda_pcm_stream(struct snd_pcm_substream *substream)
62 {
63 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
64 return &apcm->info->stream[substream->stream];
65 }
66
67 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
68 u64 nsec)
69 {
70 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
71 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
72 u64 codec_frames, codec_nsecs;
73
74 if (!hinfo->ops.get_delay)
75 return nsec;
76
77 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
78 codec_nsecs = div_u64(codec_frames * 1000000000LL,
79 substream->runtime->rate);
80
81 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
82 return nsec + codec_nsecs;
83
84 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
85 }
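/* Illustrative numbers (not from the code above): at a 48 kHz runtime rate,
 * a codec-reported delay of 96 frames converts to 2,000,000 ns; that value
 * is added to the link timestamp for capture and subtracted for playback.
 */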
86
87 /*
88 * PCM ops
89 */
90
91 static int azx_pcm_close(struct snd_pcm_substream *substream)
92 {
93 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
94 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
95 struct azx *chip = apcm->chip;
96 struct azx_dev *azx_dev = get_azx_dev(substream);
97
98 mutex_lock(&chip->open_mutex);
99 azx_release_device(azx_dev);
100 if (hinfo->ops.close)
101 hinfo->ops.close(hinfo, apcm->codec, substream);
102 snd_hda_power_down(apcm->codec);
103 mutex_unlock(&chip->open_mutex);
104 snd_hda_codec_pcm_put(apcm->info);
105 return 0;
106 }
107
108 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
109 struct snd_pcm_hw_params *hw_params)
110 {
111 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
112 struct azx *chip = apcm->chip;
113 struct azx_dev *azx_dev = get_azx_dev(substream);
114 int ret;
115
116 dsp_lock(azx_dev);
117 if (dsp_is_locked(azx_dev)) {
118 ret = -EBUSY;
119 goto unlock;
120 }
121
122 azx_dev->core.bufsize = 0;
123 azx_dev->core.period_bytes = 0;
124 azx_dev->core.format_val = 0;
125 ret = chip->ops->substream_alloc_pages(chip, substream,
126 params_buffer_bytes(hw_params));
127 unlock:
128 dsp_unlock(azx_dev);
129 return ret;
130 }
131
132 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
133 {
134 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
135 struct azx_dev *azx_dev = get_azx_dev(substream);
136 struct azx *chip = apcm->chip;
137 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
138 int err;
139
140 /* reset BDL address */
141 dsp_lock(azx_dev);
142 if (!dsp_is_locked(azx_dev))
143 snd_hdac_stream_cleanup(azx_stream(azx_dev));
144
145 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
146
147 err = chip->ops->substream_free_pages(chip, substream);
148 azx_stream(azx_dev)->prepared = 0;
149 dsp_unlock(azx_dev);
150 return err;
151 }
152
153 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
154 {
155 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
156 struct azx *chip = apcm->chip;
157 struct azx_dev *azx_dev = get_azx_dev(substream);
158 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
159 struct snd_pcm_runtime *runtime = substream->runtime;
160 unsigned int format_val, stream_tag;
161 int err;
162 struct hda_spdif_out *spdif =
163 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
164 unsigned short ctls = spdif ? spdif->ctls : 0;
165
166 dsp_lock(azx_dev);
167 if (dsp_is_locked(azx_dev)) {
168 err = -EBUSY;
169 goto unlock;
170 }
171
172 snd_hdac_stream_reset(azx_stream(azx_dev));
173 format_val = snd_hdac_calc_stream_format(runtime->rate,
174 runtime->channels,
175 runtime->format,
176 hinfo->maxbps,
177 ctls);
178 if (!format_val) {
179 dev_err(chip->card->dev,
180 "invalid format_val, rate=%d, ch=%d, format=%d\n",
181 runtime->rate, runtime->channels, runtime->format);
182 err = -EINVAL;
183 goto unlock;
184 }
185
186 err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
187 if (err < 0)
188 goto unlock;
189
190 snd_hdac_stream_setup(azx_stream(azx_dev));
191
192 stream_tag = azx_dev->core.stream_tag;
193 /* CA-IBG chips need the playback stream starting from 1 */
194 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
195 stream_tag > chip->capture_streams)
196 stream_tag -= chip->capture_streams;
197 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
198 azx_dev->core.format_val, substream);
199
200 unlock:
201 if (!err)
202 azx_stream(azx_dev)->prepared = 1;
203 dsp_unlock(azx_dev);
204 return err;
205 }
206
207 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
208 {
209 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
210 struct azx *chip = apcm->chip;
211 struct hdac_bus *bus = azx_bus(chip);
212 struct azx_dev *azx_dev;
213 struct snd_pcm_substream *s;
214 struct hdac_stream *hstr;
215 bool start;
216 int sbits = 0;
217 int sync_reg;
218
219 azx_dev = get_azx_dev(substream);
220 trace_azx_pcm_trigger(chip, azx_dev, cmd);
221
222 hstr = azx_stream(azx_dev);
223 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
224 sync_reg = AZX_REG_OLD_SSYNC;
225 else
226 sync_reg = AZX_REG_SSYNC;
227
228 if (dsp_is_locked(azx_dev) || !hstr->prepared)
229 return -EPIPE;
230
231 switch (cmd) {
232 case SNDRV_PCM_TRIGGER_START:
233 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
234 case SNDRV_PCM_TRIGGER_RESUME:
235 start = true;
236 break;
237 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
238 case SNDRV_PCM_TRIGGER_SUSPEND:
239 case SNDRV_PCM_TRIGGER_STOP:
240 start = false;
241 break;
242 default:
243 return -EINVAL;
244 }
245
246 snd_pcm_group_for_each_entry(s, substream) {
247 if (s->pcm->card != substream->pcm->card)
248 continue;
249 azx_dev = get_azx_dev(s);
250 sbits |= 1 << azx_dev->core.index;
251 snd_pcm_trigger_done(s, substream);
252 }
253
254 spin_lock(&bus->reg_lock);
255
256 /* first, set SYNC bits of corresponding streams */
257 snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
258
259 snd_pcm_group_for_each_entry(s, substream) {
260 if (s->pcm->card != substream->pcm->card)
261 continue;
262 azx_dev = get_azx_dev(s);
263 if (start) {
264 azx_dev->insufficient = 1;
265 snd_hdac_stream_start(azx_stream(azx_dev), true);
266 } else {
267 snd_hdac_stream_stop(azx_stream(azx_dev));
268 }
269 }
270 spin_unlock(&bus->reg_lock);
271
272 snd_hdac_stream_sync(hstr, start, sbits);
273
274 spin_lock(&bus->reg_lock);
275 /* reset SYNC bits */
276 snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
277 if (start)
278 snd_hdac_stream_timecounter_init(hstr, sbits);
279 spin_unlock(&bus->reg_lock);
280 return 0;
281 }
282
283 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
284 {
285 return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
286 }
287 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
288
289 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
290 {
291 return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
292 }
293 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
294
295 unsigned int azx_get_position(struct azx *chip,
296 struct azx_dev *azx_dev)
297 {
298 struct snd_pcm_substream *substream = azx_dev->core.substream;
299 unsigned int pos;
300 int stream = substream->stream;
301 int delay = 0;
302
303 if (chip->get_position[stream])
304 pos = chip->get_position[stream](chip, azx_dev);
305 else /* use the position buffer as default */
306 pos = azx_get_pos_posbuf(chip, azx_dev);
307
308 if (pos >= azx_dev->core.bufsize)
309 pos = 0;
310
311 if (substream->runtime) {
312 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
313 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
314
315 if (chip->get_delay[stream])
316 delay += chip->get_delay[stream](chip, azx_dev, pos);
317 if (hinfo->ops.get_delay)
318 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
319 substream);
320 substream->runtime->delay = delay;
321 }
322
323 trace_azx_get_position(chip, azx_dev, pos, delay);
324 return pos;
325 }
326 EXPORT_SYMBOL_GPL(azx_get_position);
327
328 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
329 {
330 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
331 struct azx *chip = apcm->chip;
332 struct azx_dev *azx_dev = get_azx_dev(substream);
333 return bytes_to_frames(substream->runtime,
334 azx_get_position(chip, azx_dev));
335 }
336
337 static int azx_get_time_info(struct snd_pcm_substream *substream,
338 struct timespec *system_ts, struct timespec *audio_ts,
339 struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
340 struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
341 {
342 struct azx_dev *azx_dev = get_azx_dev(substream);
343 u64 nsec;
344
345 if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
346 (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
347
348 snd_pcm_gettime(substream->runtime, system_ts);
349
350 nsec = timecounter_read(&azx_dev->core.tc);
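/* Assumed rationale, based on the hdac core timecounter setup: the link
 * wall clock ticks at 24 MHz, i.e. every 125/3 ns. The cyclecounter applies
 * the x125 factor, so the division by 3 below completes the ns conversion.
 */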
351 nsec = div_u64(nsec, 3); /* can be optimized */
352 if (audio_tstamp_config->report_delay)
353 nsec = azx_adjust_codec_delay(substream, nsec);
354
355 *audio_ts = ns_to_timespec(nsec);
356
357 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
358 audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
359 audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
360
361 } else
362 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
363
364 return 0;
365 }
366
367 static struct snd_pcm_hardware azx_pcm_hw = {
368 .info = (SNDRV_PCM_INFO_MMAP |
369 SNDRV_PCM_INFO_INTERLEAVED |
370 SNDRV_PCM_INFO_BLOCK_TRANSFER |
371 SNDRV_PCM_INFO_MMAP_VALID |
372 /* No full-resume yet implemented */
373 /* SNDRV_PCM_INFO_RESUME |*/
374 SNDRV_PCM_INFO_PAUSE |
375 SNDRV_PCM_INFO_SYNC_START |
376 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
377 SNDRV_PCM_INFO_HAS_LINK_ATIME |
378 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
379 .formats = SNDRV_PCM_FMTBIT_S16_LE,
380 .rates = SNDRV_PCM_RATE_48000,
381 .rate_min = 48000,
382 .rate_max = 48000,
383 .channels_min = 2,
384 .channels_max = 2,
385 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
386 .period_bytes_min = 128,
387 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
388 .periods_min = 2,
389 .periods_max = AZX_MAX_FRAG,
390 .fifo_size = 0,
391 };
392
393 static int azx_pcm_open(struct snd_pcm_substream *substream)
394 {
395 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
396 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
397 struct azx *chip = apcm->chip;
398 struct azx_dev *azx_dev;
399 struct snd_pcm_runtime *runtime = substream->runtime;
400 int err;
401 int buff_step;
402
403 snd_hda_codec_pcm_get(apcm->info);
404 mutex_lock(&chip->open_mutex);
405 azx_dev = azx_assign_device(chip, substream);
406 if (azx_dev == NULL) {
407 err = -EBUSY;
408 goto unlock;
409 }
410 runtime->private_data = azx_dev;
411 runtime->hw = azx_pcm_hw;
412 runtime->hw.channels_min = hinfo->channels_min;
413 runtime->hw.channels_max = hinfo->channels_max;
414 runtime->hw.formats = hinfo->formats;
415 runtime->hw.rates = hinfo->rates;
416 snd_pcm_limit_hw_rates(runtime);
417 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
418
419 /* avoid wrap-around with wall-clock */
420 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
421 20,
422 178000000);
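/* Why 178,000,000 us: the 32-bit link wall clock at 24 MHz wraps after
 * 2^32 / 24,000,000 ~= 178.96 s, so capping the buffer time at 178 s keeps
 * the wall clock from wrapping within a single buffer.
 */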
423
424 if (chip->align_buffer_size)
425 /* constrain buffer sizes to be a multiple of 128
426 bytes. This is more efficient in terms of memory
427 access but isn't required by the HDA spec, and it
428 prevents users from specifying exact period/buffer
429 sizes. For example, at 44.1 kHz a period size set
430 to 20 ms will be rounded to 19.59 ms. */
431 buff_step = 128;
432 else
433 /* Don't enforce step sizes on buffers; they still need
434 to be a multiple of 4 bytes (HDA spec). Tested on Intel
435 HDA controllers; disabling the alignment option may not
436 work on all devices. */
437 buff_step = 4;
438
439 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
440 buff_step);
441 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
442 buff_step);
443 snd_hda_power_up(apcm->codec);
444 if (hinfo->ops.open)
445 err = hinfo->ops.open(hinfo, apcm->codec, substream);
446 else
447 err = -ENODEV;
448 if (err < 0) {
449 azx_release_device(azx_dev);
450 goto powerdown;
451 }
452 snd_pcm_limit_hw_rates(runtime);
453 /* sanity check */
454 if (snd_BUG_ON(!runtime->hw.channels_min) ||
455 snd_BUG_ON(!runtime->hw.channels_max) ||
456 snd_BUG_ON(!runtime->hw.formats) ||
457 snd_BUG_ON(!runtime->hw.rates)) {
458 azx_release_device(azx_dev);
459 if (hinfo->ops.close)
460 hinfo->ops.close(hinfo, apcm->codec, substream);
461 err = -EINVAL;
462 goto powerdown;
463 }
464
465 /* disable LINK_ATIME timestamps for capture streams
466 until we figure out how to handle digital inputs */
467 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
468 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
469 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
470 }
471
472 snd_pcm_set_sync(substream);
473 mutex_unlock(&chip->open_mutex);
474 return 0;
475
476 powerdown:
477 snd_hda_power_down(apcm->codec);
478 unlock:
479 mutex_unlock(&chip->open_mutex);
480 snd_hda_codec_pcm_put(apcm->info);
481 return err;
482 }
483
484 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
485 struct vm_area_struct *area)
486 {
487 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
488 struct azx *chip = apcm->chip;
489 if (chip->ops->pcm_mmap_prepare)
490 chip->ops->pcm_mmap_prepare(substream, area);
491 return snd_pcm_lib_default_mmap(substream, area);
492 }
493
494 static struct snd_pcm_ops azx_pcm_ops = {
495 .open = azx_pcm_open,
496 .close = azx_pcm_close,
497 .ioctl = snd_pcm_lib_ioctl,
498 .hw_params = azx_pcm_hw_params,
499 .hw_free = azx_pcm_hw_free,
500 .prepare = azx_pcm_prepare,
501 .trigger = azx_pcm_trigger,
502 .pointer = azx_pcm_pointer,
503 .get_time_info = azx_get_time_info,
504 .mmap = azx_pcm_mmap,
505 .page = snd_pcm_sgbuf_ops_page,
506 };
507
508 static void azx_pcm_free(struct snd_pcm *pcm)
509 {
510 struct azx_pcm *apcm = pcm->private_data;
511 if (apcm) {
512 list_del(&apcm->list);
513 apcm->info->pcm = NULL;
514 kfree(apcm);
515 }
516 }
517
518 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
519
520 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
521 struct hda_pcm *cpcm)
522 {
523 struct hdac_bus *bus = &_bus->core;
524 struct azx *chip = bus_to_azx(bus);
525 struct snd_pcm *pcm;
526 struct azx_pcm *apcm;
527 int pcm_dev = cpcm->device;
528 unsigned int size;
529 int s, err;
530
531 list_for_each_entry(apcm, &chip->pcm_list, list) {
532 if (apcm->pcm->device == pcm_dev) {
533 dev_err(chip->card->dev, "PCM %d already exists\n",
534 pcm_dev);
535 return -EBUSY;
536 }
537 }
538 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
539 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
540 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
541 &pcm);
542 if (err < 0)
543 return err;
544 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
545 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
546 if (apcm == NULL)
547 return -ENOMEM;
548 apcm->chip = chip;
549 apcm->pcm = pcm;
550 apcm->codec = codec;
551 apcm->info = cpcm;
552 pcm->private_data = apcm;
553 pcm->private_free = azx_pcm_free;
554 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
555 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
556 list_add_tail(&apcm->list, &chip->pcm_list);
557 cpcm->pcm = pcm;
558 for (s = 0; s < 2; s++) {
559 if (cpcm->stream[s].substreams)
560 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
561 }
562 /* buffer pre-allocation */
563 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
564 if (size > MAX_PREALLOC_SIZE)
565 size = MAX_PREALLOC_SIZE;
566 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
567 chip->card->dev,
568 size, MAX_PREALLOC_SIZE);
569 return 0;
570 }
571
572 static unsigned int azx_command_addr(u32 cmd)
573 {
574 unsigned int addr = cmd >> 28;
575
576 if (addr >= AZX_MAX_CODECS) {
577 snd_BUG();
578 addr = 0;
579 }
580
581 return addr;
582 }
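/* Illustrative encoding (see probe_codec() below for a concrete instance):
 * a 12-bit-verb command is packed as
 * (addr << 28) | (nid << 20) | (verb << 8) | param,
 * so for a vendor-id parameter read on codec 0 the address nibble is 0 and
 * azx_command_addr() returns 0.
 */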
583
584 /* receive a response */
585 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
586 unsigned int *res)
587 {
588 struct azx *chip = bus_to_azx(bus);
589 struct hda_bus *hbus = &chip->bus;
590 unsigned long timeout;
591 unsigned long loopcounter;
592 int do_poll = 0;
593
594 again:
595 timeout = jiffies + msecs_to_jiffies(1000);
596
597 for (loopcounter = 0;; loopcounter++) {
598 spin_lock_irq(&bus->reg_lock);
599 if (chip->polling_mode || do_poll)
600 snd_hdac_bus_update_rirb(bus);
601 if (!bus->rirb.cmds[addr]) {
602 if (!do_poll)
603 chip->poll_count = 0;
604 if (res)
605 *res = bus->rirb.res[addr]; /* the last value */
606 spin_unlock_irq(&bus->reg_lock);
607 return 0;
608 }
609 spin_unlock_irq(&bus->reg_lock);
610 if (time_after(jiffies, timeout))
611 break;
612 if (hbus->needs_damn_long_delay || loopcounter > 3000)
613 msleep(2); /* temporary workaround */
614 else {
615 udelay(10);
616 cond_resched();
617 }
618 }
619
620 if (hbus->no_response_fallback)
621 return -EIO;
622
623 if (!chip->polling_mode && chip->poll_count < 2) {
624 dev_dbg(chip->card->dev,
625 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
626 bus->last_cmd[addr]);
627 do_poll = 1;
628 chip->poll_count++;
629 goto again;
630 }
631
632
633 if (!chip->polling_mode) {
634 dev_warn(chip->card->dev,
635 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
636 bus->last_cmd[addr]);
637 chip->polling_mode = 1;
638 goto again;
639 }
640
641 if (chip->msi) {
642 dev_warn(chip->card->dev,
643 "No response from codec, disabling MSI: last cmd=0x%08x\n",
644 bus->last_cmd[addr]);
645 if (chip->ops->disable_msi_reset_irq &&
646 chip->ops->disable_msi_reset_irq(chip) < 0)
647 return -EIO;
648 goto again;
649 }
650
651 if (chip->probing) {
652 /* If this critical timeout happens during the codec probing
653 * phase, this is likely an access to a non-existing codec
654 * slot. Better to return an error and reset the system.
655 */
656 return -EIO;
657 }
658
659 /* a fatal communication error; need either to reset or to fallback
660 * to the single_cmd mode
661 */
662 if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
663 hbus->response_reset = 1;
664 return -EAGAIN; /* give a chance to retry */
665 }
666
667 dev_err(chip->card->dev,
668 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
669 bus->last_cmd[addr]);
670 chip->single_cmd = 1;
671 hbus->response_reset = 0;
672 snd_hdac_bus_stop_cmd_io(bus);
673 return -EIO;
674 }
675
676 /*
677 * Use the single immediate command instead of CORB/RIRB for simplicity
678 *
679 * Note: according to Intel, this is not the preferred use. The command was
680 * intended for the BIOS only, and it may get confused with unsolicited
681 * responses. So we shouldn't use it for normal operation from the
682 * driver.
683 * The code is left here, however, for debugging/testing purposes.
684 */
685
686 /* receive a response */
687 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
688 {
689 int timeout = 50;
690
691 while (timeout--) {
692 /* check the IRV (response valid) bit */
693 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
694 /* reuse rirb.res as the response return value */
695 azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
696 return 0;
697 }
698 udelay(1);
699 }
700 if (printk_ratelimit())
701 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
702 azx_readw(chip, IRS));
703 azx_bus(chip)->rirb.res[addr] = -1;
704 return -EIO;
705 }
706
707 /* send a command */
708 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
709 {
710 struct azx *chip = bus_to_azx(bus);
711 unsigned int addr = azx_command_addr(val);
712 int timeout = 50;
713
714 bus->last_cmd[azx_command_addr(val)] = val;
715 while (timeout--) {
716 /* check ICB busy bit */
717 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
718 /* Clear IRV valid bit */
719 azx_writew(chip, IRS, azx_readw(chip, IRS) |
720 AZX_IRS_VALID);
721 azx_writel(chip, IC, val);
722 azx_writew(chip, IRS, azx_readw(chip, IRS) |
723 AZX_IRS_BUSY);
724 return azx_single_wait_for_response(chip, addr);
725 }
726 udelay(1);
727 }
728 if (printk_ratelimit())
729 dev_dbg(chip->card->dev,
730 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
731 azx_readw(chip, IRS), val);
732 return -EIO;
733 }
734
735 /* receive a response */
736 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
737 unsigned int *res)
738 {
739 if (res)
740 *res = bus->rirb.res[addr];
741 return 0;
742 }
743
744 /*
745 * The below are the main callbacks from hda_codec.
746 *
747 * They are just the skeleton to call sub-callbacks according to the
748 * current setting of chip->single_cmd.
749 */
750
751 /* send a command */
752 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
753 {
754 struct azx *chip = bus_to_azx(bus);
755
756 if (chip->disabled)
757 return 0;
758 if (chip->single_cmd)
759 return azx_single_send_cmd(bus, val);
760 else
761 return snd_hdac_bus_send_cmd(bus, val);
762 }
763
764 /* get a response */
765 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
766 unsigned int *res)
767 {
768 struct azx *chip = bus_to_azx(bus);
769
770 if (chip->disabled)
771 return 0;
772 if (chip->single_cmd)
773 return azx_single_get_response(bus, addr, res);
774 else
775 return azx_rirb_get_response(bus, addr, res);
776 }
777
778 static const struct hdac_bus_ops bus_core_ops = {
779 .command = azx_send_cmd,
780 .get_response = azx_get_response,
781 };
782
783 #ifdef CONFIG_SND_HDA_DSP_LOADER
784 /*
785 * DSP loading code (e.g. for CA0132)
786 */
787
788 /* use the first stream for loading DSP */
789 static struct azx_dev *
790 azx_get_dsp_loader_dev(struct azx *chip)
791 {
792 struct hdac_bus *bus = azx_bus(chip);
793 struct hdac_stream *s;
794
795 list_for_each_entry(s, &bus->stream_list, list)
796 if (s->index == chip->playback_index_offset)
797 return stream_to_azx_dev(s);
798
799 return NULL;
800 }
801
802 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
803 unsigned int byte_size,
804 struct snd_dma_buffer *bufp)
805 {
806 struct hdac_bus *bus = &codec->bus->core;
807 struct azx *chip = bus_to_azx(bus);
808 struct azx_dev *azx_dev;
809 struct hdac_stream *hstr;
810 bool saved = false;
811 int err;
812
813 azx_dev = azx_get_dsp_loader_dev(chip);
814 hstr = azx_stream(azx_dev);
815 spin_lock_irq(&bus->reg_lock);
816 if (hstr->opened) {
817 chip->saved_azx_dev = *azx_dev;
818 saved = true;
819 }
820 spin_unlock_irq(&bus->reg_lock);
821
822 err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
823 if (err < 0) {
824 spin_lock_irq(&bus->reg_lock);
825 if (saved)
826 *azx_dev = chip->saved_azx_dev;
827 spin_unlock_irq(&bus->reg_lock);
828 return err;
829 }
830
831 hstr->prepared = 0;
832 return err;
833 }
834 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
835
836 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
837 {
838 struct hdac_bus *bus = &codec->bus->core;
839 struct azx *chip = bus_to_azx(bus);
840 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
841
842 snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
843 }
844 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
845
846 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
847 struct snd_dma_buffer *dmab)
848 {
849 struct hdac_bus *bus = &codec->bus->core;
850 struct azx *chip = bus_to_azx(bus);
851 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
852 struct hdac_stream *hstr = azx_stream(azx_dev);
853
854 if (!dmab->area || !hstr->locked)
855 return;
856
857 snd_hdac_dsp_cleanup(hstr, dmab);
858 spin_lock_irq(&bus->reg_lock);
859 if (hstr->opened)
860 *azx_dev = chip->saved_azx_dev;
861 hstr->locked = false;
862 spin_unlock_irq(&bus->reg_lock);
863 }
864 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
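/* Hedged usage sketch (not part of this file): how a codec driver such as
 * CA0132 might drive the DSP loader API above. The "format", "fw_data" and
 * "fw_size" parameters are hypothetical placeholders supplied by the caller.
 */
static int example_load_dsp_image(struct hda_codec *codec, unsigned int format,
                                  const void *fw_data, size_t fw_size)
{
        struct snd_dma_buffer dmab;
        int err;

        err = snd_hda_codec_load_dsp_prepare(codec, format, fw_size, &dmab);
        if (err < 0)
                return err;

        memcpy(dmab.area, fw_data, fw_size);           /* stage the DSP image */
        snd_hda_codec_load_dsp_trigger(codec, true);   /* start the DMA stream */
        /* ... wait for the codec to signal that the image was consumed ... */
        snd_hda_codec_load_dsp_trigger(codec, false);  /* stop the stream */
        snd_hda_codec_load_dsp_cleanup(codec, &dmab);  /* release buffer/stream */
        return 0;
}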
865 #endif /* CONFIG_SND_HDA_DSP_LOADER */
866
867 /*
868 * reset and start the controller registers
869 */
870 void azx_init_chip(struct azx *chip, bool full_reset)
871 {
872 if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
873 /* correct RINTCNT for CXT */
874 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
875 azx_writew(chip, RINTCNT, 0xc0);
876 }
877 }
878 EXPORT_SYMBOL_GPL(azx_init_chip);
879
880 void azx_stop_all_streams(struct azx *chip)
881 {
882 struct hdac_bus *bus = azx_bus(chip);
883 struct hdac_stream *s;
884
885 list_for_each_entry(s, &bus->stream_list, list)
886 snd_hdac_stream_stop(s);
887 }
888 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
889
890 void azx_stop_chip(struct azx *chip)
891 {
892 snd_hdac_bus_stop_chip(azx_bus(chip));
893 }
894 EXPORT_SYMBOL_GPL(azx_stop_chip);
895
896 /*
897 * interrupt handler
898 */
899 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
900 {
901 struct azx *chip = bus_to_azx(bus);
902 struct azx_dev *azx_dev = stream_to_azx_dev(s);
903
904 /* check whether this IRQ is really acceptable */
905 if (!chip->ops->position_check ||
906 chip->ops->position_check(chip, azx_dev)) {
907 spin_unlock(&bus->reg_lock);
908 snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
909 spin_lock(&bus->reg_lock);
910 }
911 }
912
913 irqreturn_t azx_interrupt(int irq, void *dev_id)
914 {
915 struct azx *chip = dev_id;
916 struct hdac_bus *bus = azx_bus(chip);
917 u32 status;
918
919 #ifdef CONFIG_PM
920 if (azx_has_pm_runtime(chip))
921 if (!pm_runtime_active(chip->card->dev))
922 return IRQ_NONE;
923 #endif
924
925 spin_lock(&bus->reg_lock);
926
927 if (chip->disabled) {
928 spin_unlock(&bus->reg_lock);
929 return IRQ_NONE;
930 }
931
932 status = azx_readl(chip, INTSTS);
933 if (status == 0 || status == 0xffffffff) {
934 spin_unlock(&bus->reg_lock);
935 return IRQ_NONE;
936 }
937
938 snd_hdac_bus_handle_stream_irq(bus, status, stream_update);
939
940 /* clear rirb int */
941 status = azx_readb(chip, RIRBSTS);
942 if (status & RIRB_INT_MASK) {
943 if (status & RIRB_INT_RESPONSE) {
944 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
945 udelay(80);
946 snd_hdac_bus_update_rirb(bus);
947 }
948 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
949 }
950
951 spin_unlock(&bus->reg_lock);
952
953 return IRQ_HANDLED;
954 }
955 EXPORT_SYMBOL_GPL(azx_interrupt);
956
957 /*
958 * Codec interface
959 */
960
961 /*
962 * Probe the given codec address
963 */
964 static int probe_codec(struct azx *chip, int addr)
965 {
966 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
967 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
968 struct hdac_bus *bus = azx_bus(chip);
969 int err;
970 unsigned int res = -1;
971
972 mutex_lock(&bus->cmd_mutex);
973 chip->probing = 1;
974 azx_send_cmd(bus, cmd);
975 err = azx_get_response(bus, addr, &res);
976 chip->probing = 0;
977 mutex_unlock(&bus->cmd_mutex);
978 if (err < 0 || res == -1)
979 return -EIO;
980 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
981 return 0;
982 }
983
984 void snd_hda_bus_reset(struct hda_bus *bus)
985 {
986 struct azx *chip = bus_to_azx(&bus->core);
987
988 bus->in_reset = 1;
989 azx_stop_chip(chip);
990 azx_init_chip(chip, true);
991 if (bus->core.chip_init)
992 snd_hda_bus_reset_codecs(bus);
993 bus->in_reset = 0;
994 }
995
996 static int get_jackpoll_interval(struct azx *chip)
997 {
998 int i;
999 unsigned int j;
1000
1001 if (!chip->jackpoll_ms)
1002 return 0;
1003
1004 i = chip->jackpoll_ms[chip->dev_index];
1005 if (i == 0)
1006 return 0;
1007 if (i < 50 || i > 60000)
1008 j = 0;
1009 else
1010 j = msecs_to_jiffies(i);
1011 if (j == 0)
1012 dev_warn(chip->card->dev,
1013 "jackpoll_ms value out of range: %d\n", i);
1014 return j;
1015 }
1016
1017 /* HD-audio bus initialization */
1018 int azx_bus_init(struct azx *chip, const char *model,
1019 const struct hdac_io_ops *io_ops)
1020 {
1021 struct hda_bus *bus = &chip->bus;
1022 int err;
1023
1024 err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops,
1025 io_ops);
1026 if (err < 0)
1027 return err;
1028
1029 bus->card = chip->card;
1030 mutex_init(&bus->prepare_mutex);
1031 bus->pci = chip->pci;
1032 bus->modelname = model;
1033 bus->core.snoop = azx_snoop(chip);
1034 if (chip->get_position[0] != azx_get_pos_lpib ||
1035 chip->get_position[1] != azx_get_pos_lpib)
1036 bus->core.use_posbuf = true;
1037 if (chip->bdl_pos_adj)
1038 bus->core.bdl_pos_adj = chip->bdl_pos_adj[chip->dev_index];
1039 if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1040 bus->core.corbrp_self_clear = true;
1041
1042 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1043 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1044 bus->needs_damn_long_delay = 1;
1045 }
1046
1047 /* AMD chipsets often cause communication stalls upon certain
1048 * sequences such as pin-detection. It seems that forcing synced
1049 * access works around the stall. Grrr...
1050 */
1051 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1052 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1053 bus->core.sync_write = 1;
1054 bus->allow_bus_reset = 1;
1055 }
1056
1057 return 0;
1058 }
1059 EXPORT_SYMBOL_GPL(azx_bus_init);
1060
1061 /* Probe codecs */
1062 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1063 {
1064 struct hdac_bus *bus = azx_bus(chip);
1065 int c, codecs, err;
1066
1067 codecs = 0;
1068 if (!max_slots)
1069 max_slots = AZX_DEFAULT_CODECS;
1070
1071 /* First try to probe all given codec slots */
1072 for (c = 0; c < max_slots; c++) {
1073 if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1074 if (probe_codec(chip, c) < 0) {
1075 /* Some BIOSen give you wrong codec addresses
1076 * that don't exist
1077 */
1078 dev_warn(chip->card->dev,
1079 "Codec #%d probe error; disabling it...\n", c);
1080 bus->codec_mask &= ~(1 << c);
1081 /* Worse, accessing a non-existing
1082 * codec often screws up the controller chip
1083 * and disturbs further communication.
1084 * Thus if an error occurs during probing, it is
1085 * better to reset the controller chip to
1086 * get back to a sane state.
1087 */
1088 azx_stop_chip(chip);
1089 azx_init_chip(chip, true);
1090 }
1091 }
1092 }
1093
1094 /* Then create codec instances */
1095 for (c = 0; c < max_slots; c++) {
1096 if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1097 struct hda_codec *codec;
1098 err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1099 if (err < 0)
1100 continue;
1101 codec->jackpoll_interval = get_jackpoll_interval(chip);
1102 codec->beep_mode = chip->beep_mode;
1103 codecs++;
1104 }
1105 }
1106 if (!codecs) {
1107 dev_err(chip->card->dev, "no codecs initialized\n");
1108 return -ENXIO;
1109 }
1110 return 0;
1111 }
1112 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1113
1114 /* configure each codec instance */
1115 int azx_codec_configure(struct azx *chip)
1116 {
1117 struct hda_codec *codec;
1118 list_for_each_codec(codec, &chip->bus) {
1119 snd_hda_codec_configure(codec);
1120 }
1121 return 0;
1122 }
1123 EXPORT_SYMBOL_GPL(azx_codec_configure);
1124
1125 static int stream_direction(struct azx *chip, unsigned char index)
1126 {
1127 if (index >= chip->capture_index_offset &&
1128 index < chip->capture_index_offset + chip->capture_streams)
1129 return SNDRV_PCM_STREAM_CAPTURE;
1130 return SNDRV_PCM_STREAM_PLAYBACK;
1131 }
1132
1133 /* initialize SD streams */
1134 int azx_init_streams(struct azx *chip)
1135 {
1136 int i;
1137 int stream_tags[2] = { 0, 0 };
1138
1139 /* initialize each stream (aka device):
1140 * assign the starting BDL address to each stream (device)
1141 * and initialize it
1142 */
1143 for (i = 0; i < chip->num_streams; i++) {
1144 struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1145 int dir, tag;
1146
1147 if (!azx_dev)
1148 return -ENOMEM;
1149
1150 dir = stream_direction(chip, i);
1151 /* the stream tag must be unique within
1152 * each stream direction group;
1153 * valid values are 1...15.
1154 * Use per-direction stream tags if the flag
1155 * AZX_DCAPS_SEPARATE_STREAM_TAG is set.
1156 */
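/* Worked example with assumed numbers: with capture_index_offset = 0,
 * capture_streams = 4 and num_streams = 8, the default scheme below
 * yields tags 1..8 across both directions, while the separate-tag
 * scheme restarts at 1 per direction: capture gets tags 1..4 and
 * playback gets tags 1..4.
 */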
1157 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1158 tag = ++stream_tags[dir];
1159 else
1160 tag = i + 1;
1161 snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1162 i, dir, tag);
1163 }
1164
1165 return 0;
1166 }
1167 EXPORT_SYMBOL_GPL(azx_init_streams);
1168
1169 void azx_free_streams(struct azx *chip)
1170 {
1171 struct hdac_bus *bus = azx_bus(chip);
1172 struct hdac_stream *s;
1173
1174 while (!list_empty(&bus->stream_list)) {
1175 s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1176 list_del(&s->list);
1177 kfree(stream_to_azx_dev(s));
1178 }
1179 }
1180 EXPORT_SYMBOL_GPL(azx_free_streams);