sound/pci/hda/hda_controller.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Implementation of the primary ALSA driver code base for Intel HD Audio.
5 *
6 * Copyright(c) 2004 Intel Corporation. All rights reserved.
7 *
8 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9 * PeiSen Hou <pshou@realtek.com.tw>
10 */
11
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19
20 #ifdef CONFIG_X86
21 /* for art-tsc conversion */
22 #include <asm/tsc.h>
23 #endif
24
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include "hda_controller.h"
28
29 #define CREATE_TRACE_POINTS
30 #include "hda_controller_trace.h"
31
32 /* DSP lock helpers */
33 #define dsp_lock(dev) snd_hdac_dsp_lock(azx_stream(dev))
34 #define dsp_unlock(dev) snd_hdac_dsp_unlock(azx_stream(dev))
35 #define dsp_is_locked(dev) snd_hdac_stream_is_locked(azx_stream(dev))
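/*
 * Typical usage pattern (editor's illustrative sketch, mirroring the PCM
 * callbacks below rather than adding a new API):
 *
 *	dsp_lock(azx_dev);
 *	if (dsp_is_locked(azx_dev)) {
 *		err = -EBUSY;	(the stream is owned by the DSP loader)
 *		goto unlock;
 *	}
 *	... program the stream safely ...
 * unlock:
 *	dsp_unlock(azx_dev);
 */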
36
37 /* assign a stream for the PCM */
38 static inline struct azx_dev *
39 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
40 {
41 struct hdac_stream *s;
42
43 s = snd_hdac_stream_assign(azx_bus(chip), substream);
44 if (!s)
45 return NULL;
46 return stream_to_azx_dev(s);
47 }
48
49 /* release the assigned stream */
50 static inline void azx_release_device(struct azx_dev *azx_dev)
51 {
52 snd_hdac_stream_release(azx_stream(azx_dev));
53 }
54
55 static inline struct hda_pcm_stream *
56 to_hda_pcm_stream(struct snd_pcm_substream *substream)
57 {
58 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
59 return &apcm->info->stream[substream->stream];
60 }
61
62 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
63 u64 nsec)
64 {
65 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
66 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
67 u64 codec_frames, codec_nsecs;
68
69 if (!hinfo->ops.get_delay)
70 return nsec;
71
72 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
73 codec_nsecs = div_u64(codec_frames * 1000000000LL,
74 substream->runtime->rate);
75
76 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
77 return nsec + codec_nsecs;
78
79 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
80 }
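/*
 * Worked example (editor's illustration, hypothetical numbers): a codec
 * reporting a 64-frame delay on a 48 kHz stream contributes
 * 64 * 10^9 / 48000 = 1333333 ns (~1.33 ms), which is added to the link
 * timestamp for capture and subtracted (clamped at zero) for playback.
 */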
81
82 /*
83 * PCM ops
84 */
85
86 static int azx_pcm_close(struct snd_pcm_substream *substream)
87 {
88 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
89 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
90 struct azx *chip = apcm->chip;
91 struct azx_dev *azx_dev = get_azx_dev(substream);
92
93 trace_azx_pcm_close(chip, azx_dev);
94 mutex_lock(&chip->open_mutex);
95 azx_release_device(azx_dev);
96 if (hinfo->ops.close)
97 hinfo->ops.close(hinfo, apcm->codec, substream);
98 snd_hda_power_down(apcm->codec);
99 mutex_unlock(&chip->open_mutex);
100 snd_hda_codec_pcm_put(apcm->info);
101 return 0;
102 }
103
104 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
105 struct snd_pcm_hw_params *hw_params)
106 {
107 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
108 struct azx *chip = apcm->chip;
109 struct azx_dev *azx_dev = get_azx_dev(substream);
110 int ret;
111
112 trace_azx_pcm_hw_params(chip, azx_dev);
113 dsp_lock(azx_dev);
114 if (dsp_is_locked(azx_dev)) {
115 ret = -EBUSY;
116 goto unlock;
117 }
118
119 azx_dev->core.bufsize = 0;
120 azx_dev->core.period_bytes = 0;
121 azx_dev->core.format_val = 0;
122 ret = snd_pcm_lib_malloc_pages(substream,
123 params_buffer_bytes(hw_params));
124
125 unlock:
126 dsp_unlock(azx_dev);
127 return ret;
128 }
129
130 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
131 {
132 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
133 struct azx_dev *azx_dev = get_azx_dev(substream);
134 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
135 int err;
136
137 /* reset BDL address */
138 dsp_lock(azx_dev);
139 if (!dsp_is_locked(azx_dev))
140 snd_hdac_stream_cleanup(azx_stream(azx_dev));
141
142 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
143
144 err = snd_pcm_lib_free_pages(substream);
145 azx_stream(azx_dev)->prepared = 0;
146 dsp_unlock(azx_dev);
147 return err;
148 }
149
150 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
151 {
152 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
153 struct azx *chip = apcm->chip;
154 struct azx_dev *azx_dev = get_azx_dev(substream);
155 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
156 struct snd_pcm_runtime *runtime = substream->runtime;
157 unsigned int format_val, stream_tag;
158 int err;
159 struct hda_spdif_out *spdif =
160 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
161 unsigned short ctls = spdif ? spdif->ctls : 0;
162
163 trace_azx_pcm_prepare(chip, azx_dev);
164 dsp_lock(azx_dev);
165 if (dsp_is_locked(azx_dev)) {
166 err = -EBUSY;
167 goto unlock;
168 }
169
170 snd_hdac_stream_reset(azx_stream(azx_dev));
171 format_val = snd_hdac_calc_stream_format(runtime->rate,
172 runtime->channels,
173 runtime->format,
174 hinfo->maxbps,
175 ctls);
176 if (!format_val) {
177 dev_err(chip->card->dev,
178 "invalid format_val, rate=%d, ch=%d, format=%d\n",
179 runtime->rate, runtime->channels, runtime->format);
180 err = -EINVAL;
181 goto unlock;
182 }
183
184 err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
185 if (err < 0)
186 goto unlock;
187
188 snd_hdac_stream_setup(azx_stream(azx_dev));
189
190 stream_tag = azx_dev->core.stream_tag;
191 /* CA-IBG chips need the playback stream starting from 1 */
192 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
193 stream_tag > chip->capture_streams)
194 stream_tag -= chip->capture_streams;
195 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
196 azx_dev->core.format_val, substream);
197
198 unlock:
199 if (!err)
200 azx_stream(azx_dev)->prepared = 1;
201 dsp_unlock(azx_dev);
202 return err;
203 }
204
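/*
 * Synchronized trigger (editor's summary of the function below): linked
 * substreams on the same card are gathered into a stream-index bitmask,
 * their SSYNC bits are set, each stream's DMA is started or stopped, the
 * controller is polled until the group is in sync, and the SSYNC bits are
 * cleared again (with the timecounters initialized on start).
 */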
205 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
206 {
207 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
208 struct azx *chip = apcm->chip;
209 struct hdac_bus *bus = azx_bus(chip);
210 struct azx_dev *azx_dev;
211 struct snd_pcm_substream *s;
212 struct hdac_stream *hstr;
213 bool start;
214 int sbits = 0;
215 int sync_reg;
216
217 azx_dev = get_azx_dev(substream);
218 trace_azx_pcm_trigger(chip, azx_dev, cmd);
219
220 hstr = azx_stream(azx_dev);
221 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
222 sync_reg = AZX_REG_OLD_SSYNC;
223 else
224 sync_reg = AZX_REG_SSYNC;
225
226 if (dsp_is_locked(azx_dev) || !hstr->prepared)
227 return -EPIPE;
228
229 switch (cmd) {
230 case SNDRV_PCM_TRIGGER_START:
231 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
232 case SNDRV_PCM_TRIGGER_RESUME:
233 start = true;
234 break;
235 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
236 case SNDRV_PCM_TRIGGER_SUSPEND:
237 case SNDRV_PCM_TRIGGER_STOP:
238 start = false;
239 break;
240 default:
241 return -EINVAL;
242 }
243
244 snd_pcm_group_for_each_entry(s, substream) {
245 if (s->pcm->card != substream->pcm->card)
246 continue;
247 azx_dev = get_azx_dev(s);
248 sbits |= 1 << azx_dev->core.index;
249 snd_pcm_trigger_done(s, substream);
250 }
251
252 spin_lock(&bus->reg_lock);
253
254 /* first, set SYNC bits of corresponding streams */
255 snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
256
257 snd_pcm_group_for_each_entry(s, substream) {
258 if (s->pcm->card != substream->pcm->card)
259 continue;
260 azx_dev = get_azx_dev(s);
261 if (start) {
262 azx_dev->insufficient = 1;
263 snd_hdac_stream_start(azx_stream(azx_dev), true);
264 } else {
265 snd_hdac_stream_stop(azx_stream(azx_dev));
266 }
267 }
268 spin_unlock(&bus->reg_lock);
269
270 snd_hdac_stream_sync(hstr, start, sbits);
271
272 spin_lock(&bus->reg_lock);
273 /* reset SYNC bits */
274 snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
275 if (start)
276 snd_hdac_stream_timecounter_init(hstr, sbits);
277 spin_unlock(&bus->reg_lock);
278 return 0;
279 }
280
281 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
282 {
283 return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
284 }
285 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
286
287 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
288 {
289 return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
290 }
291 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
292
293 unsigned int azx_get_position(struct azx *chip,
294 struct azx_dev *azx_dev)
295 {
296 struct snd_pcm_substream *substream = azx_dev->core.substream;
297 unsigned int pos;
298 int stream = substream->stream;
299 int delay = 0;
300
301 if (chip->get_position[stream])
302 pos = chip->get_position[stream](chip, azx_dev);
303 else /* use the position buffer as default */
304 pos = azx_get_pos_posbuf(chip, azx_dev);
305
306 if (pos >= azx_dev->core.bufsize)
307 pos = 0;
308
309 if (substream->runtime) {
310 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
311 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
312
313 if (chip->get_delay[stream])
314 delay += chip->get_delay[stream](chip, azx_dev, pos);
315 if (hinfo->ops.get_delay)
316 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
317 substream);
318 substream->runtime->delay = delay;
319 }
320
321 trace_azx_get_position(chip, azx_dev, pos, delay);
322 return pos;
323 }
324 EXPORT_SYMBOL_GPL(azx_get_position);
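/*
 * Controller drivers choose the position-reporting method by pointing
 * chip->get_position[] at one of the helpers above, e.g. (editor's sketch):
 *
 *	chip->get_position[SNDRV_PCM_STREAM_PLAYBACK] = azx_get_pos_lpib;
 *	chip->get_position[SNDRV_PCM_STREAM_CAPTURE]  = azx_get_pos_posbuf;
 *
 * When no hook is set, azx_get_position() falls back to the position buffer.
 */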
325
326 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
327 {
328 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
329 struct azx *chip = apcm->chip;
330 struct azx_dev *azx_dev = get_azx_dev(substream);
331 return bytes_to_frames(substream->runtime,
332 azx_get_position(chip, azx_dev));
333 }
334
335 /*
336 * azx_scale64: scale base by num/den without overflowing
337 *
338 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
339 *
340 * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K
341 * seconds, which is about 384307 s, i.e. ~4.5 days.
342 *
343 * This scales the calculation so that overflow will still happen, but only
344 * after 2^64 / 48000 secs, which is pretty large!
345 *
346 * In the calculation below:
347 * base may overflow, but since there isn't any additional division
348 * performed on base it's OK
349 * rem can't overflow because both rem and num are 32-bit values
350 */
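/*
 * Concretely (editor's illustration): the link position counter is converted
 * to nanoseconds below as azx_scale64(ll_counter, NSEC_PER_SEC, rate), i.e.
 * ll_counter * 10^9 / rate, computed as
 * (ll_counter / rate) * 10^9 + (ll_counter % rate) * 10^9 / rate
 * so that the intermediate products stay within 64 bits for any realistic
 * stream lifetime.
 */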
351
352 #ifdef CONFIG_X86
353 static u64 azx_scale64(u64 base, u32 num, u32 den)
354 {
355 u64 rem;
356
357 rem = do_div(base, den);
358
359 base *= num;
360 rem *= num;
361
362 do_div(rem, den);
363
364 return base + rem;
365 }
366
367 static int azx_get_sync_time(ktime_t *device,
368 struct system_counterval_t *system, void *ctx)
369 {
370 struct snd_pcm_substream *substream = ctx;
371 struct azx_dev *azx_dev = get_azx_dev(substream);
372 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
373 struct azx *chip = apcm->chip;
374 struct snd_pcm_runtime *runtime;
375 u64 ll_counter, ll_counter_l, ll_counter_h;
376 u64 tsc_counter, tsc_counter_l, tsc_counter_h;
377 u32 wallclk_ctr, wallclk_cycles;
378 bool direction;
379 u32 dma_select;
380 u32 timeout = 200;
381 u32 retry_count = 0;
382
383 runtime = substream->runtime;
384
385 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
386 direction = 1;
387 else
388 direction = 0;
389
390 /* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
391 do {
392 timeout = 100;
393 dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
394 (azx_dev->core.stream_tag - 1);
395 snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
396
397 /* Enable the capture */
398 snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
399
400 while (timeout) {
401 if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
402 GTSCC_TSCCD_MASK)
403 break;
404
405 timeout--;
406 }
407
408 if (!timeout) {
409 dev_err(chip->card->dev, "GTSCC capture timed out!\n");
410 return -EIO;
411 }
412
413 /* Read wall clock counter */
414 wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
415
416 /* Read TSC counter */
417 tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
418 tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
419
420 /* Read Link counter */
421 ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
422 ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
423
424 /* Ack: registers read done */
425 snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
426
427 tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
428 tsc_counter_l;
429
430 ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
431 wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
432
433 /*
434 * An error can occur near frame "rollover". The clocks-in-frame
435 * value indicates whether this error may have occurred. Here we
436 * use a margin of 10 cycles, i.e.
437 * HDA_MAX_CYCLE_OFFSET.
438 */
439 if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
440 && wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
441 break;
442
443 /*
444 * Sleep before we read again, else we may again get a
445 * value near MAX_CYCLE. Try to sleep for a different
446 * amount of time so we don't hit the same number again.
447 */
448 udelay(retry_count++);
449
450 } while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
451
452 if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
453 dev_err_ratelimited(chip->card->dev,
454 "Error in WALFCC cycle count\n");
455 return -EIO;
456 }
457
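/*
 * Editor's note: the device time below is the link position counter scaled
 * to nanoseconds, plus the sub-frame residue measured in wall-clock cycles
 * ((HDA_MAX_CYCLE_VALUE + 1) cycles per audio frame).
 */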
458 *device = ns_to_ktime(azx_scale64(ll_counter,
459 NSEC_PER_SEC, runtime->rate));
460 *device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
461 ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
462
463 *system = convert_art_to_tsc(tsc_counter);
464
465 return 0;
466 }
467
468 #else
469 static int azx_get_sync_time(ktime_t *device,
470 struct system_counterval_t *system, void *ctx)
471 {
472 return -ENXIO;
473 }
474 #endif
475
476 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
477 struct system_device_crosststamp *xtstamp)
478 {
479 return get_device_system_crosststamp(azx_get_sync_time,
480 substream, NULL, xtstamp);
481 }
482
483 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
484 struct snd_pcm_audio_tstamp_config *ts)
485 {
486 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
487 if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
488 return true;
489
490 return false;
491 }
492
493 static int azx_get_time_info(struct snd_pcm_substream *substream,
494 struct timespec *system_ts, struct timespec *audio_ts,
495 struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
496 struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
497 {
498 struct azx_dev *azx_dev = get_azx_dev(substream);
499 struct snd_pcm_runtime *runtime = substream->runtime;
500 struct system_device_crosststamp xtstamp;
501 int ret;
502 u64 nsec;
503
504 if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
505 (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
506
507 snd_pcm_gettime(substream->runtime, system_ts);
508
509 nsec = timecounter_read(&azx_dev->core.tc);
510 nsec = div_u64(nsec, 3); /* can be optimized */
511 if (audio_tstamp_config->report_delay)
512 nsec = azx_adjust_codec_delay(substream, nsec);
513
514 *audio_ts = ns_to_timespec(nsec);
515
516 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
517 audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
518 audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
519
520 } else if (is_link_time_supported(runtime, audio_tstamp_config)) {
521
522 ret = azx_get_crosststamp(substream, &xtstamp);
523 if (ret)
524 return ret;
525
526 switch (runtime->tstamp_type) {
527 case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
528 return -EINVAL;
529
530 case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
531 *system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
532 break;
533
534 default:
535 *system_ts = ktime_to_timespec(xtstamp.sys_realtime);
536 break;
537
538 }
539
540 *audio_ts = ktime_to_timespec(xtstamp.device);
541
542 audio_tstamp_report->actual_type =
543 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
544 audio_tstamp_report->accuracy_report = 1;
545 /* 24 MHz WallClock == 42ns resolution */
546 audio_tstamp_report->accuracy = 42;
547
548 } else {
549 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
550 }
551
552 return 0;
553 }
554
555 static struct snd_pcm_hardware azx_pcm_hw = {
556 .info = (SNDRV_PCM_INFO_MMAP |
557 SNDRV_PCM_INFO_INTERLEAVED |
558 SNDRV_PCM_INFO_BLOCK_TRANSFER |
559 SNDRV_PCM_INFO_MMAP_VALID |
560 /* No full-resume yet implemented */
561 /* SNDRV_PCM_INFO_RESUME |*/
562 SNDRV_PCM_INFO_PAUSE |
563 SNDRV_PCM_INFO_SYNC_START |
564 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
565 SNDRV_PCM_INFO_HAS_LINK_ATIME |
566 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
567 .formats = SNDRV_PCM_FMTBIT_S16_LE,
568 .rates = SNDRV_PCM_RATE_48000,
569 .rate_min = 48000,
570 .rate_max = 48000,
571 .channels_min = 2,
572 .channels_max = 2,
573 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
574 .period_bytes_min = 128,
575 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
576 .periods_min = 2,
577 .periods_max = AZX_MAX_FRAG,
578 .fifo_size = 0,
579 };
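/*
 * These are only placeholder defaults: azx_pcm_open() below overrides
 * channels_min/max, formats and rates from the codec's hda_pcm_stream
 * before applying the runtime constraints.
 */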
580
581 static int azx_pcm_open(struct snd_pcm_substream *substream)
582 {
583 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
584 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
585 struct azx *chip = apcm->chip;
586 struct azx_dev *azx_dev;
587 struct snd_pcm_runtime *runtime = substream->runtime;
588 int err;
589 int buff_step;
590
591 snd_hda_codec_pcm_get(apcm->info);
592 mutex_lock(&chip->open_mutex);
593 azx_dev = azx_assign_device(chip, substream);
594 trace_azx_pcm_open(chip, azx_dev);
595 if (azx_dev == NULL) {
596 err = -EBUSY;
597 goto unlock;
598 }
599 runtime->private_data = azx_dev;
600
601 if (chip->gts_present)
602 azx_pcm_hw.info = azx_pcm_hw.info |
603 SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
604
605 runtime->hw = azx_pcm_hw;
606 runtime->hw.channels_min = hinfo->channels_min;
607 runtime->hw.channels_max = hinfo->channels_max;
608 runtime->hw.formats = hinfo->formats;
609 runtime->hw.rates = hinfo->rates;
610 snd_pcm_limit_hw_rates(runtime);
611 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
612
613 /* avoid wrap-around with wall-clock */
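/* editor's note: the 32-bit link wall clock runs at 24 MHz and wraps
 * after 2^32 / 24e6 ~= 179 s, hence the 178 s (178000000 us) cap below */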
614 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
615 20,
616 178000000);
617
618 if (chip->align_buffer_size)
619 /* constrain buffer sizes to be a multiple of 128
620 bytes. This is more efficient in terms of memory
621 access but isn't required by the HDA spec and
622 prevents users from specifying exact period/buffer
623 sizes. For example, at 44.1 kHz a period size set
624 to 20 ms will be rounded to 19.59 ms. */
625 buff_step = 128;
626 else
627 /* Don't enforce steps on buffer sizes; they still need
628 to be a multiple of 4 bytes (HDA spec). Tested on Intel
629 HDA controllers; may not work on all devices where the
630 option needs to be disabled */
631 buff_step = 4;
632
633 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
634 buff_step);
635 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
636 buff_step);
637 snd_hda_power_up(apcm->codec);
638 if (hinfo->ops.open)
639 err = hinfo->ops.open(hinfo, apcm->codec, substream);
640 else
641 err = -ENODEV;
642 if (err < 0) {
643 azx_release_device(azx_dev);
644 goto powerdown;
645 }
646 snd_pcm_limit_hw_rates(runtime);
647 /* sanity check */
648 if (snd_BUG_ON(!runtime->hw.channels_min) ||
649 snd_BUG_ON(!runtime->hw.channels_max) ||
650 snd_BUG_ON(!runtime->hw.formats) ||
651 snd_BUG_ON(!runtime->hw.rates)) {
652 azx_release_device(azx_dev);
653 if (hinfo->ops.close)
654 hinfo->ops.close(hinfo, apcm->codec, substream);
655 err = -EINVAL;
656 goto powerdown;
657 }
658
659 /* disable LINK_ATIME timestamps for capture streams
660 until we figure out how to handle digital inputs */
661 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
662 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
663 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
664 }
665
666 snd_pcm_set_sync(substream);
667 mutex_unlock(&chip->open_mutex);
668 return 0;
669
670 powerdown:
671 snd_hda_power_down(apcm->codec);
672 unlock:
673 mutex_unlock(&chip->open_mutex);
674 snd_hda_codec_pcm_put(apcm->info);
675 return err;
676 }
677
678 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
679 struct vm_area_struct *area)
680 {
681 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
682 struct azx *chip = apcm->chip;
683 if (chip->ops->pcm_mmap_prepare)
684 chip->ops->pcm_mmap_prepare(substream, area);
685 return snd_pcm_lib_default_mmap(substream, area);
686 }
687
688 static const struct snd_pcm_ops azx_pcm_ops = {
689 .open = azx_pcm_open,
690 .close = azx_pcm_close,
691 .ioctl = snd_pcm_lib_ioctl,
692 .hw_params = azx_pcm_hw_params,
693 .hw_free = azx_pcm_hw_free,
694 .prepare = azx_pcm_prepare,
695 .trigger = azx_pcm_trigger,
696 .pointer = azx_pcm_pointer,
697 .get_time_info = azx_get_time_info,
698 .mmap = azx_pcm_mmap,
699 .page = snd_pcm_sgbuf_ops_page,
700 };
701
702 static void azx_pcm_free(struct snd_pcm *pcm)
703 {
704 struct azx_pcm *apcm = pcm->private_data;
705 if (apcm) {
706 list_del(&apcm->list);
707 apcm->info->pcm = NULL;
708 kfree(apcm);
709 }
710 }
711
712 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
713
714 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
715 struct hda_pcm *cpcm)
716 {
717 struct hdac_bus *bus = &_bus->core;
718 struct azx *chip = bus_to_azx(bus);
719 struct snd_pcm *pcm;
720 struct azx_pcm *apcm;
721 int pcm_dev = cpcm->device;
722 unsigned int size;
723 int s, err;
724 int type = SNDRV_DMA_TYPE_DEV_SG;
725
726 list_for_each_entry(apcm, &chip->pcm_list, list) {
727 if (apcm->pcm->device == pcm_dev) {
728 dev_err(chip->card->dev, "PCM %d already exists\n",
729 pcm_dev);
730 return -EBUSY;
731 }
732 }
733 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
734 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
735 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
736 &pcm);
737 if (err < 0)
738 return err;
739 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
740 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
741 if (apcm == NULL) {
742 snd_device_free(chip->card, pcm);
743 return -ENOMEM;
744 }
745 apcm->chip = chip;
746 apcm->pcm = pcm;
747 apcm->codec = codec;
748 apcm->info = cpcm;
749 pcm->private_data = apcm;
750 pcm->private_free = azx_pcm_free;
751 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
752 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
753 list_add_tail(&apcm->list, &chip->pcm_list);
754 cpcm->pcm = pcm;
755 for (s = 0; s < 2; s++) {
756 if (cpcm->stream[s].substreams)
757 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
758 }
759 /* buffer pre-allocation */
760 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
761 if (size > MAX_PREALLOC_SIZE)
762 size = MAX_PREALLOC_SIZE;
763 if (chip->uc_buffer)
764 type = SNDRV_DMA_TYPE_DEV_UC_SG;
765 snd_pcm_lib_preallocate_pages_for_all(pcm, type,
766 chip->card->dev,
767 size, MAX_PREALLOC_SIZE);
768 return 0;
769 }
770
771 static unsigned int azx_command_addr(u32 cmd)
772 {
773 unsigned int addr = cmd >> 28;
774
775 if (addr >= AZX_MAX_CODECS) {
776 snd_BUG();
777 addr = 0;
778 }
779
780 return addr;
781 }
782
783 /* receive a response */
784 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
785 unsigned int *res)
786 {
787 struct azx *chip = bus_to_azx(bus);
788 struct hda_bus *hbus = &chip->bus;
789 unsigned long timeout;
790 unsigned long loopcounter;
791 int do_poll = 0;
792
793 again:
794 timeout = jiffies + msecs_to_jiffies(1000);
795
796 for (loopcounter = 0;; loopcounter++) {
797 spin_lock_irq(&bus->reg_lock);
798 if (chip->polling_mode || do_poll)
799 snd_hdac_bus_update_rirb(bus);
800 if (!bus->rirb.cmds[addr]) {
801 if (!do_poll)
802 chip->poll_count = 0;
803 if (res)
804 *res = bus->rirb.res[addr]; /* the last value */
805 spin_unlock_irq(&bus->reg_lock);
806 return 0;
807 }
808 spin_unlock_irq(&bus->reg_lock);
809 if (time_after(jiffies, timeout))
810 break;
811 if (hbus->needs_damn_long_delay || loopcounter > 3000)
812 msleep(2); /* temporary workaround */
813 else {
814 udelay(10);
815 cond_resched();
816 }
817 }
818
819 if (hbus->no_response_fallback)
820 return -EIO;
821
822 if (!chip->polling_mode && chip->poll_count < 2) {
823 dev_dbg(chip->card->dev,
824 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
825 bus->last_cmd[addr]);
826 do_poll = 1;
827 chip->poll_count++;
828 goto again;
829 }
830
831
832 if (!chip->polling_mode) {
833 dev_warn(chip->card->dev,
834 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
835 bus->last_cmd[addr]);
836 chip->polling_mode = 1;
837 goto again;
838 }
839
840 if (chip->msi) {
841 dev_warn(chip->card->dev,
842 "No response from codec, disabling MSI: last cmd=0x%08x\n",
843 bus->last_cmd[addr]);
844 if (chip->ops->disable_msi_reset_irq &&
845 chip->ops->disable_msi_reset_irq(chip) < 0)
846 return -EIO;
847 goto again;
848 }
849
850 if (chip->probing) {
851 /* If this critical timeout happens during the codec probing
852 * phase, it is likely an access to a non-existent codec
853 * slot. Better to return an error and reset the system.
854 */
855 return -EIO;
856 }
857
858 /* no fallback mechanism? */
859 if (!chip->fallback_to_single_cmd)
860 return -EIO;
861
862 /* a fatal communication error; need either to reset or to fallback
863 * to the single_cmd mode
864 */
865 if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
866 hbus->response_reset = 1;
867 return -EAGAIN; /* give a chance to retry */
868 }
869
870 dev_err(chip->card->dev,
871 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
872 bus->last_cmd[addr]);
873 chip->single_cmd = 1;
874 hbus->response_reset = 0;
875 snd_hdac_bus_stop_cmd_io(bus);
876 return -EIO;
877 }
878
879 /*
880 * Use the single immediate command instead of CORB/RIRB for simplicity
881 *
882 * Note: according to Intel, this is not the preferred use. The command
883 * interface was intended for the BIOS only, and may get confused with
884 * unsolicited responses. So we shouldn't use it for normal operation
885 * from the driver.
886 * The code is left here, however, for debugging/testing purposes.
887 */
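/*
 * Immediate-command handshake used below (editor's summary):
 *
 *	wait until the IRS busy bit (ICB) is clear
 *	write-1-clear the stale response-valid bit (IRV)
 *	write the verb to IC and set ICB to kick the command
 *	poll IRS until IRV is set, then read the response from IR
 */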
888
889 /* receive a response */
890 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
891 {
892 int timeout = 50;
893
894 while (timeout--) {
895 /* check IRV busy bit */
896 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
897 /* reuse rirb.res as the response return value */
898 azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
899 return 0;
900 }
901 udelay(1);
902 }
903 if (printk_ratelimit())
904 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
905 azx_readw(chip, IRS));
906 azx_bus(chip)->rirb.res[addr] = -1;
907 return -EIO;
908 }
909
910 /* send a command */
911 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
912 {
913 struct azx *chip = bus_to_azx(bus);
914 unsigned int addr = azx_command_addr(val);
915 int timeout = 50;
916
917 bus->last_cmd[azx_command_addr(val)] = val;
918 while (timeout--) {
919 /* check ICB busy bit */
920 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
921 /* Clear IRV valid bit */
922 azx_writew(chip, IRS, azx_readw(chip, IRS) |
923 AZX_IRS_VALID);
924 azx_writel(chip, IC, val);
925 azx_writew(chip, IRS, azx_readw(chip, IRS) |
926 AZX_IRS_BUSY);
927 return azx_single_wait_for_response(chip, addr);
928 }
929 udelay(1);
930 }
931 if (printk_ratelimit())
932 dev_dbg(chip->card->dev,
933 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
934 azx_readw(chip, IRS), val);
935 return -EIO;
936 }
937
938 /* receive a response */
939 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
940 unsigned int *res)
941 {
942 if (res)
943 *res = bus->rirb.res[addr];
944 return 0;
945 }
946
947 /*
948 * The below are the main callbacks from hda_codec.
949 *
950 * They are just skeletons that dispatch to the sub-callbacks according
951 * to the current setting of chip->single_cmd.
952 */
953
954 /* send a command */
955 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
956 {
957 struct azx *chip = bus_to_azx(bus);
958
959 if (chip->disabled)
960 return 0;
961 if (chip->single_cmd)
962 return azx_single_send_cmd(bus, val);
963 else
964 return snd_hdac_bus_send_cmd(bus, val);
965 }
966
967 /* get a response */
968 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
969 unsigned int *res)
970 {
971 struct azx *chip = bus_to_azx(bus);
972
973 if (chip->disabled)
974 return 0;
975 if (chip->single_cmd)
976 return azx_single_get_response(bus, addr, res);
977 else
978 return azx_rirb_get_response(bus, addr, res);
979 }
980
981 static const struct hdac_bus_ops bus_core_ops = {
982 .command = azx_send_cmd,
983 .get_response = azx_get_response,
984 };
985
986 #ifdef CONFIG_SND_HDA_DSP_LOADER
987 /*
988 * DSP loading code (e.g. for CA0132)
989 */
990
991 /* use the first stream for loading DSP */
992 static struct azx_dev *
993 azx_get_dsp_loader_dev(struct azx *chip)
994 {
995 struct hdac_bus *bus = azx_bus(chip);
996 struct hdac_stream *s;
997
998 list_for_each_entry(s, &bus->stream_list, list)
999 if (s->index == chip->playback_index_offset)
1000 return stream_to_azx_dev(s);
1001
1002 return NULL;
1003 }
1004
1005 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
1006 unsigned int byte_size,
1007 struct snd_dma_buffer *bufp)
1008 {
1009 struct hdac_bus *bus = &codec->bus->core;
1010 struct azx *chip = bus_to_azx(bus);
1011 struct azx_dev *azx_dev;
1012 struct hdac_stream *hstr;
1013 bool saved = false;
1014 int err;
1015
1016 azx_dev = azx_get_dsp_loader_dev(chip);
1017 hstr = azx_stream(azx_dev);
1018 spin_lock_irq(&bus->reg_lock);
1019 if (hstr->opened) {
1020 chip->saved_azx_dev = *azx_dev;
1021 saved = true;
1022 }
1023 spin_unlock_irq(&bus->reg_lock);
1024
1025 err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
1026 if (err < 0) {
1027 spin_lock_irq(&bus->reg_lock);
1028 if (saved)
1029 *azx_dev = chip->saved_azx_dev;
1030 spin_unlock_irq(&bus->reg_lock);
1031 return err;
1032 }
1033
1034 hstr->prepared = 0;
1035 return err;
1036 }
1037 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
1038
1039 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
1040 {
1041 struct hdac_bus *bus = &codec->bus->core;
1042 struct azx *chip = bus_to_azx(bus);
1043 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1044
1045 snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
1046 }
1047 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
1048
1049 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1050 struct snd_dma_buffer *dmab)
1051 {
1052 struct hdac_bus *bus = &codec->bus->core;
1053 struct azx *chip = bus_to_azx(bus);
1054 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1055 struct hdac_stream *hstr = azx_stream(azx_dev);
1056
1057 if (!dmab->area || !hstr->locked)
1058 return;
1059
1060 snd_hdac_dsp_cleanup(hstr, dmab);
1061 spin_lock_irq(&bus->reg_lock);
1062 if (hstr->opened)
1063 *azx_dev = chip->saved_azx_dev;
1064 hstr->locked = false;
1065 spin_unlock_irq(&bus->reg_lock);
1066 }
1067 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1068 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1069
1070 /*
1071 * reset and start the controller registers
1072 */
1073 void azx_init_chip(struct azx *chip, bool full_reset)
1074 {
1075 if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1076 /* correct RINTCNT for CXT */
1077 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1078 azx_writew(chip, RINTCNT, 0xc0);
1079 }
1080 }
1081 EXPORT_SYMBOL_GPL(azx_init_chip);
1082
1083 void azx_stop_all_streams(struct azx *chip)
1084 {
1085 struct hdac_bus *bus = azx_bus(chip);
1086 struct hdac_stream *s;
1087
1088 list_for_each_entry(s, &bus->stream_list, list)
1089 snd_hdac_stream_stop(s);
1090 }
1091 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1092
1093 void azx_stop_chip(struct azx *chip)
1094 {
1095 snd_hdac_bus_stop_chip(azx_bus(chip));
1096 }
1097 EXPORT_SYMBOL_GPL(azx_stop_chip);
1098
1099 /*
1100 * interrupt handler
1101 */
1102 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1103 {
1104 struct azx *chip = bus_to_azx(bus);
1105 struct azx_dev *azx_dev = stream_to_azx_dev(s);
1106
1107 /* check whether this IRQ is really acceptable */
1108 if (!chip->ops->position_check ||
1109 chip->ops->position_check(chip, azx_dev)) {
1110 spin_unlock(&bus->reg_lock);
1111 snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1112 spin_lock(&bus->reg_lock);
1113 }
1114 }
1115
1116 irqreturn_t azx_interrupt(int irq, void *dev_id)
1117 {
1118 struct azx *chip = dev_id;
1119 struct hdac_bus *bus = azx_bus(chip);
1120 u32 status;
1121 bool active, handled = false;
1122 int repeat = 0; /* count for avoiding endless loop */
1123
1124 #ifdef CONFIG_PM
1125 if (azx_has_pm_runtime(chip))
1126 if (!pm_runtime_active(chip->card->dev))
1127 return IRQ_NONE;
1128 #endif
1129
1130 spin_lock(&bus->reg_lock);
1131
1132 if (chip->disabled)
1133 goto unlock;
1134
1135 do {
1136 status = azx_readl(chip, INTSTS);
1137 if (status == 0 || status == 0xffffffff)
1138 break;
1139
1140 handled = true;
1141 active = false;
1142 if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1143 active = true;
1144
1145 /* clear rirb int */
1146 status = azx_readb(chip, RIRBSTS);
1147 if (status & RIRB_INT_MASK) {
1148 active = true;
1149 if (status & RIRB_INT_RESPONSE) {
1150 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1151 udelay(80);
1152 snd_hdac_bus_update_rirb(bus);
1153 }
1154 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1155 }
1156 } while (active && ++repeat < 10);
1157
1158 unlock:
1159 spin_unlock(&bus->reg_lock);
1160
1161 return IRQ_RETVAL(handled);
1162 }
1163 EXPORT_SYMBOL_GPL(azx_interrupt);
1164
1165 /*
1166 * Codec interface
1167 */
1168
1169 /*
1170 * Probe the given codec address
1171 */
1172 static int probe_codec(struct azx *chip, int addr)
1173 {
1174 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1175 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1176 struct hdac_bus *bus = azx_bus(chip);
1177 int err;
1178 unsigned int res = -1;
1179
1180 mutex_lock(&bus->cmd_mutex);
1181 chip->probing = 1;
1182 azx_send_cmd(bus, cmd);
1183 err = azx_get_response(bus, addr, &res);
1184 chip->probing = 0;
1185 mutex_unlock(&bus->cmd_mutex);
1186 if (err < 0 || res == -1)
1187 return -EIO;
1188 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1189 return 0;
1190 }
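/*
 * The probe verb above follows the HDA verb layout: bits 31:28 are the codec
 * address, 27:20 the NID, and (for 12-bit verbs such as AC_VERB_PARAMETERS)
 * 19:8 the verb ID with 7:0 as payload.  For example (editor's
 * illustration), addr 0 gives (0 << 28) | (AC_NODE_ROOT << 20) |
 * (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID == 0x000f0000, i.e. "read
 * the root node's vendor ID parameter".
 */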
1191
1192 void snd_hda_bus_reset(struct hda_bus *bus)
1193 {
1194 struct azx *chip = bus_to_azx(&bus->core);
1195
1196 bus->in_reset = 1;
1197 azx_stop_chip(chip);
1198 azx_init_chip(chip, true);
1199 if (bus->core.chip_init)
1200 snd_hda_bus_reset_codecs(bus);
1201 bus->in_reset = 0;
1202 }
1203
1204 /* HD-audio bus initialization */
1205 int azx_bus_init(struct azx *chip, const char *model,
1206 const struct hdac_io_ops *io_ops)
1207 {
1208 struct hda_bus *bus = &chip->bus;
1209 int err;
1210
1211 err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops,
1212 io_ops);
1213 if (err < 0)
1214 return err;
1215
1216 bus->card = chip->card;
1217 mutex_init(&bus->prepare_mutex);
1218 bus->pci = chip->pci;
1219 bus->modelname = model;
1220 bus->mixer_assigned = -1;
1221 bus->core.snoop = azx_snoop(chip);
1222 if (chip->get_position[0] != azx_get_pos_lpib ||
1223 chip->get_position[1] != azx_get_pos_lpib)
1224 bus->core.use_posbuf = true;
1225 bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1226 if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1227 bus->core.corbrp_self_clear = true;
1228
1229 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1230 bus->core.align_bdle_4k = true;
1231
1232 /* AMD chipsets often cause communication stalls upon certain
1233 * sequences such as pin detection. It seems that forcing synced
1234 * access works around the stall. Grrr...
1235 */
1236 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1237 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1238 bus->core.sync_write = 1;
1239 bus->allow_bus_reset = 1;
1240 }
1241
1242 return 0;
1243 }
1244 EXPORT_SYMBOL_GPL(azx_bus_init);
1245
1246 /* Probe codecs */
1247 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1248 {
1249 struct hdac_bus *bus = azx_bus(chip);
1250 int c, codecs, err;
1251
1252 codecs = 0;
1253 if (!max_slots)
1254 max_slots = AZX_DEFAULT_CODECS;
1255
1256 /* First try to probe all given codec slots */
1257 for (c = 0; c < max_slots; c++) {
1258 if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1259 if (probe_codec(chip, c) < 0) {
1260 /* Some BIOSes report codec addresses
1261 * that don't exist
1262 */
1263 dev_warn(chip->card->dev,
1264 "Codec #%d probe error; disabling it...\n", c);
1265 bus->codec_mask &= ~(1 << c);
1266 /* Worse, accessing a non-existent
1267 * codec often screws up the controller chip
1268 * and disturbs further communication.
1269 * Thus, if an error occurs during probing,
1270 * it's better to reset the controller chip
1271 * to get back to a sane state.
1272 */
1273 azx_stop_chip(chip);
1274 azx_init_chip(chip, true);
1275 }
1276 }
1277 }
1278
1279 /* Then create codec instances */
1280 for (c = 0; c < max_slots; c++) {
1281 if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1282 struct hda_codec *codec;
1283 err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1284 if (err < 0)
1285 continue;
1286 codec->jackpoll_interval = chip->jackpoll_interval;
1287 codec->beep_mode = chip->beep_mode;
1288 codecs++;
1289 }
1290 }
1291 if (!codecs) {
1292 dev_err(chip->card->dev, "no codecs initialized\n");
1293 return -ENXIO;
1294 }
1295 return 0;
1296 }
1297 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1298
1299 /* configure each codec instance */
1300 int azx_codec_configure(struct azx *chip)
1301 {
1302 struct hda_codec *codec, *next;
1303
1304 /* use _safe version here since snd_hda_codec_configure() deregisters
1305 * the device upon error and deletes itself from the bus list.
1306 */
1307 list_for_each_codec_safe(codec, next, &chip->bus) {
1308 snd_hda_codec_configure(codec);
1309 }
1310
1311 if (!azx_bus(chip)->num_codecs)
1312 return -ENODEV;
1313 return 0;
1314 }
1315 EXPORT_SYMBOL_GPL(azx_codec_configure);
1316
1317 static int stream_direction(struct azx *chip, unsigned char index)
1318 {
1319 if (index >= chip->capture_index_offset &&
1320 index < chip->capture_index_offset + chip->capture_streams)
1321 return SNDRV_PCM_STREAM_CAPTURE;
1322 return SNDRV_PCM_STREAM_PLAYBACK;
1323 }
1324
1325 /* initialize SD streams */
1326 int azx_init_streams(struct azx *chip)
1327 {
1328 int i;
1329 int stream_tags[2] = { 0, 0 };
1330
1331 /* initialize each stream (aka device):
1332 * assign the starting BDL address to each stream (device)
1333 * and initialize it
1334 */
1335 for (i = 0; i < chip->num_streams; i++) {
1336 struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1337 int dir, tag;
1338
1339 if (!azx_dev)
1340 return -ENOMEM;
1341
1342 dir = stream_direction(chip, i);
1343 /* the stream tag must be unique throughout
1344 * each stream direction group;
1345 * valid values are 1...15.
1346 * Use separate per-direction tags if the flag
1347 * AZX_DCAPS_SEPARATE_STREAM_TAG is set.
1348 */
1349 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1350 tag = ++stream_tags[dir];
1351 else
1352 tag = i + 1;
1353 snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1354 i, dir, tag);
1355 }
1356
1357 return 0;
1358 }
1359 EXPORT_SYMBOL_GPL(azx_init_streams);
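/*
 * Example (editor's illustration): with 4 capture and 4 playback streams,
 * the legacy scheme tags the streams 1..8 by index, while a controller with
 * AZX_DCAPS_SEPARATE_STREAM_TAG tags the capture streams 1..4 and the
 * playback streams independently 1..4.
 */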
1360
1361 void azx_free_streams(struct azx *chip)
1362 {
1363 struct hdac_bus *bus = azx_bus(chip);
1364 struct hdac_stream *s;
1365
1366 while (!list_empty(&bus->stream_list)) {
1367 s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1368 list_del(&s->list);
1369 kfree(stream_to_azx_dev(s));
1370 }
1371 }
1372 EXPORT_SYMBOL_GPL(azx_free_streams);