sound/pci/hda/hda_controller.c
1 /*
2 *
3 * Implementation of the primary ALSA driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_priv.h"
34 #include "hda_controller.h"
35
36 #define CREATE_TRACE_POINTS
37 #include "hda_intel_trace.h"
38
39 /* DSP lock helpers */
40 #ifdef CONFIG_SND_HDA_DSP_LOADER
41 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
42 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
43 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
44 #define dsp_is_locked(dev) ((dev)->locked)
45 #else
46 #define dsp_lock_init(dev) do {} while (0)
47 #define dsp_lock(dev) do {} while (0)
48 #define dsp_unlock(dev) do {} while (0)
49 #define dsp_is_locked(dev) 0
50 #endif
51
52 /*
53 * AZX stream operations.
54 */
55
56 /* start a stream */
57 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
58 {
59 /*
60 * Before stream start, initialize parameters
61 */
62 azx_dev->insufficient = 1;
63
64 /* enable SIE */
65 azx_writel(chip, INTCTL,
66 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
67 /* set DMA start and interrupt mask */
68 azx_sd_writeb(chip, azx_dev, SD_CTL,
69 azx_sd_readb(chip, azx_dev, SD_CTL) |
70 SD_CTL_DMA_START | SD_INT_MASK);
71 }
72
73 /* stop DMA */
74 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
75 {
76 azx_sd_writeb(chip, azx_dev, SD_CTL,
77 azx_sd_readb(chip, azx_dev, SD_CTL) &
78 ~(SD_CTL_DMA_START | SD_INT_MASK));
79 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
80 }
81
82 /* stop a stream */
83 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
84 {
85 azx_stream_clear(chip, azx_dev);
86 /* disable SIE */
87 azx_writel(chip, INTCTL,
88 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 }
90 EXPORT_SYMBOL_GPL(azx_stream_stop);
91
92 /* reset stream */
93 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
94 {
95 unsigned char val;
96 int timeout;
97
98 azx_stream_clear(chip, azx_dev);
99
100 azx_sd_writeb(chip, azx_dev, SD_CTL,
101 azx_sd_readb(chip, azx_dev, SD_CTL) |
102 SD_CTL_STREAM_RESET);
103 udelay(3);
104 timeout = 300;
105 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
106 SD_CTL_STREAM_RESET) && --timeout)
107 ;
108 val &= ~SD_CTL_STREAM_RESET;
109 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
110 udelay(3);
111
112 timeout = 300;
113 /* waiting for hardware to report that the stream is out of reset */
114 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
115 SD_CTL_STREAM_RESET) && --timeout)
116 ;
117
118 /* reset first position - may not be synced with hw at this time */
119 *azx_dev->posbuf = 0;
120 }
121
122 /*
123 * set up the SD for streaming
124 */
125 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
126 {
127 unsigned int val;
128 /* make sure the run bit is zero for SD */
129 azx_stream_clear(chip, azx_dev);
130 /* program the stream_tag */
131 val = azx_sd_readl(chip, azx_dev, SD_CTL);
132 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
133 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
134 if (!azx_snoop(chip))
135 val |= SD_CTL_TRAFFIC_PRIO;
136 azx_sd_writel(chip, azx_dev, SD_CTL, val);
137
138 /* program the length of samples in cyclic buffer */
139 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140
141 /* program the stream format */
142 /* this value needs to be the same as the one programmed */
143 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144
145 /* program the stream LVI (last valid index) of the BDL */
146 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147
148 /* program the BDL address */
149 /* lower BDL address */
150 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
151 /* upper BDL address */
152 azx_sd_writel(chip, azx_dev, SD_BDLPU,
153 upper_32_bits(azx_dev->bdl.addr));
154
155 /* enable the position buffer */
156 if (chip->get_position[0] != azx_get_pos_lpib ||
157 chip->get_position[1] != azx_get_pos_lpib) {
158 if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
159 azx_writel(chip, DPLBASE,
160 (u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
161 }
162
163 /* set the interrupt enable bits in the descriptor control register */
164 azx_sd_writel(chip, azx_dev, SD_CTL,
165 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
166
167 return 0;
168 }
169
170 /* assign a stream for the PCM */
171 static inline struct azx_dev *
172 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
173 {
174 int dev, i, nums;
175 struct azx_dev *res = NULL;
176 /* make a non-zero unique key for the substream */
177 int key = (substream->pcm->device << 16) | (substream->number << 2) |
178 (substream->stream + 1);
179
180 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
181 dev = chip->playback_index_offset;
182 nums = chip->playback_streams;
183 } else {
184 dev = chip->capture_index_offset;
185 nums = chip->capture_streams;
186 }
187 for (i = 0; i < nums; i++, dev++) {
188 struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 dsp_lock(azx_dev);
190 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
191 if (azx_dev->assigned_key == key) {
192 azx_dev->opened = 1;
193 azx_dev->assigned_key = key;
194 dsp_unlock(azx_dev);
195 return azx_dev;
196 }
197 if (!res)
198 res = azx_dev;
199 }
200 dsp_unlock(azx_dev);
201 }
202 if (res) {
203 dsp_lock(res);
204 res->opened = 1;
205 res->assigned_key = key;
206 dsp_unlock(res);
207 }
208 return res;
209 }
210
211 /* release the assigned stream */
212 static inline void azx_release_device(struct azx_dev *azx_dev)
213 {
214 azx_dev->opened = 0;
215 }
216
217 static cycle_t azx_cc_read(const struct cyclecounter *cc)
218 {
219 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
220 struct snd_pcm_substream *substream = azx_dev->substream;
221 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
222 struct azx *chip = apcm->chip;
223
224 return azx_readl(chip, WALLCLK);
225 }
226
227 static void azx_timecounter_init(struct snd_pcm_substream *substream,
228 bool force, cycle_t last)
229 {
230 struct azx_dev *azx_dev = get_azx_dev(substream);
231 struct timecounter *tc = &azx_dev->azx_tc;
232 struct cyclecounter *cc = &azx_dev->azx_cc;
233 u64 nsec;
234
235 cc->read = azx_cc_read;
236 cc->mask = CLOCKSOURCE_MASK(32);
237
238 /*
239 * Converting from 24 MHz to ns means applying a 125/3 factor.
240 * To avoid any saturation issues in intermediate operations,
241 * the 125 factor is applied first. The division is applied
242 * last after reading the timecounter value.
243 * Applying the 1/3 factor as part of the multiplication
244 * requires at least 20 bits for a decent precision, however
245 * overflows occur after about 4 hours or less, not an option.
246 */
247
248 cc->mult = 125; /* saturation after 195 years */
249 cc->shift = 0;
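	/*
	 * Illustrative arithmetic: one 24 MHz tick is 1/24 us = 125/3 ns.
	 * With mult = 125 and shift = 0, timecounter_read() returns
	 * cycles * 125; the remaining division by 3 is applied later in
	 * azx_get_wallclock_tstamp().  The 64-bit accumulator overflows
	 * only after 2^64 / (24e6 * 125) ~= 6.1e9 seconds, i.e. roughly
	 * the 195 years noted above.
	 */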
250
251 nsec = 0; /* audio time is elapsed time since trigger */
252 timecounter_init(tc, cc, nsec);
253 if (force)
254 /*
255 * force timecounter to use predefined value,
256 * used for synchronized starts
257 */
258 tc->cycle_last = last;
259 }
260
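/*
 * Adjust a wall-clock timestamp by the codec-side delay reported via
 * ops.get_delay(): the delay in frames is converted to nanoseconds
 * (frames * 10^9 / rate), then added for capture streams and subtracted
 * (clamped at zero) for playback streams.
 */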
261 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
262 u64 nsec)
263 {
264 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
265 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
266 u64 codec_frames, codec_nsecs;
267
268 if (!hinfo->ops.get_delay)
269 return nsec;
270
271 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
272 codec_nsecs = div_u64(codec_frames * 1000000000LL,
273 substream->runtime->rate);
274
275 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
276 return nsec + codec_nsecs;
277
278 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
279 }
280
281 /*
282 * set up a BDL entry
283 */
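/*
 * Each BDL entry written below is four 32-bit words: [0] buffer address
 * (low), [1] buffer address (high), [2] chunk length in bytes, and
 * [3] the IOC flag (bit 0), set only on the last chunk of a fragment so
 * that an interrupt is raised once the whole fragment has been processed.
 */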
284 static int setup_bdle(struct azx *chip,
285 struct snd_dma_buffer *dmab,
286 struct azx_dev *azx_dev, u32 **bdlp,
287 int ofs, int size, int with_ioc)
288 {
289 u32 *bdl = *bdlp;
290
291 while (size > 0) {
292 dma_addr_t addr;
293 int chunk;
294
295 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
296 return -EINVAL;
297
298 addr = snd_sgbuf_get_addr(dmab, ofs);
299 /* program the address field of the BDL entry */
300 bdl[0] = cpu_to_le32((u32)addr);
301 bdl[1] = cpu_to_le32(upper_32_bits(addr));
302 /* program the size field of the BDL entry */
303 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
304 /* one BDLE cannot cross 4K boundary on CTHDA chips */
305 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
306 u32 remain = 0x1000 - (ofs & 0xfff);
307 if (chunk > remain)
308 chunk = remain;
309 }
310 bdl[2] = cpu_to_le32(chunk);
311 /* program the IOC to enable interrupt
312 * only when the whole fragment is processed
313 */
314 size -= chunk;
315 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
316 bdl += 4;
317 azx_dev->frags++;
318 ofs += chunk;
319 }
320 *bdlp = bdl;
321 return ofs;
322 }
323
324 /*
325 * set up BDL entries
326 */
327 static int azx_setup_periods(struct azx *chip,
328 struct snd_pcm_substream *substream,
329 struct azx_dev *azx_dev)
330 {
331 u32 *bdl;
332 int i, ofs, periods, period_bytes;
333 int pos_adj = 0;
334
335 /* reset BDL address */
336 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
337 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
338
339 period_bytes = azx_dev->period_bytes;
340 periods = azx_dev->bufsize / period_bytes;
341
342 /* program the initial BDL entries */
343 bdl = (u32 *)azx_dev->bdl.area;
344 ofs = 0;
345 azx_dev->frags = 0;
346
347 if (chip->bdl_pos_adj)
348 pos_adj = chip->bdl_pos_adj[chip->dev_index];
349 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
350 struct snd_pcm_runtime *runtime = substream->runtime;
351 int pos_align = pos_adj;
352 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
353 if (!pos_adj)
354 pos_adj = pos_align;
355 else
356 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
357 pos_align;
358 pos_adj = frames_to_bytes(runtime, pos_adj);
359 if (pos_adj >= period_bytes) {
360 dev_warn(chip->card->dev, "Too big adjustment %d\n",
361 pos_adj);
362 pos_adj = 0;
363 } else {
364 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
365 azx_dev,
366 &bdl, ofs, pos_adj, true);
367 if (ofs < 0)
368 goto error;
369 }
370 } else
371 pos_adj = 0;
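	/*
	 * Worked example (illustrative values): with bdl_pos_adj = 32 and a
	 * 44.1 kHz stream, (32 * 44100 + 47999) / 48000 = 30 frames, which
	 * is rounded up to the next multiple of 32, i.e. 32 frames, and then
	 * converted to bytes for the extra leading BDL entry set up above.
	 */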
372
373 for (i = 0; i < periods; i++) {
374 if (i == periods - 1 && pos_adj)
375 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
376 azx_dev, &bdl, ofs,
377 period_bytes - pos_adj, 0);
378 else
379 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
380 azx_dev, &bdl, ofs,
381 period_bytes,
382 !azx_dev->no_period_wakeup);
383 if (ofs < 0)
384 goto error;
385 }
386 return 0;
387
388 error:
389 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
390 azx_dev->bufsize, period_bytes);
391 return -EINVAL;
392 }
393
394 /*
395 * PCM ops
396 */
397
398 static int azx_pcm_close(struct snd_pcm_substream *substream)
399 {
400 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
401 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
402 struct azx *chip = apcm->chip;
403 struct azx_dev *azx_dev = get_azx_dev(substream);
404 unsigned long flags;
405
406 mutex_lock(&chip->open_mutex);
407 spin_lock_irqsave(&chip->reg_lock, flags);
408 azx_dev->substream = NULL;
409 azx_dev->running = 0;
410 spin_unlock_irqrestore(&chip->reg_lock, flags);
411 azx_release_device(azx_dev);
412 hinfo->ops.close(hinfo, apcm->codec, substream);
413 snd_hda_power_down(apcm->codec);
414 mutex_unlock(&chip->open_mutex);
415 return 0;
416 }
417
418 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
419 struct snd_pcm_hw_params *hw_params)
420 {
421 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
422 struct azx *chip = apcm->chip;
423 int ret;
424
425 dsp_lock(get_azx_dev(substream));
426 if (dsp_is_locked(get_azx_dev(substream))) {
427 ret = -EBUSY;
428 goto unlock;
429 }
430
431 ret = chip->ops->substream_alloc_pages(chip, substream,
432 params_buffer_bytes(hw_params));
433 unlock:
434 dsp_unlock(get_azx_dev(substream));
435 return ret;
436 }
437
438 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
439 {
440 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
441 struct azx_dev *azx_dev = get_azx_dev(substream);
442 struct azx *chip = apcm->chip;
443 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
444 int err;
445
446 /* reset BDL address */
447 dsp_lock(azx_dev);
448 if (!dsp_is_locked(azx_dev)) {
449 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
450 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
451 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
452 azx_dev->bufsize = 0;
453 azx_dev->period_bytes = 0;
454 azx_dev->format_val = 0;
455 }
456
457 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
458
459 err = chip->ops->substream_free_pages(chip, substream);
460 azx_dev->prepared = 0;
461 dsp_unlock(azx_dev);
462 return err;
463 }
464
465 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
466 {
467 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
468 struct azx *chip = apcm->chip;
469 struct azx_dev *azx_dev = get_azx_dev(substream);
470 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
471 struct snd_pcm_runtime *runtime = substream->runtime;
472 unsigned int bufsize, period_bytes, format_val, stream_tag;
473 int err;
474 struct hda_spdif_out *spdif =
475 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
476 unsigned short ctls = spdif ? spdif->ctls : 0;
477
478 dsp_lock(azx_dev);
479 if (dsp_is_locked(azx_dev)) {
480 err = -EBUSY;
481 goto unlock;
482 }
483
484 azx_stream_reset(chip, azx_dev);
485 format_val = snd_hda_calc_stream_format(apcm->codec,
486 runtime->rate,
487 runtime->channels,
488 runtime->format,
489 hinfo->maxbps,
490 ctls);
491 if (!format_val) {
492 dev_err(chip->card->dev,
493 "invalid format_val, rate=%d, ch=%d, format=%d\n",
494 runtime->rate, runtime->channels, runtime->format);
495 err = -EINVAL;
496 goto unlock;
497 }
498
499 bufsize = snd_pcm_lib_buffer_bytes(substream);
500 period_bytes = snd_pcm_lib_period_bytes(substream);
501
502 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
503 bufsize, format_val);
504
505 if (bufsize != azx_dev->bufsize ||
506 period_bytes != azx_dev->period_bytes ||
507 format_val != azx_dev->format_val ||
508 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
509 azx_dev->bufsize = bufsize;
510 azx_dev->period_bytes = period_bytes;
511 azx_dev->format_val = format_val;
512 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
513 err = azx_setup_periods(chip, substream, azx_dev);
514 if (err < 0)
515 goto unlock;
516 }
517
518 /* when LPIB delay correction gives a small negative value,
519 * we ignore it; currently set the threshold statically to
520 * 64 frames
521 */
522 if (runtime->period_size > 64)
523 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
524 else
525 azx_dev->delay_negative_threshold = 0;
526
527 /* wallclk has a 24 MHz clock source */
528 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
529 runtime->rate) * 1000);
530 azx_setup_controller(chip, azx_dev);
531 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
532 azx_dev->fifo_size =
533 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
534 else
535 azx_dev->fifo_size = 0;
536
537 stream_tag = azx_dev->stream_tag;
538 /* CA-IBG chips need the playback stream starting from 1 */
539 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
540 stream_tag > chip->capture_streams)
541 stream_tag -= chip->capture_streams;
542 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
543 azx_dev->format_val, substream);
544
545 unlock:
546 if (!err)
547 azx_dev->prepared = 1;
548 dsp_unlock(azx_dev);
549 return err;
550 }
551
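/*
 * The trigger handler below starts/stops all streams linked to the substream
 * as one group: the SSYNC bits of the group are set first, each DMA engine
 * is started (or stopped), the code waits until every stream reports
 * SD_STS_FIFO_READY (or its RUN bit clears), and only then are the SSYNC
 * bits released, which keeps grouped streams running in lockstep.
 */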
552 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
553 {
554 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
555 struct azx *chip = apcm->chip;
556 struct azx_dev *azx_dev;
557 struct snd_pcm_substream *s;
558 int rstart = 0, start, nsync = 0, sbits = 0;
559 int nwait, timeout;
560
561 azx_dev = get_azx_dev(substream);
562 trace_azx_pcm_trigger(chip, azx_dev, cmd);
563
564 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
565 return -EPIPE;
566
567 switch (cmd) {
568 case SNDRV_PCM_TRIGGER_START:
569 rstart = 1;
570 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
571 case SNDRV_PCM_TRIGGER_RESUME:
572 start = 1;
573 break;
574 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
575 case SNDRV_PCM_TRIGGER_SUSPEND:
576 case SNDRV_PCM_TRIGGER_STOP:
577 start = 0;
578 break;
579 default:
580 return -EINVAL;
581 }
582
583 snd_pcm_group_for_each_entry(s, substream) {
584 if (s->pcm->card != substream->pcm->card)
585 continue;
586 azx_dev = get_azx_dev(s);
587 sbits |= 1 << azx_dev->index;
588 nsync++;
589 snd_pcm_trigger_done(s, substream);
590 }
591
592 spin_lock(&chip->reg_lock);
593
594 /* first, set SYNC bits of corresponding streams */
595 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
596 azx_writel(chip, OLD_SSYNC,
597 azx_readl(chip, OLD_SSYNC) | sbits);
598 else
599 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
600
601 snd_pcm_group_for_each_entry(s, substream) {
602 if (s->pcm->card != substream->pcm->card)
603 continue;
604 azx_dev = get_azx_dev(s);
605 if (start) {
606 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
607 if (!rstart)
608 azx_dev->start_wallclk -=
609 azx_dev->period_wallclk;
610 azx_stream_start(chip, azx_dev);
611 } else {
612 azx_stream_stop(chip, azx_dev);
613 }
614 azx_dev->running = start;
615 }
616 spin_unlock(&chip->reg_lock);
617 if (start) {
618 /* wait until all FIFOs get ready */
619 for (timeout = 5000; timeout; timeout--) {
620 nwait = 0;
621 snd_pcm_group_for_each_entry(s, substream) {
622 if (s->pcm->card != substream->pcm->card)
623 continue;
624 azx_dev = get_azx_dev(s);
625 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
626 SD_STS_FIFO_READY))
627 nwait++;
628 }
629 if (!nwait)
630 break;
631 cpu_relax();
632 }
633 } else {
634 /* wait until all RUN bits are cleared */
635 for (timeout = 5000; timeout; timeout--) {
636 nwait = 0;
637 snd_pcm_group_for_each_entry(s, substream) {
638 if (s->pcm->card != substream->pcm->card)
639 continue;
640 azx_dev = get_azx_dev(s);
641 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
642 SD_CTL_DMA_START)
643 nwait++;
644 }
645 if (!nwait)
646 break;
647 cpu_relax();
648 }
649 }
650 spin_lock(&chip->reg_lock);
651 /* reset SYNC bits */
652 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
653 azx_writel(chip, OLD_SSYNC,
654 azx_readl(chip, OLD_SSYNC) & ~sbits);
655 else
656 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
657 if (start) {
658 azx_timecounter_init(substream, 0, 0);
659 if (nsync > 1) {
660 cycle_t cycle_last;
661
662 /* same start cycle for master and group */
663 azx_dev = get_azx_dev(substream);
664 cycle_last = azx_dev->azx_tc.cycle_last;
665
666 snd_pcm_group_for_each_entry(s, substream) {
667 if (s->pcm->card != substream->pcm->card)
668 continue;
669 azx_timecounter_init(s, 1, cycle_last);
670 }
671 }
672 }
673 spin_unlock(&chip->reg_lock);
674 return 0;
675 }
676
677 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
678 {
679 return azx_sd_readl(chip, azx_dev, SD_LPIB);
680 }
681 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
682
683 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
684 {
685 return le32_to_cpu(*azx_dev->posbuf);
686 }
687 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
688
689 unsigned int azx_get_position(struct azx *chip,
690 struct azx_dev *azx_dev)
691 {
692 struct snd_pcm_substream *substream = azx_dev->substream;
693 unsigned int pos;
694 int stream = substream->stream;
695 int delay = 0;
696
697 if (chip->get_position[stream])
698 pos = chip->get_position[stream](chip, azx_dev);
699 else /* use the position buffer as default */
700 pos = azx_get_pos_posbuf(chip, azx_dev);
701
702 if (pos >= azx_dev->bufsize)
703 pos = 0;
704
705 if (substream->runtime) {
706 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
707 struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
708
709 if (chip->get_delay[stream])
710 delay += chip->get_delay[stream](chip, azx_dev, pos);
711 if (hinfo->ops.get_delay)
712 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
713 substream);
714 substream->runtime->delay = delay;
715 }
716
717 trace_azx_get_position(chip, azx_dev, pos, delay);
718 return pos;
719 }
720 EXPORT_SYMBOL_GPL(azx_get_position);
721
722 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
723 {
724 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
725 struct azx *chip = apcm->chip;
726 struct azx_dev *azx_dev = get_azx_dev(substream);
727 return bytes_to_frames(substream->runtime,
728 azx_get_position(chip, azx_dev));
729 }
730
731 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
732 struct timespec *ts)
733 {
734 struct azx_dev *azx_dev = get_azx_dev(substream);
735 u64 nsec;
736
737 nsec = timecounter_read(&azx_dev->azx_tc);
738 nsec = div_u64(nsec, 3); /* can be optimized */
739 nsec = azx_adjust_codec_delay(substream, nsec);
740
741 *ts = ns_to_timespec(nsec);
742
743 return 0;
744 }
745
746 static struct snd_pcm_hardware azx_pcm_hw = {
747 .info = (SNDRV_PCM_INFO_MMAP |
748 SNDRV_PCM_INFO_INTERLEAVED |
749 SNDRV_PCM_INFO_BLOCK_TRANSFER |
750 SNDRV_PCM_INFO_MMAP_VALID |
751 /* No full-resume yet implemented */
752 /* SNDRV_PCM_INFO_RESUME |*/
753 SNDRV_PCM_INFO_PAUSE |
754 SNDRV_PCM_INFO_SYNC_START |
755 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
756 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
757 .formats = SNDRV_PCM_FMTBIT_S16_LE,
758 .rates = SNDRV_PCM_RATE_48000,
759 .rate_min = 48000,
760 .rate_max = 48000,
761 .channels_min = 2,
762 .channels_max = 2,
763 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
764 .period_bytes_min = 128,
765 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
766 .periods_min = 2,
767 .periods_max = AZX_MAX_FRAG,
768 .fifo_size = 0,
769 };
770
771 static int azx_pcm_open(struct snd_pcm_substream *substream)
772 {
773 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
774 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
775 struct azx *chip = apcm->chip;
776 struct azx_dev *azx_dev;
777 struct snd_pcm_runtime *runtime = substream->runtime;
778 unsigned long flags;
779 int err;
780 int buff_step;
781
782 mutex_lock(&chip->open_mutex);
783 azx_dev = azx_assign_device(chip, substream);
784 if (azx_dev == NULL) {
785 mutex_unlock(&chip->open_mutex);
786 return -EBUSY;
787 }
788 runtime->hw = azx_pcm_hw;
789 runtime->hw.channels_min = hinfo->channels_min;
790 runtime->hw.channels_max = hinfo->channels_max;
791 runtime->hw.formats = hinfo->formats;
792 runtime->hw.rates = hinfo->rates;
793 snd_pcm_limit_hw_rates(runtime);
794 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
795
796 /* avoid wrap-around with wall-clock */
797 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
798 20,
799 178000000);
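	/*
	 * The ~178 s cap above matches the 32-bit wall clock: 2^32 ticks at
	 * 24 MHz is roughly 178.9 s, so one buffer can never span a wrap.
	 */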
800
801 if (chip->align_buffer_size)
802 /* constrain buffer sizes to be multiple of 128
803 bytes. This is more efficient in terms of memory
804 access but isn't required by the HDA spec and
805 prevents users from specifying exact period/buffer
806 sizes. For example for 44.1kHz, a period size set
807 to 20ms will be rounded to 19.59ms. */
808 buff_step = 128;
809 else
810 /* Don't enforce steps on buffer sizes, still need to
811 be multiple of 4 bytes (HDA spec). Tested on Intel
812 HDA controllers, may not work on all devices where
813 option needs to be disabled */
814 buff_step = 4;
815
816 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
817 buff_step);
818 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
819 buff_step);
820 snd_hda_power_up_d3wait(apcm->codec);
821 err = hinfo->ops.open(hinfo, apcm->codec, substream);
822 if (err < 0) {
823 azx_release_device(azx_dev);
824 snd_hda_power_down(apcm->codec);
825 mutex_unlock(&chip->open_mutex);
826 return err;
827 }
828 snd_pcm_limit_hw_rates(runtime);
829 /* sanity check */
830 if (snd_BUG_ON(!runtime->hw.channels_min) ||
831 snd_BUG_ON(!runtime->hw.channels_max) ||
832 snd_BUG_ON(!runtime->hw.formats) ||
833 snd_BUG_ON(!runtime->hw.rates)) {
834 azx_release_device(azx_dev);
835 hinfo->ops.close(hinfo, apcm->codec, substream);
836 snd_hda_power_down(apcm->codec);
837 mutex_unlock(&chip->open_mutex);
838 return -EINVAL;
839 }
840
841 /* disable WALLCLOCK timestamps for capture streams
842 until we figure out how to handle digital inputs */
843 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
844 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
845
846 spin_lock_irqsave(&chip->reg_lock, flags);
847 azx_dev->substream = substream;
848 azx_dev->running = 0;
849 spin_unlock_irqrestore(&chip->reg_lock, flags);
850
851 runtime->private_data = azx_dev;
852 snd_pcm_set_sync(substream);
853 mutex_unlock(&chip->open_mutex);
854 return 0;
855 }
856
857 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
858 struct vm_area_struct *area)
859 {
860 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
861 struct azx *chip = apcm->chip;
862 if (chip->ops->pcm_mmap_prepare)
863 chip->ops->pcm_mmap_prepare(substream, area);
864 return snd_pcm_lib_default_mmap(substream, area);
865 }
866
867 static struct snd_pcm_ops azx_pcm_ops = {
868 .open = azx_pcm_open,
869 .close = azx_pcm_close,
870 .ioctl = snd_pcm_lib_ioctl,
871 .hw_params = azx_pcm_hw_params,
872 .hw_free = azx_pcm_hw_free,
873 .prepare = azx_pcm_prepare,
874 .trigger = azx_pcm_trigger,
875 .pointer = azx_pcm_pointer,
876 .wall_clock = azx_get_wallclock_tstamp,
877 .mmap = azx_pcm_mmap,
878 .page = snd_pcm_sgbuf_ops_page,
879 };
880
881 static void azx_pcm_free(struct snd_pcm *pcm)
882 {
883 struct azx_pcm *apcm = pcm->private_data;
884 if (apcm) {
885 list_del(&apcm->list);
886 kfree(apcm);
887 }
888 }
889
890 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
891
892 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
893 struct hda_pcm *cpcm)
894 {
895 struct azx *chip = bus->private_data;
896 struct snd_pcm *pcm;
897 struct azx_pcm *apcm;
898 int pcm_dev = cpcm->device;
899 unsigned int size;
900 int s, err;
901
902 list_for_each_entry(apcm, &chip->pcm_list, list) {
903 if (apcm->pcm->device == pcm_dev) {
904 dev_err(chip->card->dev, "PCM %d already exists\n",
905 pcm_dev);
906 return -EBUSY;
907 }
908 }
909 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
910 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
911 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
912 &pcm);
913 if (err < 0)
914 return err;
915 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
916 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
917 if (apcm == NULL)
918 return -ENOMEM;
919 apcm->chip = chip;
920 apcm->pcm = pcm;
921 apcm->codec = codec;
922 pcm->private_data = apcm;
923 pcm->private_free = azx_pcm_free;
924 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
925 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
926 list_add_tail(&apcm->list, &chip->pcm_list);
927 cpcm->pcm = pcm;
928 for (s = 0; s < 2; s++) {
929 apcm->hinfo[s] = &cpcm->stream[s];
930 if (cpcm->stream[s].substreams)
931 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
932 }
933 /* buffer pre-allocation */
934 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
935 if (size > MAX_PREALLOC_SIZE)
936 size = MAX_PREALLOC_SIZE;
937 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
938 chip->card->dev,
939 size, MAX_PREALLOC_SIZE);
940 /* link to codec */
941 pcm->dev = &codec->dev;
942 return 0;
943 }
944
945 /*
946 * CORB / RIRB interface
947 */
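/*
 * Both ring buffers live in the single DMA page allocated below: the CORB
 * (up to 256 4-byte verbs) starts at offset 0 and the RIRB (256 8-byte
 * responses) at offset 2048, as programmed in azx_init_cmd_io().
 */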
948 static int azx_alloc_cmd_io(struct azx *chip)
949 {
950 int err;
951
952 /* a single page (at least 4096 bytes) must suffice for both ring buffers */
953 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
954 PAGE_SIZE, &chip->rb);
955 if (err < 0)
956 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
957 return err;
958 }
959 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
960
961 static void azx_init_cmd_io(struct azx *chip)
962 {
963 int timeout;
964
965 spin_lock_irq(&chip->reg_lock);
966 /* CORB set up */
967 chip->corb.addr = chip->rb.addr;
968 chip->corb.buf = (u32 *)chip->rb.area;
969 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
970 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
971
972 /* set the corb size to 256 entries (ULI requires this explicitly) */
973 azx_writeb(chip, CORBSIZE, 0x02);
974 /* set the corb write pointer to 0 */
975 azx_writew(chip, CORBWP, 0);
976
977 /* reset the corb hw read pointer */
978 azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
979 if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
980 for (timeout = 1000; timeout > 0; timeout--) {
981 if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
982 break;
983 udelay(1);
984 }
985 if (timeout <= 0)
986 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
987 azx_readw(chip, CORBRP));
988
989 azx_writew(chip, CORBRP, 0);
990 for (timeout = 1000; timeout > 0; timeout--) {
991 if (azx_readw(chip, CORBRP) == 0)
992 break;
993 udelay(1);
994 }
995 if (timeout <= 0)
996 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
997 azx_readw(chip, CORBRP));
998 }
999
1000 /* enable corb dma */
1001 azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
1002
1003 /* RIRB set up */
1004 chip->rirb.addr = chip->rb.addr + 2048;
1005 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1006 chip->rirb.wp = chip->rirb.rp = 0;
1007 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1008 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1009 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1010
1011 /* set the rirb size to 256 entries (ULI requires this explicitly) */
1012 azx_writeb(chip, RIRBSIZE, 0x02);
1013 /* reset the rirb hw write pointer */
1014 azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
1015 /* set N=1, get RIRB response interrupt for new entry */
1016 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1017 azx_writew(chip, RINTCNT, 0xc0);
1018 else
1019 azx_writew(chip, RINTCNT, 1);
1020 /* enable rirb dma and response irq */
1021 azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
1022 spin_unlock_irq(&chip->reg_lock);
1023 }
1024 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1025
1026 static void azx_free_cmd_io(struct azx *chip)
1027 {
1028 spin_lock_irq(&chip->reg_lock);
1029 /* disable ringbuffer DMAs */
1030 azx_writeb(chip, RIRBCTL, 0);
1031 azx_writeb(chip, CORBCTL, 0);
1032 spin_unlock_irq(&chip->reg_lock);
1033 }
1034 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1035
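/*
 * The codec (link) address is carried in the top four bits (31:28) of an
 * HD-audio verb; azx_command_addr() extracts it and clamps out-of-range
 * values to codec 0.
 */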
1036 static unsigned int azx_command_addr(u32 cmd)
1037 {
1038 unsigned int addr = cmd >> 28;
1039
1040 if (addr >= AZX_MAX_CODECS) {
1041 snd_BUG();
1042 addr = 0;
1043 }
1044
1045 return addr;
1046 }
1047
1048 /* send a command */
1049 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1050 {
1051 struct azx *chip = bus->private_data;
1052 unsigned int addr = azx_command_addr(val);
1053 unsigned int wp, rp;
1054
1055 spin_lock_irq(&chip->reg_lock);
1056
1057 /* add command to corb */
1058 wp = azx_readw(chip, CORBWP);
1059 if (wp == 0xffff) {
1060 /* something wrong, controller likely turned to D3 */
1061 spin_unlock_irq(&chip->reg_lock);
1062 return -EIO;
1063 }
1064 wp++;
1065 wp %= ICH6_MAX_CORB_ENTRIES;
1066
1067 rp = azx_readw(chip, CORBRP);
1068 if (wp == rp) {
1069 /* oops, it's full */
1070 spin_unlock_irq(&chip->reg_lock);
1071 return -EAGAIN;
1072 }
1073
1074 chip->rirb.cmds[addr]++;
1075 chip->corb.buf[wp] = cpu_to_le32(val);
1076 azx_writew(chip, CORBWP, wp);
1077
1078 spin_unlock_irq(&chip->reg_lock);
1079
1080 return 0;
1081 }
1082
1083 #define ICH6_RIRB_EX_UNSOL_EV (1<<4)
1084
1085 /* retrieve RIRB entry - called from interrupt handler */
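/*
 * Each RIRB entry is a pair of 32-bit words: the response itself and an
 * extended word carrying the responding codec address in bits 3:0 and the
 * unsolicited-event flag in bit 4 (ICH6_RIRB_EX_UNSOL_EV).
 */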
1086 static void azx_update_rirb(struct azx *chip)
1087 {
1088 unsigned int rp, wp;
1089 unsigned int addr;
1090 u32 res, res_ex;
1091
1092 wp = azx_readw(chip, RIRBWP);
1093 if (wp == 0xffff) {
1094 /* something wrong, controller likely turned to D3 */
1095 return;
1096 }
1097
1098 if (wp == chip->rirb.wp)
1099 return;
1100 chip->rirb.wp = wp;
1101
1102 while (chip->rirb.rp != wp) {
1103 chip->rirb.rp++;
1104 chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;
1105
1106 rp = chip->rirb.rp << 1; /* an RIRB entry is 8 bytes */
1107 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1108 res = le32_to_cpu(chip->rirb.buf[rp]);
1109 addr = res_ex & 0xf;
1110 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1111 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1112 res, res_ex,
1113 chip->rirb.rp, wp);
1114 snd_BUG();
1115 }
1116 else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
1117 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1118 else if (chip->rirb.cmds[addr]) {
1119 chip->rirb.res[addr] = res;
1120 smp_wmb();
1121 chip->rirb.cmds[addr]--;
1122 } else if (printk_ratelimit()) {
1123 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1124 res, res_ex,
1125 chip->last_cmd[addr]);
1126 }
1127 }
1128 }
1129
1130 /* receive a response */
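/*
 * On timeout the code below escalates step by step: poll the RIRB by hand,
 * then switch to permanent polling mode, then disable MSI, then (outside of
 * probing) request a bus reset, and as a last resort fall back to
 * single_cmd mode with CORB/RIRB released.
 */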
1131 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1132 unsigned int addr)
1133 {
1134 struct azx *chip = bus->private_data;
1135 unsigned long timeout;
1136 unsigned long loopcounter;
1137 int do_poll = 0;
1138
1139 again:
1140 timeout = jiffies + msecs_to_jiffies(1000);
1141
1142 for (loopcounter = 0;; loopcounter++) {
1143 if (chip->polling_mode || do_poll) {
1144 spin_lock_irq(&chip->reg_lock);
1145 azx_update_rirb(chip);
1146 spin_unlock_irq(&chip->reg_lock);
1147 }
1148 if (!chip->rirb.cmds[addr]) {
1149 smp_rmb();
1150 bus->rirb_error = 0;
1151
1152 if (!do_poll)
1153 chip->poll_count = 0;
1154 return chip->rirb.res[addr]; /* the last value */
1155 }
1156 if (time_after(jiffies, timeout))
1157 break;
1158 if (bus->needs_damn_long_delay || loopcounter > 3000)
1159 msleep(2); /* temporary workaround */
1160 else {
1161 udelay(10);
1162 cond_resched();
1163 }
1164 }
1165
1166 if (bus->no_response_fallback)
1167 return -1;
1168
1169 if (!chip->polling_mode && chip->poll_count < 2) {
1170 dev_dbg(chip->card->dev,
1171 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1172 chip->last_cmd[addr]);
1173 do_poll = 1;
1174 chip->poll_count++;
1175 goto again;
1176 }
1177
1178
1179 if (!chip->polling_mode) {
1180 dev_warn(chip->card->dev,
1181 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1182 chip->last_cmd[addr]);
1183 chip->polling_mode = 1;
1184 goto again;
1185 }
1186
1187 if (chip->msi) {
1188 dev_warn(chip->card->dev,
1189 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1190 chip->last_cmd[addr]);
1191 if (chip->ops->disable_msi_reset_irq &&
1192 chip->ops->disable_msi_reset_irq(chip) < 0) {
1193 bus->rirb_error = 1;
1194 return -1;
1195 }
1196 goto again;
1197 }
1198
1199 if (chip->probing) {
1200 /* If this critical timeout happens during the codec probing
1201 * phase, this is likely an access to a non-existing codec
1202 * slot. Better to return an error and reset the system.
1203 */
1204 return -1;
1205 }
1206
1207 /* a fatal communication error; need either to reset or to fallback
1208 * to the single_cmd mode
1209 */
1210 bus->rirb_error = 1;
1211 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1212 bus->response_reset = 1;
1213 return -1; /* give a chance to retry */
1214 }
1215
1216 dev_err(chip->card->dev,
1217 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1218 chip->last_cmd[addr]);
1219 chip->single_cmd = 1;
1220 bus->response_reset = 0;
1221 /* release CORB/RIRB */
1222 azx_free_cmd_io(chip);
1223 /* disable unsolicited responses */
1224 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
1225 return -1;
1226 }
1227
1228 /*
1229 * Use the single immediate command instead of CORB/RIRB for simplicity
1230 *
1231 * Note: according to Intel, this is not the preferred use. The command was
1232 * intended for the BIOS only, and may get confused with unsolicited
1233 * responses. So, we shouldn't use it for normal operation from the
1234 * driver.
1235 * I left the code, however, for debugging/testing purposes.
1236 */
1237
1238 /* receive a response */
1239 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1240 {
1241 int timeout = 50;
1242
1243 while (timeout--) {
1244 /* check IRV busy bit */
1245 if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
1246 /* reuse rirb.res as the response return value */
1247 chip->rirb.res[addr] = azx_readl(chip, IR);
1248 return 0;
1249 }
1250 udelay(1);
1251 }
1252 if (printk_ratelimit())
1253 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1254 azx_readw(chip, IRS));
1255 chip->rirb.res[addr] = -1;
1256 return -EIO;
1257 }
1258
1259 /* send a command */
1260 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1261 {
1262 struct azx *chip = bus->private_data;
1263 unsigned int addr = azx_command_addr(val);
1264 int timeout = 50;
1265
1266 bus->rirb_error = 0;
1267 while (timeout--) {
1268 /* check ICB busy bit */
1269 if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
1270 /* Clear IRV valid bit */
1271 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1272 ICH6_IRS_VALID);
1273 azx_writel(chip, IC, val);
1274 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1275 ICH6_IRS_BUSY);
1276 return azx_single_wait_for_response(chip, addr);
1277 }
1278 udelay(1);
1279 }
1280 if (printk_ratelimit())
1281 dev_dbg(chip->card->dev,
1282 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1283 azx_readw(chip, IRS), val);
1284 return -EIO;
1285 }
1286
1287 /* receive a response */
1288 static unsigned int azx_single_get_response(struct hda_bus *bus,
1289 unsigned int addr)
1290 {
1291 struct azx *chip = bus->private_data;
1292 return chip->rirb.res[addr];
1293 }
1294
1295 /*
1296 * The below are the main callbacks from hda_codec.
1297 *
1298 * They are just the skeleton to call sub-callbacks according to the
1299 * current setting of chip->single_cmd.
1300 */
1301
1302 /* send a command */
1303 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1304 {
1305 struct azx *chip = bus->private_data;
1306
1307 if (chip->disabled)
1308 return 0;
1309 chip->last_cmd[azx_command_addr(val)] = val;
1310 if (chip->single_cmd)
1311 return azx_single_send_cmd(bus, val);
1312 else
1313 return azx_corb_send_cmd(bus, val);
1314 }
1315 EXPORT_SYMBOL_GPL(azx_send_cmd);
1316
1317 /* get a response */
1318 static unsigned int azx_get_response(struct hda_bus *bus,
1319 unsigned int addr)
1320 {
1321 struct azx *chip = bus->private_data;
1322 if (chip->disabled)
1323 return 0;
1324 if (chip->single_cmd)
1325 return azx_single_get_response(bus, addr);
1326 else
1327 return azx_rirb_get_response(bus, addr);
1328 }
1329 EXPORT_SYMBOL_GPL(azx_get_response);
1330
1331 #ifdef CONFIG_SND_HDA_DSP_LOADER
1332 /*
1333 * DSP loading code (e.g. for CA0132)
1334 */
1335
1336 /* use the first stream for loading DSP */
1337 static struct azx_dev *
1338 azx_get_dsp_loader_dev(struct azx *chip)
1339 {
1340 return &chip->azx_dev[chip->playback_index_offset];
1341 }
1342
1343 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1344 unsigned int byte_size,
1345 struct snd_dma_buffer *bufp)
1346 {
1347 u32 *bdl;
1348 struct azx *chip = bus->private_data;
1349 struct azx_dev *azx_dev;
1350 int err;
1351
1352 azx_dev = azx_get_dsp_loader_dev(chip);
1353
1354 dsp_lock(azx_dev);
1355 spin_lock_irq(&chip->reg_lock);
1356 if (azx_dev->running || azx_dev->locked) {
1357 spin_unlock_irq(&chip->reg_lock);
1358 err = -EBUSY;
1359 goto unlock;
1360 }
1361 azx_dev->prepared = 0;
1362 chip->saved_azx_dev = *azx_dev;
1363 azx_dev->locked = 1;
1364 spin_unlock_irq(&chip->reg_lock);
1365
1366 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1367 byte_size, bufp);
1368 if (err < 0)
1369 goto err_alloc;
1370
1371 azx_dev->bufsize = byte_size;
1372 azx_dev->period_bytes = byte_size;
1373 azx_dev->format_val = format;
1374
1375 azx_stream_reset(chip, azx_dev);
1376
1377 /* reset BDL address */
1378 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1379 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1380
1381 azx_dev->frags = 0;
1382 bdl = (u32 *)azx_dev->bdl.area;
1383 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1384 if (err < 0)
1385 goto error;
1386
1387 azx_setup_controller(chip, azx_dev);
1388 dsp_unlock(azx_dev);
1389 return azx_dev->stream_tag;
1390
1391 error:
1392 chip->ops->dma_free_pages(chip, bufp);
1393 err_alloc:
1394 spin_lock_irq(&chip->reg_lock);
1395 if (azx_dev->opened)
1396 *azx_dev = chip->saved_azx_dev;
1397 azx_dev->locked = 0;
1398 spin_unlock_irq(&chip->reg_lock);
1399 unlock:
1400 dsp_unlock(azx_dev);
1401 return err;
1402 }
1403
1404 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1405 {
1406 struct azx *chip = bus->private_data;
1407 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1408
1409 if (start)
1410 azx_stream_start(chip, azx_dev);
1411 else
1412 azx_stream_stop(chip, azx_dev);
1413 azx_dev->running = start;
1414 }
1415
1416 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1417 struct snd_dma_buffer *dmab)
1418 {
1419 struct azx *chip = bus->private_data;
1420 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1421
1422 if (!dmab->area || !azx_dev->locked)
1423 return;
1424
1425 dsp_lock(azx_dev);
1426 /* reset BDL address */
1427 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1428 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1429 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1430 azx_dev->bufsize = 0;
1431 azx_dev->period_bytes = 0;
1432 azx_dev->format_val = 0;
1433
1434 chip->ops->dma_free_pages(chip, dmab);
1435 dmab->area = NULL;
1436
1437 spin_lock_irq(&chip->reg_lock);
1438 if (azx_dev->opened)
1439 *azx_dev = chip->saved_azx_dev;
1440 azx_dev->locked = 0;
1441 spin_unlock_irq(&chip->reg_lock);
1442 dsp_unlock(azx_dev);
1443 }
1444 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1445
1446 int azx_alloc_stream_pages(struct azx *chip)
1447 {
1448 int i, err;
1449 struct snd_card *card = chip->card;
1450
1451 for (i = 0; i < chip->num_streams; i++) {
1452 dsp_lock_init(&chip->azx_dev[i]);
1453 /* allocate memory for the BDL for each stream */
1454 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1455 BDL_SIZE,
1456 &chip->azx_dev[i].bdl);
1457 if (err < 0) {
1458 dev_err(card->dev, "cannot allocate BDL\n");
1459 return -ENOMEM;
1460 }
1461 }
1462 /* allocate memory for the position buffer */
1463 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1464 chip->num_streams * 8, &chip->posbuf);
1465 if (err < 0) {
1466 dev_err(card->dev, "cannot allocate posbuf\n");
1467 return -ENOMEM;
1468 }
1469
1470 /* allocate CORB/RIRB */
1471 err = azx_alloc_cmd_io(chip);
1472 if (err < 0)
1473 return err;
1474 return 0;
1475 }
1476 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1477
1478 void azx_free_stream_pages(struct azx *chip)
1479 {
1480 int i;
1481 if (chip->azx_dev) {
1482 for (i = 0; i < chip->num_streams; i++)
1483 if (chip->azx_dev[i].bdl.area)
1484 chip->ops->dma_free_pages(
1485 chip, &chip->azx_dev[i].bdl);
1486 }
1487 if (chip->rb.area)
1488 chip->ops->dma_free_pages(chip, &chip->rb);
1489 if (chip->posbuf.area)
1490 chip->ops->dma_free_pages(chip, &chip->posbuf);
1491 }
1492 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1493
1494 /*
1495 * Lowlevel interface
1496 */
1497
1498 /* enter link reset */
1499 void azx_enter_link_reset(struct azx *chip)
1500 {
1501 unsigned long timeout;
1502
1503 /* reset controller */
1504 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
1505
1506 timeout = jiffies + msecs_to_jiffies(100);
1507 while ((azx_readb(chip, GCTL) & ICH6_GCTL_RESET) &&
1508 time_before(jiffies, timeout))
1509 usleep_range(500, 1000);
1510 }
1511 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1512
1513 /* exit link reset */
1514 static void azx_exit_link_reset(struct azx *chip)
1515 {
1516 unsigned long timeout;
1517
1518 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
1519
1520 timeout = jiffies + msecs_to_jiffies(100);
1521 while (!azx_readb(chip, GCTL) &&
1522 time_before(jiffies, timeout))
1523 usleep_range(500, 1000);
1524 }
1525
1526 /* reset codec link */
1527 static int azx_reset(struct azx *chip, bool full_reset)
1528 {
1529 if (!full_reset)
1530 goto __skip;
1531
1532 /* clear STATESTS */
1533 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1534
1535 /* reset controller */
1536 azx_enter_link_reset(chip);
1537
1538 /* delay for >= 100us for codec PLL to settle per spec
1539 * Rev 0.9 section 5.5.1
1540 */
1541 usleep_range(500, 1000);
1542
1543 /* Bring controller out of reset */
1544 azx_exit_link_reset(chip);
1545
1546 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1547 usleep_range(1000, 1200);
1548
1549 __skip:
1550 /* check to see if controller is ready */
1551 if (!azx_readb(chip, GCTL)) {
1552 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1553 return -EBUSY;
1554 }
1555
1556 /* Accept unsolicited responses */
1557 if (!chip->single_cmd)
1558 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1559 ICH6_GCTL_UNSOL);
1560
1561 /* detect codecs */
1562 if (!chip->codec_mask) {
1563 chip->codec_mask = azx_readw(chip, STATESTS);
1564 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1565 chip->codec_mask);
1566 }
1567
1568 return 0;
1569 }
1570
1571 /* enable interrupts */
1572 static void azx_int_enable(struct azx *chip)
1573 {
1574 /* enable controller CIE and GIE */
1575 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1576 ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
1577 }
1578
1579 /* disable interrupts */
1580 static void azx_int_disable(struct azx *chip)
1581 {
1582 int i;
1583
1584 /* disable interrupts in stream descriptor */
1585 for (i = 0; i < chip->num_streams; i++) {
1586 struct azx_dev *azx_dev = &chip->azx_dev[i];
1587 azx_sd_writeb(chip, azx_dev, SD_CTL,
1588 azx_sd_readb(chip, azx_dev, SD_CTL) &
1589 ~SD_INT_MASK);
1590 }
1591
1592 /* disable SIE for all streams */
1593 azx_writeb(chip, INTCTL, 0);
1594
1595 /* disable controller CIE and GIE */
1596 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1597 ~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
1598 }
1599
1600 /* clear interrupts */
1601 static void azx_int_clear(struct azx *chip)
1602 {
1603 int i;
1604
1605 /* clear stream status */
1606 for (i = 0; i < chip->num_streams; i++) {
1607 struct azx_dev *azx_dev = &chip->azx_dev[i];
1608 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1609 }
1610
1611 /* clear STATESTS */
1612 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1613
1614 /* clear rirb status */
1615 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1616
1617 /* clear int status */
1618 azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
1619 }
1620
1621 /*
1622 * reset and start the controller registers
1623 */
1624 void azx_init_chip(struct azx *chip, bool full_reset)
1625 {
1626 if (chip->initialized)
1627 return;
1628
1629 /* reset controller */
1630 azx_reset(chip, full_reset);
1631
1632 /* initialize interrupts */
1633 azx_int_clear(chip);
1634 azx_int_enable(chip);
1635
1636 /* initialize the codec command I/O */
1637 if (!chip->single_cmd)
1638 azx_init_cmd_io(chip);
1639
1640 /* program the position buffer */
1641 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1642 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1643
1644 chip->initialized = 1;
1645 }
1646 EXPORT_SYMBOL_GPL(azx_init_chip);
1647
1648 void azx_stop_chip(struct azx *chip)
1649 {
1650 if (!chip->initialized)
1651 return;
1652
1653 /* disable interrupts */
1654 azx_int_disable(chip);
1655 azx_int_clear(chip);
1656
1657 /* disable CORB/RIRB */
1658 azx_free_cmd_io(chip);
1659
1660 /* disable position buffer */
1661 azx_writel(chip, DPLBASE, 0);
1662 azx_writel(chip, DPUBASE, 0);
1663
1664 chip->initialized = 0;
1665 }
1666 EXPORT_SYMBOL_GPL(azx_stop_chip);
1667
1668 /*
1669 * interrupt handler
1670 */
1671 irqreturn_t azx_interrupt(int irq, void *dev_id)
1672 {
1673 struct azx *chip = dev_id;
1674 struct azx_dev *azx_dev;
1675 u32 status;
1676 u8 sd_status;
1677 int i;
1678
1679 #ifdef CONFIG_PM_RUNTIME
1680 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1681 if (!pm_runtime_active(chip->card->dev))
1682 return IRQ_NONE;
1683 #endif
1684
1685 spin_lock(&chip->reg_lock);
1686
1687 if (chip->disabled) {
1688 spin_unlock(&chip->reg_lock);
1689 return IRQ_NONE;
1690 }
1691
1692 status = azx_readl(chip, INTSTS);
1693 if (status == 0 || status == 0xffffffff) {
1694 spin_unlock(&chip->reg_lock);
1695 return IRQ_NONE;
1696 }
1697
1698 for (i = 0; i < chip->num_streams; i++) {
1699 azx_dev = &chip->azx_dev[i];
1700 if (status & azx_dev->sd_int_sta_mask) {
1701 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1702 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1703 if (!azx_dev->substream || !azx_dev->running ||
1704 !(sd_status & SD_INT_COMPLETE))
1705 continue;
1706 /* check whether this IRQ is really acceptable */
1707 if (!chip->ops->position_check ||
1708 chip->ops->position_check(chip, azx_dev)) {
1709 spin_unlock(&chip->reg_lock);
1710 snd_pcm_period_elapsed(azx_dev->substream);
1711 spin_lock(&chip->reg_lock);
1712 }
1713 }
1714 }
1715
1716 /* clear rirb int */
1717 status = azx_readb(chip, RIRBSTS);
1718 if (status & RIRB_INT_MASK) {
1719 if (status & RIRB_INT_RESPONSE) {
1720 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1721 udelay(80);
1722 azx_update_rirb(chip);
1723 }
1724 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1725 }
1726
1727 spin_unlock(&chip->reg_lock);
1728
1729 return IRQ_HANDLED;
1730 }
1731 EXPORT_SYMBOL_GPL(azx_interrupt);
1732
1733 /*
1734 * Codec interface
1735 */
1736
1737 /*
1738 * Probe the given codec address
1739 */
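/*
 * The probe verb below asks the root node of the codec at the given address
 * for its AC_PAR_VENDOR_ID parameter; any valid answer (i.e. not -1) means
 * a codec is present in that slot.
 */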
1740 static int probe_codec(struct azx *chip, int addr)
1741 {
1742 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1743 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1744 unsigned int res;
1745
1746 mutex_lock(&chip->bus->cmd_mutex);
1747 chip->probing = 1;
1748 azx_send_cmd(chip->bus, cmd);
1749 res = azx_get_response(chip->bus, addr);
1750 chip->probing = 0;
1751 mutex_unlock(&chip->bus->cmd_mutex);
1752 if (res == -1)
1753 return -EIO;
1754 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1755 return 0;
1756 }
1757
1758 static void azx_bus_reset(struct hda_bus *bus)
1759 {
1760 struct azx *chip = bus->private_data;
1761
1762 bus->in_reset = 1;
1763 azx_stop_chip(chip);
1764 azx_init_chip(chip, true);
1765 #ifdef CONFIG_PM
1766 if (chip->initialized) {
1767 struct azx_pcm *p;
1768 list_for_each_entry(p, &chip->pcm_list, list)
1769 snd_pcm_suspend_all(p->pcm);
1770 snd_hda_suspend(chip->bus);
1771 snd_hda_resume(chip->bus);
1772 }
1773 #endif
1774 bus->in_reset = 0;
1775 }
1776
1777 #ifdef CONFIG_PM
1778 /* power-up/down the controller */
1779 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1780 {
1781 struct azx *chip = bus->private_data;
1782
1783 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1784 return;
1785
1786 if (power_up)
1787 pm_runtime_get_sync(chip->card->dev);
1788 else
1789 pm_runtime_put_sync(chip->card->dev);
1790 }
1791 #endif
1792
1793 static int get_jackpoll_interval(struct azx *chip)
1794 {
1795 int i;
1796 unsigned int j;
1797
1798 if (!chip->jackpoll_ms)
1799 return 0;
1800
1801 i = chip->jackpoll_ms[chip->dev_index];
1802 if (i == 0)
1803 return 0;
1804 if (i < 50 || i > 60000)
1805 j = 0;
1806 else
1807 j = msecs_to_jiffies(i);
1808 if (j == 0)
1809 dev_warn(chip->card->dev,
1810 "jackpoll_ms value out of range: %d\n", i);
1811 return j;
1812 }
1813
1814 /* Codec initialization */
1815 int azx_codec_create(struct azx *chip, const char *model,
1816 unsigned int max_slots,
1817 int *power_save_to)
1818 {
1819 struct hda_bus_template bus_temp;
1820 int c, codecs, err;
1821
1822 memset(&bus_temp, 0, sizeof(bus_temp));
1823 bus_temp.private_data = chip;
1824 bus_temp.modelname = model;
1825 bus_temp.pci = chip->pci;
1826 bus_temp.ops.command = azx_send_cmd;
1827 bus_temp.ops.get_response = azx_get_response;
1828 bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1829 bus_temp.ops.bus_reset = azx_bus_reset;
1830 #ifdef CONFIG_PM
1831 bus_temp.power_save = power_save_to;
1832 bus_temp.ops.pm_notify = azx_power_notify;
1833 #endif
1834 #ifdef CONFIG_SND_HDA_DSP_LOADER
1835 bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1836 bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1837 bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1838 #endif
1839
1840 err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
1841 if (err < 0)
1842 return err;
1843
1844 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1845 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1846 chip->bus->needs_damn_long_delay = 1;
1847 }
1848
1849 codecs = 0;
1850 if (!max_slots)
1851 max_slots = AZX_DEFAULT_CODECS;
1852
1853 /* First try to probe all given codec slots */
1854 for (c = 0; c < max_slots; c++) {
1855 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1856 if (probe_codec(chip, c) < 0) {
1857 /* Some BIOSen give you wrong codec addresses
1858 * that don't exist
1859 */
1860 dev_warn(chip->card->dev,
1861 "Codec #%d probe error; disabling it...\n", c);
1862 chip->codec_mask &= ~(1 << c);
1863 * Worse, accessing a non-existing
1864 * codec often screws up the controller chip
1865 * and disturbs further communication.
1866 * Thus, if an error occurs during probing,
1867 * it is better to reset the controller chip
1868 * to get back to a sane state.
1869 */
1870 azx_stop_chip(chip);
1871 azx_init_chip(chip, true);
1872 }
1873 }
1874 }
1875
1876 /* AMD chipsets often cause communication stalls upon certain
1877 * sequences such as pin detection. It seems that forcing synced
1878 * access works around the stall. Grrr...
1879 */
1880 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1881 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1882 chip->bus->sync_write = 1;
1883 chip->bus->allow_bus_reset = 1;
1884 }
1885
1886 /* Then create codec instances */
1887 for (c = 0; c < max_slots; c++) {
1888 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1889 struct hda_codec *codec;
1890 err = snd_hda_codec_new(chip->bus, c, &codec);
1891 if (err < 0)
1892 continue;
1893 codec->jackpoll_interval = get_jackpoll_interval(chip);
1894 codec->beep_mode = chip->beep_mode;
1895 codecs++;
1896 }
1897 }
1898 if (!codecs) {
1899 dev_err(chip->card->dev, "no codecs initialized\n");
1900 return -ENXIO;
1901 }
1902 return 0;
1903 }
1904 EXPORT_SYMBOL_GPL(azx_codec_create);
1905
1906 /* configure each codec instance */
1907 int azx_codec_configure(struct azx *chip)
1908 {
1909 struct hda_codec *codec;
1910 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1911 snd_hda_codec_configure(codec);
1912 }
1913 return 0;
1914 }
1915 EXPORT_SYMBOL_GPL(azx_codec_configure);
1916
1917 /* mixer creation - all stuff is implemented in hda module */
1918 int azx_mixer_create(struct azx *chip)
1919 {
1920 return snd_hda_build_controls(chip->bus);
1921 }
1922 EXPORT_SYMBOL_GPL(azx_mixer_create);
1923
1924
1925 /* initialize SD streams */
1926 int azx_init_stream(struct azx *chip)
1927 {
1928 int i;
1929
1930 /* initialize each stream (aka device):
1931 * assign the starting BDL address to each stream (device)
1932 * and initialize its parameters
1933 */
1934 for (i = 0; i < chip->num_streams; i++) {
1935 struct azx_dev *azx_dev = &chip->azx_dev[i];
1936 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
1937 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1938 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1939 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1940 azx_dev->sd_int_sta_mask = 1 << i;
1941 /* stream tag: must be non-zero and unique */
1942 azx_dev->index = i;
1943 azx_dev->stream_tag = i + 1;
1944 }
1945
1946 return 0;
1947 }
1948 EXPORT_SYMBOL_GPL(azx_init_stream);
1949
1950 /*
1951 * reboot notifier for hang-up problem at power-down
1952 */
1953 static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
1954 {
1955 struct azx *chip = container_of(nb, struct azx, reboot_notifier);
1956 snd_hda_bus_reboot_notify(chip->bus);
1957 azx_stop_chip(chip);
1958 return NOTIFY_OK;
1959 }
1960
1961 void azx_notifier_register(struct azx *chip)
1962 {
1963 chip->reboot_notifier.notifier_call = azx_halt;
1964 register_reboot_notifier(&chip->reboot_notifier);
1965 }
1966 EXPORT_SYMBOL_GPL(azx_notifier_register);
1967
1968 void azx_notifier_unregister(struct azx *chip)
1969 {
1970 if (chip->reboot_notifier.notifier_call)
1971 unregister_reboot_notifier(&chip->reboot_notifier);
1972 }
1973 EXPORT_SYMBOL_GPL(azx_notifier_unregister);
1974
1975 MODULE_LICENSE("GPL");
1976 MODULE_DESCRIPTION("Common HDA driver functions");