3 * Implementation of primary alsa driver code base for Intel HD Audio.
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
34 #include "hda_controller.h"
36 #define CREATE_TRACE_POINTS
37 #include "hda_intel_trace.h"
39 /* DSP lock helpers */
40 #ifdef CONFIG_SND_HDA_DSP_LOADER
41 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
42 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
43 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
44 #define dsp_is_locked(dev) ((dev)->locked)
46 #define dsp_lock_init(dev) do {} while (0)
47 #define dsp_lock(dev) do {} while (0)
48 #define dsp_unlock(dev) do {} while (0)
49 #define dsp_is_locked(dev) 0
53 * AZX stream operations.
/*
 * azx_stream_start - start DMA on one HDA stream descriptor.
 * Marks the stream's "insufficient" flag, enables the stream's interrupt
 * bit in INTCTL (indexed by azx_dev->index), then sets DMA-run plus the
 * per-stream interrupt mask bits in SD_CTL.
 * NOTE(review): extraction dropped interior lines (source numbering jumps
 * 57->60->62->65) — braces and possibly statements are missing here.
 */
57 static void azx_stream_start(struct azx
*chip
, struct azx_dev
*azx_dev
)
60 * Before stream start, initialize parameter
62 azx_dev
->insufficient
= 1;
/* enable stream interrupt in the global interrupt control register */
65 azx_writel(chip
, INTCTL
,
66 azx_readl(chip
, INTCTL
) | (1 << azx_dev
->index
));
67 /* set DMA start and interrupt mask */
68 azx_sd_writeb(chip
, azx_dev
, SD_CTL
,
69 azx_sd_readb(chip
, azx_dev
, SD_CTL
) |
70 SD_CTL_DMA_START
| SD_INT_MASK
);
/*
 * azx_stream_clear - stop DMA on a stream without touching INTCTL.
 * Clears the DMA-run and interrupt-mask bits in SD_CTL, then writes the
 * interrupt-mask bits to SD_STS to acknowledge any pending stream status.
 */
74 static void azx_stream_clear(struct azx
*chip
, struct azx_dev
*azx_dev
)
76 azx_sd_writeb(chip
, azx_dev
, SD_CTL
,
77 azx_sd_readb(chip
, azx_dev
, SD_CTL
) &
78 ~(SD_CTL_DMA_START
| SD_INT_MASK
));
/* ack any pending per-stream status bits */
79 azx_sd_writeb(chip
, azx_dev
, SD_STS
, SD_INT_MASK
); /* to be sure */
/*
 * azx_stream_stop - fully stop a stream: clear its DMA-run/status via
 * azx_stream_clear(), then disable the stream's interrupt bit in INTCTL.
 * Exported for use by other parts of the HDA driver.
 */
83 void azx_stream_stop(struct azx
*chip
, struct azx_dev
*azx_dev
)
85 azx_stream_clear(chip
, azx_dev
);
/* disable this stream's interrupt (mirror of the enable in stream_start) */
87 azx_writel(chip
, INTCTL
,
88 azx_readl(chip
, INTCTL
) & ~(1 << azx_dev
->index
));
90 EXPORT_SYMBOL_GPL(azx_stream_stop
);
/*
 * azx_stream_reset - reset one stream descriptor.
 * Stops the stream, asserts SD_CTL_STREAM_RESET, polls (with a countdown
 * "timeout") until the hardware reports the reset bit set, deasserts it,
 * polls again until the bit clears, then zeroes the first position-buffer
 * entry for this stream.
 * NOTE(review): declarations of `val`/`timeout` and some delay statements
 * fall in extraction gaps (source numbering jumps 93->98, 106->108,
 * 109->113) — confirm against the original file.
 */
93 static void azx_stream_reset(struct azx
*chip
, struct azx_dev
*azx_dev
)
98 azx_stream_clear(chip
, azx_dev
);
/* assert the stream reset bit */
100 azx_sd_writeb(chip
, azx_dev
, SD_CTL
,
101 azx_sd_readb(chip
, azx_dev
, SD_CTL
) |
102 SD_CTL_STREAM_RESET
);
/* wait for the hardware to acknowledge entering reset */
105 while (!((val
= azx_sd_readb(chip
, azx_dev
, SD_CTL
)) &
106 SD_CTL_STREAM_RESET
) && --timeout
)
/* deassert the reset bit */
108 val
&= ~SD_CTL_STREAM_RESET
;
109 azx_sd_writeb(chip
, azx_dev
, SD_CTL
, val
);
113 /* waiting for hardware to report that the stream is out of reset */
114 while (((val
= azx_sd_readb(chip
, azx_dev
, SD_CTL
)) &
115 SD_CTL_STREAM_RESET
) && --timeout
)
118 /* reset first position - may not be synced with hw at this time */
119 *azx_dev
->posbuf
= 0;
123 * set up the SD for streaming
/*
 * azx_setup_controller - program one stream descriptor for streaming.
 * Sequence (per the HDA stream descriptor register set): stop DMA, set the
 * stream tag in SD_CTL (adding traffic priority when not snooping), program
 * the cyclic buffer length (SD_CBL), stream format (SD_FORMAT), last valid
 * BDL index (SD_LVI = frags - 1), the 64-bit BDL base address
 * (SD_BDLPL/SD_BDLPU), optionally enable the DMA position buffer
 * (DPLBASE), and finally enable per-stream interrupts in SD_CTL.
 */
125 static int azx_setup_controller(struct azx
*chip
, struct azx_dev
*azx_dev
)
128 /* make sure the run bit is zero for SD */
129 azx_stream_clear(chip
, azx_dev
);
130 /* program the stream_tag */
131 val
= azx_sd_readl(chip
, azx_dev
, SD_CTL
);
132 val
= (val
& ~SD_CTL_STREAM_TAG_MASK
) |
133 (azx_dev
->stream_tag
<< SD_CTL_STREAM_TAG_SHIFT
);
/* non-snooped DMA gets traffic priority set */
134 if (!azx_snoop(chip
))
135 val
|= SD_CTL_TRAFFIC_PRIO
;
136 azx_sd_writel(chip
, azx_dev
, SD_CTL
, val
);
138 /* program the length of samples in cyclic buffer */
139 azx_sd_writel(chip
, azx_dev
, SD_CBL
, azx_dev
->bufsize
);
141 /* program the stream format */
142 /* this value needs to be the same as the one programmed */
143 azx_sd_writew(chip
, azx_dev
, SD_FORMAT
, azx_dev
->format_val
);
145 /* program the stream LVI (last valid index) of the BDL */
146 azx_sd_writew(chip
, azx_dev
, SD_LVI
, azx_dev
->frags
- 1);
148 /* program the BDL address */
149 /* lower BDL address */
150 azx_sd_writel(chip
, azx_dev
, SD_BDLPL
, (u32
)azx_dev
->bdl
.addr
);
151 /* upper BDL address */
152 azx_sd_writel(chip
, azx_dev
, SD_BDLPU
,
153 upper_32_bits(azx_dev
->bdl
.addr
));
155 /* enable the position buffer */
/* only needed when some stream direction actually reads the posbuf
 * (i.e. not both directions using the LPIB register) */
156 if (chip
->get_position
[0] != azx_get_pos_lpib
||
157 chip
->get_position
[1] != azx_get_pos_lpib
) {
158 if (!(azx_readl(chip
, DPLBASE
) & ICH6_DPLBASE_ENABLE
))
159 azx_writel(chip
, DPLBASE
,
160 (u32
)chip
->posbuf
.addr
| ICH6_DPLBASE_ENABLE
);
163 /* set the interrupt enable bits in the descriptor control register */
164 azx_sd_writel(chip
, azx_dev
, SD_CTL
,
165 azx_sd_readl(chip
, azx_dev
, SD_CTL
) | SD_INT_MASK
);
170 /* assign a stream for the PCM */
171 static inline struct azx_dev
*
172 azx_assign_device(struct azx
*chip
, struct snd_pcm_substream
*substream
)
175 struct azx_dev
*res
= NULL
;
176 /* make a non-zero unique key for the substream */
177 int key
= (substream
->pcm
->device
<< 16) | (substream
->number
<< 2) |
178 (substream
->stream
+ 1);
180 if (substream
->stream
== SNDRV_PCM_STREAM_PLAYBACK
) {
181 dev
= chip
->playback_index_offset
;
182 nums
= chip
->playback_streams
;
184 dev
= chip
->capture_index_offset
;
185 nums
= chip
->capture_streams
;
187 for (i
= 0; i
< nums
; i
++, dev
++) {
188 struct azx_dev
*azx_dev
= &chip
->azx_dev
[dev
];
190 if (!azx_dev
->opened
&& !dsp_is_locked(azx_dev
)) {
191 if (azx_dev
->assigned_key
== key
) {
193 azx_dev
->assigned_key
= key
;
205 res
->assigned_key
= key
;
211 /* release the assigned stream */
212 static inline void azx_release_device(struct azx_dev
*azx_dev
)
/*
 * azx_cc_read - cyclecounter read callback.
 * Recovers the owning azx_dev via container_of() on the embedded azx_cc,
 * walks azx_dev -> substream -> azx_pcm -> chip, and returns the current
 * hardware wall clock (WALLCLK register) as the cycle count.
 */
217 static cycle_t
azx_cc_read(const struct cyclecounter
*cc
)
219 struct azx_dev
*azx_dev
= container_of(cc
, struct azx_dev
, azx_cc
);
220 struct snd_pcm_substream
*substream
= azx_dev
->substream
;
221 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
222 struct azx
*chip
= apcm
->chip
;
224 return azx_readl(chip
, WALLCLK
);
227 static void azx_timecounter_init(struct snd_pcm_substream
*substream
,
228 bool force
, cycle_t last
)
230 struct azx_dev
*azx_dev
= get_azx_dev(substream
);
231 struct timecounter
*tc
= &azx_dev
->azx_tc
;
232 struct cyclecounter
*cc
= &azx_dev
->azx_cc
;
235 cc
->read
= azx_cc_read
;
236 cc
->mask
= CLOCKSOURCE_MASK(32);
239 * Converting from 24 MHz to ns means applying a 125/3 factor.
240 * To avoid any saturation issues in intermediate operations,
241 * the 125 factor is applied first. The division is applied
242 * last after reading the timecounter value.
243 * Applying the 1/3 factor as part of the multiplication
244 * requires at least 20 bits for a decent precision, however
245 * overflows occur after about 4 hours or less, not a option.
248 cc
->mult
= 125; /* saturation after 195 years */
251 nsec
= 0; /* audio time is elapsed time since trigger */
252 timecounter_init(tc
, cc
, nsec
);
255 * force timecounter to use predefined value,
256 * used for synchronized starts
258 tc
->cycle_last
= last
;
/*
 * azx_adjust_codec_delay - fold the codec's own latency into a timestamp.
 * Queries hinfo->ops.get_delay() (frames), converts to nanoseconds at the
 * runtime sample rate, then adds the delay for capture streams or
 * subtracts it (clamped at 0) for playback.
 * NOTE(review): the second parameter (`nsec`) and the early-return body
 * when get_delay is absent fall in extraction gaps (numbering jumps
 * 261->264, 268->271) — confirm against the original file.
 */
261 static u64
azx_adjust_codec_delay(struct snd_pcm_substream
*substream
,
264 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
265 struct hda_pcm_stream
*hinfo
= apcm
->hinfo
[substream
->stream
];
266 u64 codec_frames
, codec_nsecs
;
/* no codec-side delay callback: nothing to adjust */
268 if (!hinfo
->ops
.get_delay
)
271 codec_frames
= hinfo
->ops
.get_delay(hinfo
, apcm
->codec
, substream
);
/* frames -> ns at the current sample rate */
272 codec_nsecs
= div_u64(codec_frames
* 1000000000LL,
273 substream
->runtime
->rate
);
/* capture: codec delay adds to elapsed time; playback: subtracts */
275 if (substream
->stream
== SNDRV_PCM_STREAM_CAPTURE
)
276 return nsec
+ codec_nsecs
;
278 return (nsec
> codec_nsecs
) ? nsec
- codec_nsecs
: 0;
284 static int setup_bdle(struct azx
*chip
,
285 struct snd_dma_buffer
*dmab
,
286 struct azx_dev
*azx_dev
, u32
**bdlp
,
287 int ofs
, int size
, int with_ioc
)
295 if (azx_dev
->frags
>= AZX_MAX_BDL_ENTRIES
)
298 addr
= snd_sgbuf_get_addr(dmab
, ofs
);
299 /* program the address field of the BDL entry */
300 bdl
[0] = cpu_to_le32((u32
)addr
);
301 bdl
[1] = cpu_to_le32(upper_32_bits(addr
));
302 /* program the size field of the BDL entry */
303 chunk
= snd_sgbuf_get_chunk_size(dmab
, ofs
, size
);
304 /* one BDLE cannot cross 4K boundary on CTHDA chips */
305 if (chip
->driver_caps
& AZX_DCAPS_4K_BDLE_BOUNDARY
) {
306 u32 remain
= 0x1000 - (ofs
& 0xfff);
310 bdl
[2] = cpu_to_le32(chunk
);
311 /* program the IOC to enable interrupt
312 * only when the whole fragment is processed
315 bdl
[3] = (size
|| !with_ioc
) ? 0 : cpu_to_le32(0x01);
327 static int azx_setup_periods(struct azx
*chip
,
328 struct snd_pcm_substream
*substream
,
329 struct azx_dev
*azx_dev
)
332 int i
, ofs
, periods
, period_bytes
;
335 /* reset BDL address */
336 azx_sd_writel(chip
, azx_dev
, SD_BDLPL
, 0);
337 azx_sd_writel(chip
, azx_dev
, SD_BDLPU
, 0);
339 period_bytes
= azx_dev
->period_bytes
;
340 periods
= azx_dev
->bufsize
/ period_bytes
;
342 /* program the initial BDL entries */
343 bdl
= (u32
*)azx_dev
->bdl
.area
;
347 if (chip
->bdl_pos_adj
)
348 pos_adj
= chip
->bdl_pos_adj
[chip
->dev_index
];
349 if (!azx_dev
->no_period_wakeup
&& pos_adj
> 0) {
350 struct snd_pcm_runtime
*runtime
= substream
->runtime
;
351 int pos_align
= pos_adj
;
352 pos_adj
= (pos_adj
* runtime
->rate
+ 47999) / 48000;
356 pos_adj
= ((pos_adj
+ pos_align
- 1) / pos_align
) *
358 pos_adj
= frames_to_bytes(runtime
, pos_adj
);
359 if (pos_adj
>= period_bytes
) {
360 dev_warn(chip
->card
->dev
,"Too big adjustment %d\n",
364 ofs
= setup_bdle(chip
, snd_pcm_get_dma_buf(substream
),
366 &bdl
, ofs
, pos_adj
, true);
373 for (i
= 0; i
< periods
; i
++) {
374 if (i
== periods
- 1 && pos_adj
)
375 ofs
= setup_bdle(chip
, snd_pcm_get_dma_buf(substream
),
377 period_bytes
- pos_adj
, 0);
379 ofs
= setup_bdle(chip
, snd_pcm_get_dma_buf(substream
),
382 !azx_dev
->no_period_wakeup
);
389 dev_err(chip
->card
->dev
, "Too many BDL entries: buffer=%d, period=%d\n",
390 azx_dev
->bufsize
, period_bytes
);
/*
 * azx_pcm_close - PCM close callback.
 * Under open_mutex: detaches the substream from its azx_dev (with reg_lock
 * held for the pointer/running updates), releases the stream, invokes the
 * codec's close op, and drops the codec power reference.
 */
398 static int azx_pcm_close(struct snd_pcm_substream
*substream
)
400 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
401 struct hda_pcm_stream
*hinfo
= apcm
->hinfo
[substream
->stream
];
402 struct azx
*chip
= apcm
->chip
;
403 struct azx_dev
*azx_dev
= get_azx_dev(substream
);
406 mutex_lock(&chip
->open_mutex
);
/* clear the device<->substream link under the register spinlock */
407 spin_lock_irqsave(&chip
->reg_lock
, flags
);
408 azx_dev
->substream
= NULL
;
409 azx_dev
->running
= 0;
410 spin_unlock_irqrestore(&chip
->reg_lock
, flags
);
411 azx_release_device(azx_dev
);
412 hinfo
->ops
.close(hinfo
, apcm
->codec
, substream
);
/* balance the power_up done at open time */
413 snd_hda_power_down(apcm
->codec
);
414 mutex_unlock(&chip
->open_mutex
);
/*
 * azx_pcm_hw_params - PCM hw_params callback.
 * Under the per-stream DSP lock: refuses to touch a stream claimed by the
 * DSP loader, otherwise allocates the DMA buffer pages via the chip ops
 * with the requested buffer size.
 * NOTE(review): the in-use error path and azx_dev field resets fall in an
 * extraction gap (numbering jumps 426->431) — confirm against the original.
 */
418 static int azx_pcm_hw_params(struct snd_pcm_substream
*substream
,
419 struct snd_pcm_hw_params
*hw_params
)
421 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
422 struct azx
*chip
= apcm
->chip
;
425 dsp_lock(get_azx_dev(substream
));
/* stream is owned by the DSP loader: cannot be used for PCM now */
426 if (dsp_is_locked(get_azx_dev(substream
))) {
431 ret
= chip
->ops
->substream_alloc_pages(chip
, substream
,
432 params_buffer_bytes(hw_params
));
434 dsp_unlock(get_azx_dev(substream
));
/*
 * azx_pcm_hw_free - PCM hw_free callback.
 * Unless the DSP loader holds the stream, zeroes the BDL address registers
 * and SD_CTL and clears the cached stream parameters; then cleans up the
 * codec side, frees the DMA pages, and marks the stream unprepared.
 */
438 static int azx_pcm_hw_free(struct snd_pcm_substream
*substream
)
440 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
441 struct azx_dev
*azx_dev
= get_azx_dev(substream
);
442 struct azx
*chip
= apcm
->chip
;
443 struct hda_pcm_stream
*hinfo
= apcm
->hinfo
[substream
->stream
];
446 /* reset BDL address */
/* skip the register wipe if the DSP loader currently owns this stream */
448 if (!dsp_is_locked(azx_dev
)) {
449 azx_sd_writel(chip
, azx_dev
, SD_BDLPL
, 0);
450 azx_sd_writel(chip
, azx_dev
, SD_BDLPU
, 0);
451 azx_sd_writel(chip
, azx_dev
, SD_CTL
, 0);
/* forget the cached stream geometry/format */
452 azx_dev
->bufsize
= 0;
453 azx_dev
->period_bytes
= 0;
454 azx_dev
->format_val
= 0;
457 snd_hda_codec_cleanup(apcm
->codec
, hinfo
, substream
);
459 err
= chip
->ops
->substream_free_pages(chip
, substream
);
460 azx_dev
->prepared
= 0;
465 static int azx_pcm_prepare(struct snd_pcm_substream
*substream
)
467 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
468 struct azx
*chip
= apcm
->chip
;
469 struct azx_dev
*azx_dev
= get_azx_dev(substream
);
470 struct hda_pcm_stream
*hinfo
= apcm
->hinfo
[substream
->stream
];
471 struct snd_pcm_runtime
*runtime
= substream
->runtime
;
472 unsigned int bufsize
, period_bytes
, format_val
, stream_tag
;
474 struct hda_spdif_out
*spdif
=
475 snd_hda_spdif_out_of_nid(apcm
->codec
, hinfo
->nid
);
476 unsigned short ctls
= spdif
? spdif
->ctls
: 0;
479 if (dsp_is_locked(azx_dev
)) {
484 azx_stream_reset(chip
, azx_dev
);
485 format_val
= snd_hda_calc_stream_format(apcm
->codec
,
492 dev_err(chip
->card
->dev
,
493 "invalid format_val, rate=%d, ch=%d, format=%d\n",
494 runtime
->rate
, runtime
->channels
, runtime
->format
);
499 bufsize
= snd_pcm_lib_buffer_bytes(substream
);
500 period_bytes
= snd_pcm_lib_period_bytes(substream
);
502 dev_dbg(chip
->card
->dev
, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
503 bufsize
, format_val
);
505 if (bufsize
!= azx_dev
->bufsize
||
506 period_bytes
!= azx_dev
->period_bytes
||
507 format_val
!= azx_dev
->format_val
||
508 runtime
->no_period_wakeup
!= azx_dev
->no_period_wakeup
) {
509 azx_dev
->bufsize
= bufsize
;
510 azx_dev
->period_bytes
= period_bytes
;
511 azx_dev
->format_val
= format_val
;
512 azx_dev
->no_period_wakeup
= runtime
->no_period_wakeup
;
513 err
= azx_setup_periods(chip
, substream
, azx_dev
);
518 /* when LPIB delay correction gives a small negative value,
519 * we ignore it; currently set the threshold statically to
522 if (runtime
->period_size
> 64)
523 azx_dev
->delay_negative_threshold
= -frames_to_bytes(runtime
, 64);
525 azx_dev
->delay_negative_threshold
= 0;
527 /* wallclk has 24Mhz clock source */
528 azx_dev
->period_wallclk
= (((runtime
->period_size
* 24000) /
529 runtime
->rate
) * 1000);
530 azx_setup_controller(chip
, azx_dev
);
531 if (substream
->stream
== SNDRV_PCM_STREAM_PLAYBACK
)
533 azx_sd_readw(chip
, azx_dev
, SD_FIFOSIZE
) + 1;
535 azx_dev
->fifo_size
= 0;
537 stream_tag
= azx_dev
->stream_tag
;
538 /* CA-IBG chips need the playback stream starting from 1 */
539 if ((chip
->driver_caps
& AZX_DCAPS_CTX_WORKAROUND
) &&
540 stream_tag
> chip
->capture_streams
)
541 stream_tag
-= chip
->capture_streams
;
542 err
= snd_hda_codec_prepare(apcm
->codec
, hinfo
, stream_tag
,
543 azx_dev
->format_val
, substream
);
547 azx_dev
->prepared
= 1;
552 static int azx_pcm_trigger(struct snd_pcm_substream
*substream
, int cmd
)
554 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
555 struct azx
*chip
= apcm
->chip
;
556 struct azx_dev
*azx_dev
;
557 struct snd_pcm_substream
*s
;
558 int rstart
= 0, start
, nsync
= 0, sbits
= 0;
561 azx_dev
= get_azx_dev(substream
);
562 trace_azx_pcm_trigger(chip
, azx_dev
, cmd
);
564 if (dsp_is_locked(azx_dev
) || !azx_dev
->prepared
)
568 case SNDRV_PCM_TRIGGER_START
:
570 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE
:
571 case SNDRV_PCM_TRIGGER_RESUME
:
574 case SNDRV_PCM_TRIGGER_PAUSE_PUSH
:
575 case SNDRV_PCM_TRIGGER_SUSPEND
:
576 case SNDRV_PCM_TRIGGER_STOP
:
583 snd_pcm_group_for_each_entry(s
, substream
) {
584 if (s
->pcm
->card
!= substream
->pcm
->card
)
586 azx_dev
= get_azx_dev(s
);
587 sbits
|= 1 << azx_dev
->index
;
589 snd_pcm_trigger_done(s
, substream
);
592 spin_lock(&chip
->reg_lock
);
594 /* first, set SYNC bits of corresponding streams */
595 if (chip
->driver_caps
& AZX_DCAPS_OLD_SSYNC
)
596 azx_writel(chip
, OLD_SSYNC
,
597 azx_readl(chip
, OLD_SSYNC
) | sbits
);
599 azx_writel(chip
, SSYNC
, azx_readl(chip
, SSYNC
) | sbits
);
601 snd_pcm_group_for_each_entry(s
, substream
) {
602 if (s
->pcm
->card
!= substream
->pcm
->card
)
604 azx_dev
= get_azx_dev(s
);
606 azx_dev
->start_wallclk
= azx_readl(chip
, WALLCLK
);
608 azx_dev
->start_wallclk
-=
609 azx_dev
->period_wallclk
;
610 azx_stream_start(chip
, azx_dev
);
612 azx_stream_stop(chip
, azx_dev
);
614 azx_dev
->running
= start
;
616 spin_unlock(&chip
->reg_lock
);
618 /* wait until all FIFOs get ready */
619 for (timeout
= 5000; timeout
; timeout
--) {
621 snd_pcm_group_for_each_entry(s
, substream
) {
622 if (s
->pcm
->card
!= substream
->pcm
->card
)
624 azx_dev
= get_azx_dev(s
);
625 if (!(azx_sd_readb(chip
, azx_dev
, SD_STS
) &
634 /* wait until all RUN bits are cleared */
635 for (timeout
= 5000; timeout
; timeout
--) {
637 snd_pcm_group_for_each_entry(s
, substream
) {
638 if (s
->pcm
->card
!= substream
->pcm
->card
)
640 azx_dev
= get_azx_dev(s
);
641 if (azx_sd_readb(chip
, azx_dev
, SD_CTL
) &
650 spin_lock(&chip
->reg_lock
);
651 /* reset SYNC bits */
652 if (chip
->driver_caps
& AZX_DCAPS_OLD_SSYNC
)
653 azx_writel(chip
, OLD_SSYNC
,
654 azx_readl(chip
, OLD_SSYNC
) & ~sbits
);
656 azx_writel(chip
, SSYNC
, azx_readl(chip
, SSYNC
) & ~sbits
);
658 azx_timecounter_init(substream
, 0, 0);
662 /* same start cycle for master and group */
663 azx_dev
= get_azx_dev(substream
);
664 cycle_last
= azx_dev
->azx_tc
.cycle_last
;
666 snd_pcm_group_for_each_entry(s
, substream
) {
667 if (s
->pcm
->card
!= substream
->pcm
->card
)
669 azx_timecounter_init(s
, 1, cycle_last
);
673 spin_unlock(&chip
->reg_lock
);
/*
 * azx_get_pos_lpib - read the stream position from the LPIB register
 * (link position in buffer). One of the pluggable chip->get_position ops.
 */
677 unsigned int azx_get_pos_lpib(struct azx
*chip
, struct azx_dev
*azx_dev
)
679 return azx_sd_readl(chip
, azx_dev
, SD_LPIB
);
681 EXPORT_SYMBOL_GPL(azx_get_pos_lpib
);
/*
 * azx_get_pos_posbuf - read the stream position from the DMA position
 * buffer entry (little-endian in memory, hence le32_to_cpu). Default
 * position source when no chip-specific callback is installed.
 */
683 unsigned int azx_get_pos_posbuf(struct azx
*chip
, struct azx_dev
*azx_dev
)
685 return le32_to_cpu(*azx_dev
->posbuf
);
687 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf
);
/*
 * azx_get_position - current DMA position in bytes for a stream.
 * Uses the per-direction chip->get_position callback when set, else the
 * position buffer; wraps out-of-range values; and, when a runtime exists,
 * accumulates chip- and codec-reported delays into runtime->delay before
 * tracing the result.
 * NOTE(review): declarations of `pos`/`delay` and the wrap handling body
 * fall in extraction gaps (numbering jumps 694->697, 702->705) — confirm
 * against the original file.
 */
689 unsigned int azx_get_position(struct azx
*chip
,
690 struct azx_dev
*azx_dev
)
692 struct snd_pcm_substream
*substream
= azx_dev
->substream
;
694 int stream
= substream
->stream
;
/* chip-specific position source, if one was registered per direction */
697 if (chip
->get_position
[stream
])
698 pos
= chip
->get_position
[stream
](chip
, azx_dev
);
699 else /* use the position buffer as default */
700 pos
= azx_get_pos_posbuf(chip
, azx_dev
);
/* guard against positions beyond the ring buffer */
702 if (pos
>= azx_dev
->bufsize
)
705 if (substream
->runtime
) {
706 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
707 struct hda_pcm_stream
*hinfo
= apcm
->hinfo
[stream
];
/* controller-side extra delay (e.g. FIFO), if reported */
709 if (chip
->get_delay
[stream
])
710 delay
+= chip
->get_delay
[stream
](chip
, azx_dev
, pos
);
/* codec-side extra delay, if the codec reports one */
711 if (hinfo
->ops
.get_delay
)
712 delay
+= hinfo
->ops
.get_delay(hinfo
, apcm
->codec
,
714 substream
->runtime
->delay
= delay
;
717 trace_azx_get_position(chip
, azx_dev
, pos
, delay
);
720 EXPORT_SYMBOL_GPL(azx_get_position
);
/*
 * azx_pcm_pointer - PCM pointer callback: current position converted
 * from bytes (azx_get_position) to frames for the ALSA core.
 */
722 static snd_pcm_uframes_t
azx_pcm_pointer(struct snd_pcm_substream
*substream
)
724 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
725 struct azx
*chip
= apcm
->chip
;
726 struct azx_dev
*azx_dev
= get_azx_dev(substream
);
727 return bytes_to_frames(substream
->runtime
,
728 azx_get_position(chip
, azx_dev
));
/*
 * azx_get_wallclock_tstamp - wall-clock timestamp callback.
 * Reads the timecounter (fed by the 24 MHz WALLCLK via azx_cc_read with a
 * x125 mult), applies the remaining /3 of the 125/3 ns conversion factor,
 * adjusts for codec delay, and stores the result as a timespec in *ts.
 * NOTE(review): the `ts` parameter and `nsec` declaration fall in
 * extraction gaps (numbering jumps 731->734->737).
 */
731 static int azx_get_wallclock_tstamp(struct snd_pcm_substream
*substream
,
734 struct azx_dev
*azx_dev
= get_azx_dev(substream
);
737 nsec
= timecounter_read(&azx_dev
->azx_tc
);
/* complete the 24MHz->ns conversion: mult=125 was applied, divide by 3 */
738 nsec
= div_u64(nsec
, 3); /* can be optimized */
739 nsec
= azx_adjust_codec_delay(substream
, nsec
);
741 *ts
= ns_to_timespec(nsec
);
746 static struct snd_pcm_hardware azx_pcm_hw
= {
747 .info
= (SNDRV_PCM_INFO_MMAP
|
748 SNDRV_PCM_INFO_INTERLEAVED
|
749 SNDRV_PCM_INFO_BLOCK_TRANSFER
|
750 SNDRV_PCM_INFO_MMAP_VALID
|
751 /* No full-resume yet implemented */
752 /* SNDRV_PCM_INFO_RESUME |*/
753 SNDRV_PCM_INFO_PAUSE
|
754 SNDRV_PCM_INFO_SYNC_START
|
755 SNDRV_PCM_INFO_HAS_WALL_CLOCK
|
756 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP
),
757 .formats
= SNDRV_PCM_FMTBIT_S16_LE
,
758 .rates
= SNDRV_PCM_RATE_48000
,
763 .buffer_bytes_max
= AZX_MAX_BUF_SIZE
,
764 .period_bytes_min
= 128,
765 .period_bytes_max
= AZX_MAX_BUF_SIZE
/ 2,
767 .periods_max
= AZX_MAX_FRAG
,
771 static int azx_pcm_open(struct snd_pcm_substream
*substream
)
773 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
774 struct hda_pcm_stream
*hinfo
= apcm
->hinfo
[substream
->stream
];
775 struct azx
*chip
= apcm
->chip
;
776 struct azx_dev
*azx_dev
;
777 struct snd_pcm_runtime
*runtime
= substream
->runtime
;
782 mutex_lock(&chip
->open_mutex
);
783 azx_dev
= azx_assign_device(chip
, substream
);
784 if (azx_dev
== NULL
) {
785 mutex_unlock(&chip
->open_mutex
);
788 runtime
->hw
= azx_pcm_hw
;
789 runtime
->hw
.channels_min
= hinfo
->channels_min
;
790 runtime
->hw
.channels_max
= hinfo
->channels_max
;
791 runtime
->hw
.formats
= hinfo
->formats
;
792 runtime
->hw
.rates
= hinfo
->rates
;
793 snd_pcm_limit_hw_rates(runtime
);
794 snd_pcm_hw_constraint_integer(runtime
, SNDRV_PCM_HW_PARAM_PERIODS
);
796 /* avoid wrap-around with wall-clock */
797 snd_pcm_hw_constraint_minmax(runtime
, SNDRV_PCM_HW_PARAM_BUFFER_TIME
,
801 if (chip
->align_buffer_size
)
802 /* constrain buffer sizes to be multiple of 128
803 bytes. This is more efficient in terms of memory
804 access but isn't required by the HDA spec and
805 prevents users from specifying exact period/buffer
806 sizes. For example for 44.1kHz, a period size set
807 to 20ms will be rounded to 19.59ms. */
810 /* Don't enforce steps on buffer sizes, still need to
811 be multiple of 4 bytes (HDA spec). Tested on Intel
812 HDA controllers, may not work on all devices where
813 option needs to be disabled */
816 snd_pcm_hw_constraint_step(runtime
, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES
,
818 snd_pcm_hw_constraint_step(runtime
, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES
,
820 snd_hda_power_up_d3wait(apcm
->codec
);
821 err
= hinfo
->ops
.open(hinfo
, apcm
->codec
, substream
);
823 azx_release_device(azx_dev
);
824 snd_hda_power_down(apcm
->codec
);
825 mutex_unlock(&chip
->open_mutex
);
828 snd_pcm_limit_hw_rates(runtime
);
830 if (snd_BUG_ON(!runtime
->hw
.channels_min
) ||
831 snd_BUG_ON(!runtime
->hw
.channels_max
) ||
832 snd_BUG_ON(!runtime
->hw
.formats
) ||
833 snd_BUG_ON(!runtime
->hw
.rates
)) {
834 azx_release_device(azx_dev
);
835 hinfo
->ops
.close(hinfo
, apcm
->codec
, substream
);
836 snd_hda_power_down(apcm
->codec
);
837 mutex_unlock(&chip
->open_mutex
);
841 /* disable WALLCLOCK timestamps for capture streams
842 until we figure out how to handle digital inputs */
843 if (substream
->stream
== SNDRV_PCM_STREAM_CAPTURE
)
844 runtime
->hw
.info
&= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK
;
846 spin_lock_irqsave(&chip
->reg_lock
, flags
);
847 azx_dev
->substream
= substream
;
848 azx_dev
->running
= 0;
849 spin_unlock_irqrestore(&chip
->reg_lock
, flags
);
851 runtime
->private_data
= azx_dev
;
852 snd_pcm_set_sync(substream
);
853 mutex_unlock(&chip
->open_mutex
);
/*
 * azx_pcm_mmap - PCM mmap callback.
 * Gives the chip ops a chance to adjust the VMA (e.g. cache attributes)
 * via pcm_mmap_prepare, then defers to the ALSA default mmap helper.
 */
857 static int azx_pcm_mmap(struct snd_pcm_substream
*substream
,
858 struct vm_area_struct
*area
)
860 struct azx_pcm
*apcm
= snd_pcm_substream_chip(substream
);
861 struct azx
*chip
= apcm
->chip
;
862 if (chip
->ops
->pcm_mmap_prepare
)
863 chip
->ops
->pcm_mmap_prepare(substream
, area
);
864 return snd_pcm_lib_default_mmap(substream
, area
);
867 static struct snd_pcm_ops azx_pcm_ops
= {
868 .open
= azx_pcm_open
,
869 .close
= azx_pcm_close
,
870 .ioctl
= snd_pcm_lib_ioctl
,
871 .hw_params
= azx_pcm_hw_params
,
872 .hw_free
= azx_pcm_hw_free
,
873 .prepare
= azx_pcm_prepare
,
874 .trigger
= azx_pcm_trigger
,
875 .pointer
= azx_pcm_pointer
,
876 .wall_clock
= azx_get_wallclock_tstamp
,
877 .mmap
= azx_pcm_mmap
,
878 .page
= snd_pcm_sgbuf_ops_page
,
/*
 * azx_pcm_free - private_free callback for a PCM: unlinks the azx_pcm
 * from the chip's pcm_list. (The kfree of apcm falls in an extraction
 * gap after line 885 — confirm against the original file.)
 */
881 static void azx_pcm_free(struct snd_pcm
*pcm
)
883 struct azx_pcm
*apcm
= pcm
->private_data
;
885 list_del(&apcm
->list
);
890 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
892 static int azx_attach_pcm_stream(struct hda_bus
*bus
, struct hda_codec
*codec
,
893 struct hda_pcm
*cpcm
)
895 struct azx
*chip
= bus
->private_data
;
897 struct azx_pcm
*apcm
;
898 int pcm_dev
= cpcm
->device
;
902 list_for_each_entry(apcm
, &chip
->pcm_list
, list
) {
903 if (apcm
->pcm
->device
== pcm_dev
) {
904 dev_err(chip
->card
->dev
, "PCM %d already exists\n",
909 err
= snd_pcm_new(chip
->card
, cpcm
->name
, pcm_dev
,
910 cpcm
->stream
[SNDRV_PCM_STREAM_PLAYBACK
].substreams
,
911 cpcm
->stream
[SNDRV_PCM_STREAM_CAPTURE
].substreams
,
915 strlcpy(pcm
->name
, cpcm
->name
, sizeof(pcm
->name
));
916 apcm
= kzalloc(sizeof(*apcm
), GFP_KERNEL
);
922 pcm
->private_data
= apcm
;
923 pcm
->private_free
= azx_pcm_free
;
924 if (cpcm
->pcm_type
== HDA_PCM_TYPE_MODEM
)
925 pcm
->dev_class
= SNDRV_PCM_CLASS_MODEM
;
926 list_add_tail(&apcm
->list
, &chip
->pcm_list
);
928 for (s
= 0; s
< 2; s
++) {
929 apcm
->hinfo
[s
] = &cpcm
->stream
[s
];
930 if (cpcm
->stream
[s
].substreams
)
931 snd_pcm_set_ops(pcm
, s
, &azx_pcm_ops
);
933 /* buffer pre-allocation */
934 size
= CONFIG_SND_HDA_PREALLOC_SIZE
* 1024;
935 if (size
> MAX_PREALLOC_SIZE
)
936 size
= MAX_PREALLOC_SIZE
;
937 snd_pcm_lib_preallocate_pages_for_all(pcm
, SNDRV_DMA_TYPE_DEV_SG
,
939 size
, MAX_PREALLOC_SIZE
);
941 pcm
->dev
= &codec
->dev
;
946 * CORB / RIRB interface
948 static int azx_alloc_cmd_io(struct azx
*chip
)
952 /* single page (at least 4096 bytes) must suffice for both ringbuffes */
953 err
= chip
->ops
->dma_alloc_pages(chip
, SNDRV_DMA_TYPE_DEV
,
954 PAGE_SIZE
, &chip
->rb
);
956 dev_err(chip
->card
->dev
, "cannot allocate CORB/RIRB\n");
959 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io
);
961 static void azx_init_cmd_io(struct azx
*chip
)
965 spin_lock_irq(&chip
->reg_lock
);
967 chip
->corb
.addr
= chip
->rb
.addr
;
968 chip
->corb
.buf
= (u32
*)chip
->rb
.area
;
969 azx_writel(chip
, CORBLBASE
, (u32
)chip
->corb
.addr
);
970 azx_writel(chip
, CORBUBASE
, upper_32_bits(chip
->corb
.addr
));
972 /* set the corb size to 256 entries (ULI requires explicitly) */
973 azx_writeb(chip
, CORBSIZE
, 0x02);
974 /* set the corb write pointer to 0 */
975 azx_writew(chip
, CORBWP
, 0);
977 /* reset the corb hw read pointer */
978 azx_writew(chip
, CORBRP
, ICH6_CORBRP_RST
);
979 if (!(chip
->driver_caps
& AZX_DCAPS_CORBRP_SELF_CLEAR
)) {
980 for (timeout
= 1000; timeout
> 0; timeout
--) {
981 if ((azx_readw(chip
, CORBRP
) & ICH6_CORBRP_RST
) == ICH6_CORBRP_RST
)
986 dev_err(chip
->card
->dev
, "CORB reset timeout#1, CORBRP = %d\n",
987 azx_readw(chip
, CORBRP
));
989 azx_writew(chip
, CORBRP
, 0);
990 for (timeout
= 1000; timeout
> 0; timeout
--) {
991 if (azx_readw(chip
, CORBRP
) == 0)
996 dev_err(chip
->card
->dev
, "CORB reset timeout#2, CORBRP = %d\n",
997 azx_readw(chip
, CORBRP
));
1000 /* enable corb dma */
1001 azx_writeb(chip
, CORBCTL
, ICH6_CORBCTL_RUN
);
1004 chip
->rirb
.addr
= chip
->rb
.addr
+ 2048;
1005 chip
->rirb
.buf
= (u32
*)(chip
->rb
.area
+ 2048);
1006 chip
->rirb
.wp
= chip
->rirb
.rp
= 0;
1007 memset(chip
->rirb
.cmds
, 0, sizeof(chip
->rirb
.cmds
));
1008 azx_writel(chip
, RIRBLBASE
, (u32
)chip
->rirb
.addr
);
1009 azx_writel(chip
, RIRBUBASE
, upper_32_bits(chip
->rirb
.addr
));
1011 /* set the rirb size to 256 entries (ULI requires explicitly) */
1012 azx_writeb(chip
, RIRBSIZE
, 0x02);
1013 /* reset the rirb hw write pointer */
1014 azx_writew(chip
, RIRBWP
, ICH6_RIRBWP_RST
);
1015 /* set N=1, get RIRB response interrupt for new entry */
1016 if (chip
->driver_caps
& AZX_DCAPS_CTX_WORKAROUND
)
1017 azx_writew(chip
, RINTCNT
, 0xc0);
1019 azx_writew(chip
, RINTCNT
, 1);
1020 /* enable rirb dma and response irq */
1021 azx_writeb(chip
, RIRBCTL
, ICH6_RBCTL_DMA_EN
| ICH6_RBCTL_IRQ_EN
);
1022 spin_unlock_irq(&chip
->reg_lock
);
1024 EXPORT_SYMBOL_GPL(azx_init_cmd_io
);
/*
 * azx_free_cmd_io - shut down the CORB/RIRB command rings.
 * Under reg_lock: stops both ring-buffer DMA engines by zeroing their
 * control registers.
 */
1026 static void azx_free_cmd_io(struct azx
*chip
)
1028 spin_lock_irq(&chip
->reg_lock
);
1029 /* disable ringbuffer DMAs */
1030 azx_writeb(chip
, RIRBCTL
, 0);
1031 azx_writeb(chip
, CORBCTL
, 0);
1032 spin_unlock_irq(&chip
->reg_lock
);
1034 EXPORT_SYMBOL_GPL(azx_free_cmd_io
);
/*
 * azx_command_addr - extract the codec address from a verb (top 4 bits of
 * the 32-bit command) and range-check it against AZX_MAX_CODECS.
 * NOTE(review): the out-of-range handling body falls in an extraction gap
 * after line 1040 — confirm against the original file.
 */
1036 static unsigned int azx_command_addr(u32 cmd
)
1038 unsigned int addr
= cmd
>> 28;
1040 if (addr
>= AZX_MAX_CODECS
) {
1048 /* send a command */
1049 static int azx_corb_send_cmd(struct hda_bus
*bus
, u32 val
)
1051 struct azx
*chip
= bus
->private_data
;
1052 unsigned int addr
= azx_command_addr(val
);
1053 unsigned int wp
, rp
;
1055 spin_lock_irq(&chip
->reg_lock
);
1057 /* add command to corb */
1058 wp
= azx_readw(chip
, CORBWP
);
1060 /* something wrong, controller likely turned to D3 */
1061 spin_unlock_irq(&chip
->reg_lock
);
1065 wp
%= ICH6_MAX_CORB_ENTRIES
;
1067 rp
= azx_readw(chip
, CORBRP
);
1069 /* oops, it's full */
1070 spin_unlock_irq(&chip
->reg_lock
);
1074 chip
->rirb
.cmds
[addr
]++;
1075 chip
->corb
.buf
[wp
] = cpu_to_le32(val
);
1076 azx_writew(chip
, CORBWP
, wp
);
1078 spin_unlock_irq(&chip
->reg_lock
);
1083 #define ICH6_RIRB_EX_UNSOL_EV (1<<4)
1085 /* retrieve RIRB entry - called from interrupt handler */
1086 static void azx_update_rirb(struct azx
*chip
)
1088 unsigned int rp
, wp
;
1092 wp
= azx_readw(chip
, RIRBWP
);
1094 /* something wrong, controller likely turned to D3 */
1098 if (wp
== chip
->rirb
.wp
)
1102 while (chip
->rirb
.rp
!= wp
) {
1104 chip
->rirb
.rp
%= ICH6_MAX_RIRB_ENTRIES
;
1106 rp
= chip
->rirb
.rp
<< 1; /* an RIRB entry is 8-bytes */
1107 res_ex
= le32_to_cpu(chip
->rirb
.buf
[rp
+ 1]);
1108 res
= le32_to_cpu(chip
->rirb
.buf
[rp
]);
1109 addr
= res_ex
& 0xf;
1110 if ((addr
>= AZX_MAX_CODECS
) || !(chip
->codec_mask
& (1 << addr
))) {
1111 dev_err(chip
->card
->dev
, "spurious response %#x:%#x, rp = %d, wp = %d",
1116 else if (res_ex
& ICH6_RIRB_EX_UNSOL_EV
)
1117 snd_hda_queue_unsol_event(chip
->bus
, res
, res_ex
);
1118 else if (chip
->rirb
.cmds
[addr
]) {
1119 chip
->rirb
.res
[addr
] = res
;
1121 chip
->rirb
.cmds
[addr
]--;
1122 } else if (printk_ratelimit()) {
1123 dev_err(chip
->card
->dev
, "spurious response %#x:%#x, last cmd=%#08x\n",
1125 chip
->last_cmd
[addr
]);
1130 /* receive a response */
1131 static unsigned int azx_rirb_get_response(struct hda_bus
*bus
,
1134 struct azx
*chip
= bus
->private_data
;
1135 unsigned long timeout
;
1136 unsigned long loopcounter
;
1140 timeout
= jiffies
+ msecs_to_jiffies(1000);
1142 for (loopcounter
= 0;; loopcounter
++) {
1143 if (chip
->polling_mode
|| do_poll
) {
1144 spin_lock_irq(&chip
->reg_lock
);
1145 azx_update_rirb(chip
);
1146 spin_unlock_irq(&chip
->reg_lock
);
1148 if (!chip
->rirb
.cmds
[addr
]) {
1150 bus
->rirb_error
= 0;
1153 chip
->poll_count
= 0;
1154 return chip
->rirb
.res
[addr
]; /* the last value */
1156 if (time_after(jiffies
, timeout
))
1158 if (bus
->needs_damn_long_delay
|| loopcounter
> 3000)
1159 msleep(2); /* temporary workaround */
1166 if (!bus
->no_response_fallback
)
1169 if (!chip
->polling_mode
&& chip
->poll_count
< 2) {
1170 dev_dbg(chip
->card
->dev
,
1171 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1172 chip
->last_cmd
[addr
]);
1179 if (!chip
->polling_mode
) {
1180 dev_warn(chip
->card
->dev
,
1181 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1182 chip
->last_cmd
[addr
]);
1183 chip
->polling_mode
= 1;
1188 dev_warn(chip
->card
->dev
,
1189 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1190 chip
->last_cmd
[addr
]);
1191 if (chip
->ops
->disable_msi_reset_irq(chip
) &&
1192 chip
->ops
->disable_msi_reset_irq(chip
) < 0) {
1193 bus
->rirb_error
= 1;
1199 if (chip
->probing
) {
1200 /* If this critical timeout happens during the codec probing
1201 * phase, this is likely an access to a non-existing codec
1202 * slot. Better to return an error and reset the system.
1207 /* a fatal communication error; need either to reset or to fallback
1208 * to the single_cmd mode
1210 bus
->rirb_error
= 1;
1211 if (bus
->allow_bus_reset
&& !bus
->response_reset
&& !bus
->in_reset
) {
1212 bus
->response_reset
= 1;
1213 return -1; /* give a chance to retry */
1216 dev_err(chip
->card
->dev
,
1217 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1218 chip
->last_cmd
[addr
]);
1219 chip
->single_cmd
= 1;
1220 bus
->response_reset
= 0;
1221 /* release CORB/RIRB */
1222 azx_free_cmd_io(chip
);
1223 /* disable unsolicited responses */
1224 azx_writel(chip
, GCTL
, azx_readl(chip
, GCTL
) & ~ICH6_GCTL_UNSOL
);
/*
 * Use the single immediate command instead of CORB/RIRB for simplicity
 *
 * Note: according to Intel, this is not preferred use.  The command was
 *	intended for the BIOS only, and may get confused with unsolicited
 *	responses.  So, we shouldn't use it for normal operation from the
 *	driver.
 * I left the codes, however, for debugging/testing purposes.
 */

/* receive a response */
1238 /* receive a response */
1239 static int azx_single_wait_for_response(struct azx
*chip
, unsigned int addr
)
1244 /* check IRV busy bit */
1245 if (azx_readw(chip
, IRS
) & ICH6_IRS_VALID
) {
1246 /* reuse rirb.res as the response return value */
1247 chip
->rirb
.res
[addr
] = azx_readl(chip
, IR
);
1252 if (printk_ratelimit())
1253 dev_dbg(chip
->card
->dev
, "get_response timeout: IRS=0x%x\n",
1254 azx_readw(chip
, IRS
));
1255 chip
->rirb
.res
[addr
] = -1;
1259 /* send a command */
1260 static int azx_single_send_cmd(struct hda_bus
*bus
, u32 val
)
1262 struct azx
*chip
= bus
->private_data
;
1263 unsigned int addr
= azx_command_addr(val
);
1266 bus
->rirb_error
= 0;
1268 /* check ICB busy bit */
1269 if (!((azx_readw(chip
, IRS
) & ICH6_IRS_BUSY
))) {
1270 /* Clear IRV valid bit */
1271 azx_writew(chip
, IRS
, azx_readw(chip
, IRS
) |
1273 azx_writel(chip
, IC
, val
);
1274 azx_writew(chip
, IRS
, azx_readw(chip
, IRS
) |
1276 return azx_single_wait_for_response(chip
, addr
);
1280 if (printk_ratelimit())
1281 dev_dbg(chip
->card
->dev
,
1282 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1283 azx_readw(chip
, IRS
), val
);
1287 /* receive a response */
1288 static unsigned int azx_single_get_response(struct hda_bus
*bus
,
1291 struct azx
*chip
= bus
->private_data
;
1292 return chip
->rirb
.res
[addr
];
/*
 * The below are the main callbacks from hda_codec.
 *
 * They are just the skeleton to call sub-callbacks according to the
 * current setting of chip->single_cmd.
 */
1302 /* send a command */
1303 static int azx_send_cmd(struct hda_bus
*bus
, unsigned int val
)
1305 struct azx
*chip
= bus
->private_data
;
1309 chip
->last_cmd
[azx_command_addr(val
)] = val
;
1310 if (chip
->single_cmd
)
1311 return azx_single_send_cmd(bus
, val
);
1313 return azx_corb_send_cmd(bus
, val
);
1315 EXPORT_SYMBOL_GPL(azx_send_cmd
);
1317 /* get a response */
1318 static unsigned int azx_get_response(struct hda_bus
*bus
,
1321 struct azx
*chip
= bus
->private_data
;
1324 if (chip
->single_cmd
)
1325 return azx_single_get_response(bus
, addr
);
1327 return azx_rirb_get_response(bus
, addr
);
1329 EXPORT_SYMBOL_GPL(azx_get_response
);
1331 #ifdef CONFIG_SND_HDA_DSP_LOADER
1333 * DSP loading code (e.g. for CA0132)
1336 /* use the first stream for loading DSP */
1337 static struct azx_dev
*
1338 azx_get_dsp_loader_dev(struct azx
*chip
)
1340 return &chip
->azx_dev
[chip
->playback_index_offset
];
1343 static int azx_load_dsp_prepare(struct hda_bus
*bus
, unsigned int format
,
1344 unsigned int byte_size
,
1345 struct snd_dma_buffer
*bufp
)
1348 struct azx
*chip
= bus
->private_data
;
1349 struct azx_dev
*azx_dev
;
1352 azx_dev
= azx_get_dsp_loader_dev(chip
);
1355 spin_lock_irq(&chip
->reg_lock
);
1356 if (azx_dev
->running
|| azx_dev
->locked
) {
1357 spin_unlock_irq(&chip
->reg_lock
);
1361 azx_dev
->prepared
= 0;
1362 chip
->saved_azx_dev
= *azx_dev
;
1363 azx_dev
->locked
= 1;
1364 spin_unlock_irq(&chip
->reg_lock
);
1366 err
= chip
->ops
->dma_alloc_pages(chip
, SNDRV_DMA_TYPE_DEV_SG
,
1371 azx_dev
->bufsize
= byte_size
;
1372 azx_dev
->period_bytes
= byte_size
;
1373 azx_dev
->format_val
= format
;
1375 azx_stream_reset(chip
, azx_dev
);
1377 /* reset BDL address */
1378 azx_sd_writel(chip
, azx_dev
, SD_BDLPL
, 0);
1379 azx_sd_writel(chip
, azx_dev
, SD_BDLPU
, 0);
1382 bdl
= (u32
*)azx_dev
->bdl
.area
;
1383 err
= setup_bdle(chip
, bufp
, azx_dev
, &bdl
, 0, byte_size
, 0);
1387 azx_setup_controller(chip
, azx_dev
);
1388 dsp_unlock(azx_dev
);
1389 return azx_dev
->stream_tag
;
1392 chip
->ops
->dma_free_pages(chip
, bufp
);
1394 spin_lock_irq(&chip
->reg_lock
);
1395 if (azx_dev
->opened
)
1396 *azx_dev
= chip
->saved_azx_dev
;
1397 azx_dev
->locked
= 0;
1398 spin_unlock_irq(&chip
->reg_lock
);
1400 dsp_unlock(azx_dev
);
1404 static void azx_load_dsp_trigger(struct hda_bus
*bus
, bool start
)
1406 struct azx
*chip
= bus
->private_data
;
1407 struct azx_dev
*azx_dev
= azx_get_dsp_loader_dev(chip
);
1410 azx_stream_start(chip
, azx_dev
);
1412 azx_stream_stop(chip
, azx_dev
);
1413 azx_dev
->running
= start
;
1416 static void azx_load_dsp_cleanup(struct hda_bus
*bus
,
1417 struct snd_dma_buffer
*dmab
)
1419 struct azx
*chip
= bus
->private_data
;
1420 struct azx_dev
*azx_dev
= azx_get_dsp_loader_dev(chip
);
1422 if (!dmab
->area
|| !azx_dev
->locked
)
1426 /* reset BDL address */
1427 azx_sd_writel(chip
, azx_dev
, SD_BDLPL
, 0);
1428 azx_sd_writel(chip
, azx_dev
, SD_BDLPU
, 0);
1429 azx_sd_writel(chip
, azx_dev
, SD_CTL
, 0);
1430 azx_dev
->bufsize
= 0;
1431 azx_dev
->period_bytes
= 0;
1432 azx_dev
->format_val
= 0;
1434 chip
->ops
->dma_free_pages(chip
, dmab
);
1437 spin_lock_irq(&chip
->reg_lock
);
1438 if (azx_dev
->opened
)
1439 *azx_dev
= chip
->saved_azx_dev
;
1440 azx_dev
->locked
= 0;
1441 spin_unlock_irq(&chip
->reg_lock
);
1442 dsp_unlock(azx_dev
);
1444 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1446 int azx_alloc_stream_pages(struct azx
*chip
)
1449 struct snd_card
*card
= chip
->card
;
1451 for (i
= 0; i
< chip
->num_streams
; i
++) {
1452 dsp_lock_init(&chip
->azx_dev
[i
]);
1453 /* allocate memory for the BDL for each stream */
1454 err
= chip
->ops
->dma_alloc_pages(chip
, SNDRV_DMA_TYPE_DEV
,
1456 &chip
->azx_dev
[i
].bdl
);
1458 dev_err(card
->dev
, "cannot allocate BDL\n");
1462 /* allocate memory for the position buffer */
1463 err
= chip
->ops
->dma_alloc_pages(chip
, SNDRV_DMA_TYPE_DEV
,
1464 chip
->num_streams
* 8, &chip
->posbuf
);
1466 dev_err(card
->dev
, "cannot allocate posbuf\n");
1470 /* allocate CORB/RIRB */
1471 err
= azx_alloc_cmd_io(chip
);
1476 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages
);
1478 void azx_free_stream_pages(struct azx
*chip
)
1481 if (chip
->azx_dev
) {
1482 for (i
= 0; i
< chip
->num_streams
; i
++)
1483 if (chip
->azx_dev
[i
].bdl
.area
)
1484 chip
->ops
->dma_free_pages(
1485 chip
, &chip
->azx_dev
[i
].bdl
);
1488 chip
->ops
->dma_free_pages(chip
, &chip
->rb
);
1489 if (chip
->posbuf
.area
)
1490 chip
->ops
->dma_free_pages(chip
, &chip
->posbuf
);
1492 EXPORT_SYMBOL_GPL(azx_free_stream_pages
);
1495 * Lowlevel interface
1498 /* enter link reset */
1499 void azx_enter_link_reset(struct azx
*chip
)
1501 unsigned long timeout
;
1503 /* reset controller */
1504 azx_writel(chip
, GCTL
, azx_readl(chip
, GCTL
) & ~ICH6_GCTL_RESET
);
1506 timeout
= jiffies
+ msecs_to_jiffies(100);
1507 while ((azx_readb(chip
, GCTL
) & ICH6_GCTL_RESET
) &&
1508 time_before(jiffies
, timeout
))
1509 usleep_range(500, 1000);
1511 EXPORT_SYMBOL_GPL(azx_enter_link_reset
);
1513 /* exit link reset */
1514 static void azx_exit_link_reset(struct azx
*chip
)
1516 unsigned long timeout
;
1518 azx_writeb(chip
, GCTL
, azx_readb(chip
, GCTL
) | ICH6_GCTL_RESET
);
1520 timeout
= jiffies
+ msecs_to_jiffies(100);
1521 while (!azx_readb(chip
, GCTL
) &&
1522 time_before(jiffies
, timeout
))
1523 usleep_range(500, 1000);
1526 /* reset codec link */
1527 static int azx_reset(struct azx
*chip
, bool full_reset
)
1532 /* clear STATESTS */
1533 azx_writew(chip
, STATESTS
, STATESTS_INT_MASK
);
1535 /* reset controller */
1536 azx_enter_link_reset(chip
);
1538 /* delay for >= 100us for codec PLL to settle per spec
1539 * Rev 0.9 section 5.5.1
1541 usleep_range(500, 1000);
1543 /* Bring controller out of reset */
1544 azx_exit_link_reset(chip
);
1546 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1547 usleep_range(1000, 1200);
1550 /* check to see if controller is ready */
1551 if (!azx_readb(chip
, GCTL
)) {
1552 dev_dbg(chip
->card
->dev
, "azx_reset: controller not ready!\n");
1556 /* Accept unsolicited responses */
1557 if (!chip
->single_cmd
)
1558 azx_writel(chip
, GCTL
, azx_readl(chip
, GCTL
) |
1562 if (!chip
->codec_mask
) {
1563 chip
->codec_mask
= azx_readw(chip
, STATESTS
);
1564 dev_dbg(chip
->card
->dev
, "codec_mask = 0x%x\n",
1571 /* enable interrupts */
1572 static void azx_int_enable(struct azx
*chip
)
1574 /* enable controller CIE and GIE */
1575 azx_writel(chip
, INTCTL
, azx_readl(chip
, INTCTL
) |
1576 ICH6_INT_CTRL_EN
| ICH6_INT_GLOBAL_EN
);
1579 /* disable interrupts */
1580 static void azx_int_disable(struct azx
*chip
)
1584 /* disable interrupts in stream descriptor */
1585 for (i
= 0; i
< chip
->num_streams
; i
++) {
1586 struct azx_dev
*azx_dev
= &chip
->azx_dev
[i
];
1587 azx_sd_writeb(chip
, azx_dev
, SD_CTL
,
1588 azx_sd_readb(chip
, azx_dev
, SD_CTL
) &
1592 /* disable SIE for all streams */
1593 azx_writeb(chip
, INTCTL
, 0);
1595 /* disable controller CIE and GIE */
1596 azx_writel(chip
, INTCTL
, azx_readl(chip
, INTCTL
) &
1597 ~(ICH6_INT_CTRL_EN
| ICH6_INT_GLOBAL_EN
));
1600 /* clear interrupts */
1601 static void azx_int_clear(struct azx
*chip
)
1605 /* clear stream status */
1606 for (i
= 0; i
< chip
->num_streams
; i
++) {
1607 struct azx_dev
*azx_dev
= &chip
->azx_dev
[i
];
1608 azx_sd_writeb(chip
, azx_dev
, SD_STS
, SD_INT_MASK
);
1611 /* clear STATESTS */
1612 azx_writew(chip
, STATESTS
, STATESTS_INT_MASK
);
1614 /* clear rirb status */
1615 azx_writeb(chip
, RIRBSTS
, RIRB_INT_MASK
);
1617 /* clear int status */
1618 azx_writel(chip
, INTSTS
, ICH6_INT_CTRL_EN
| ICH6_INT_ALL_STREAM
);
1622 * reset and start the controller registers
1624 void azx_init_chip(struct azx
*chip
, bool full_reset
)
1626 if (chip
->initialized
)
1629 /* reset controller */
1630 azx_reset(chip
, full_reset
);
1632 /* initialize interrupts */
1633 azx_int_clear(chip
);
1634 azx_int_enable(chip
);
1636 /* initialize the codec command I/O */
1637 if (!chip
->single_cmd
)
1638 azx_init_cmd_io(chip
);
1640 /* program the position buffer */
1641 azx_writel(chip
, DPLBASE
, (u32
)chip
->posbuf
.addr
);
1642 azx_writel(chip
, DPUBASE
, upper_32_bits(chip
->posbuf
.addr
));
1644 chip
->initialized
= 1;
1646 EXPORT_SYMBOL_GPL(azx_init_chip
);
1648 void azx_stop_chip(struct azx
*chip
)
1650 if (!chip
->initialized
)
1653 /* disable interrupts */
1654 azx_int_disable(chip
);
1655 azx_int_clear(chip
);
1657 /* disable CORB/RIRB */
1658 azx_free_cmd_io(chip
);
1660 /* disable position buffer */
1661 azx_writel(chip
, DPLBASE
, 0);
1662 azx_writel(chip
, DPUBASE
, 0);
1664 chip
->initialized
= 0;
1666 EXPORT_SYMBOL_GPL(azx_stop_chip
);
1671 irqreturn_t
azx_interrupt(int irq
, void *dev_id
)
1673 struct azx
*chip
= dev_id
;
1674 struct azx_dev
*azx_dev
;
1679 #ifdef CONFIG_PM_RUNTIME
1680 if (chip
->driver_caps
& AZX_DCAPS_PM_RUNTIME
)
1681 if (!pm_runtime_active(chip
->card
->dev
))
1685 spin_lock(&chip
->reg_lock
);
1687 if (chip
->disabled
) {
1688 spin_unlock(&chip
->reg_lock
);
1692 status
= azx_readl(chip
, INTSTS
);
1693 if (status
== 0 || status
== 0xffffffff) {
1694 spin_unlock(&chip
->reg_lock
);
1698 for (i
= 0; i
< chip
->num_streams
; i
++) {
1699 azx_dev
= &chip
->azx_dev
[i
];
1700 if (status
& azx_dev
->sd_int_sta_mask
) {
1701 sd_status
= azx_sd_readb(chip
, azx_dev
, SD_STS
);
1702 azx_sd_writeb(chip
, azx_dev
, SD_STS
, SD_INT_MASK
);
1703 if (!azx_dev
->substream
|| !azx_dev
->running
||
1704 !(sd_status
& SD_INT_COMPLETE
))
1706 /* check whether this IRQ is really acceptable */
1707 if (!chip
->ops
->position_check
||
1708 chip
->ops
->position_check(chip
, azx_dev
)) {
1709 spin_unlock(&chip
->reg_lock
);
1710 snd_pcm_period_elapsed(azx_dev
->substream
);
1711 spin_lock(&chip
->reg_lock
);
1716 /* clear rirb int */
1717 status
= azx_readb(chip
, RIRBSTS
);
1718 if (status
& RIRB_INT_MASK
) {
1719 if (status
& RIRB_INT_RESPONSE
) {
1720 if (chip
->driver_caps
& AZX_DCAPS_RIRB_PRE_DELAY
)
1722 azx_update_rirb(chip
);
1724 azx_writeb(chip
, RIRBSTS
, RIRB_INT_MASK
);
1727 spin_unlock(&chip
->reg_lock
);
1731 EXPORT_SYMBOL_GPL(azx_interrupt
);
1738 * Probe the given codec address
1740 static int probe_codec(struct azx
*chip
, int addr
)
1742 unsigned int cmd
= (addr
<< 28) | (AC_NODE_ROOT
<< 20) |
1743 (AC_VERB_PARAMETERS
<< 8) | AC_PAR_VENDOR_ID
;
1746 mutex_lock(&chip
->bus
->cmd_mutex
);
1748 azx_send_cmd(chip
->bus
, cmd
);
1749 res
= azx_get_response(chip
->bus
, addr
);
1751 mutex_unlock(&chip
->bus
->cmd_mutex
);
1754 dev_dbg(chip
->card
->dev
, "codec #%d probed OK\n", addr
);
1758 static void azx_bus_reset(struct hda_bus
*bus
)
1760 struct azx
*chip
= bus
->private_data
;
1763 azx_stop_chip(chip
);
1764 azx_init_chip(chip
, true);
1766 if (chip
->initialized
) {
1768 list_for_each_entry(p
, &chip
->pcm_list
, list
)
1769 snd_pcm_suspend_all(p
->pcm
);
1770 snd_hda_suspend(chip
->bus
);
1771 snd_hda_resume(chip
->bus
);
1778 /* power-up/down the controller */
1779 static void azx_power_notify(struct hda_bus
*bus
, bool power_up
)
1781 struct azx
*chip
= bus
->private_data
;
1783 if (!(chip
->driver_caps
& AZX_DCAPS_PM_RUNTIME
))
1787 pm_runtime_get_sync(chip
->card
->dev
);
1789 pm_runtime_put_sync(chip
->card
->dev
);
1793 static int get_jackpoll_interval(struct azx
*chip
)
1798 if (!chip
->jackpoll_ms
)
1801 i
= chip
->jackpoll_ms
[chip
->dev_index
];
1804 if (i
< 50 || i
> 60000)
1807 j
= msecs_to_jiffies(i
);
1809 dev_warn(chip
->card
->dev
,
1810 "jackpoll_ms value out of range: %d\n", i
);
1814 /* Codec initialization */
1815 int azx_codec_create(struct azx
*chip
, const char *model
,
1816 unsigned int max_slots
,
1819 struct hda_bus_template bus_temp
;
1822 memset(&bus_temp
, 0, sizeof(bus_temp
));
1823 bus_temp
.private_data
= chip
;
1824 bus_temp
.modelname
= model
;
1825 bus_temp
.pci
= chip
->pci
;
1826 bus_temp
.ops
.command
= azx_send_cmd
;
1827 bus_temp
.ops
.get_response
= azx_get_response
;
1828 bus_temp
.ops
.attach_pcm
= azx_attach_pcm_stream
;
1829 bus_temp
.ops
.bus_reset
= azx_bus_reset
;
1831 bus_temp
.power_save
= power_save_to
;
1832 bus_temp
.ops
.pm_notify
= azx_power_notify
;
1834 #ifdef CONFIG_SND_HDA_DSP_LOADER
1835 bus_temp
.ops
.load_dsp_prepare
= azx_load_dsp_prepare
;
1836 bus_temp
.ops
.load_dsp_trigger
= azx_load_dsp_trigger
;
1837 bus_temp
.ops
.load_dsp_cleanup
= azx_load_dsp_cleanup
;
1840 err
= snd_hda_bus_new(chip
->card
, &bus_temp
, &chip
->bus
);
1844 if (chip
->driver_caps
& AZX_DCAPS_RIRB_DELAY
) {
1845 dev_dbg(chip
->card
->dev
, "Enable delay in RIRB handling\n");
1846 chip
->bus
->needs_damn_long_delay
= 1;
1851 max_slots
= AZX_DEFAULT_CODECS
;
1853 /* First try to probe all given codec slots */
1854 for (c
= 0; c
< max_slots
; c
++) {
1855 if ((chip
->codec_mask
& (1 << c
)) & chip
->codec_probe_mask
) {
1856 if (probe_codec(chip
, c
) < 0) {
1857 /* Some BIOSen give you wrong codec addresses
1860 dev_warn(chip
->card
->dev
,
1861 "Codec #%d probe error; disabling it...\n", c
);
1862 chip
->codec_mask
&= ~(1 << c
);
1863 /* More badly, accessing to a non-existing
1864 * codec often screws up the controller chip,
1865 * and disturbs the further communications.
1866 * Thus if an error occurs during probing,
1867 * better to reset the controller chip to
1868 * get back to the sanity state.
1870 azx_stop_chip(chip
);
1871 azx_init_chip(chip
, true);
1876 /* AMD chipsets often cause the communication stalls upon certain
1877 * sequence like the pin-detection. It seems that forcing the synced
1878 * access works around the stall. Grrr...
1880 if (chip
->driver_caps
& AZX_DCAPS_SYNC_WRITE
) {
1881 dev_dbg(chip
->card
->dev
, "Enable sync_write for stable communication\n");
1882 chip
->bus
->sync_write
= 1;
1883 chip
->bus
->allow_bus_reset
= 1;
1886 /* Then create codec instances */
1887 for (c
= 0; c
< max_slots
; c
++) {
1888 if ((chip
->codec_mask
& (1 << c
)) & chip
->codec_probe_mask
) {
1889 struct hda_codec
*codec
;
1890 err
= snd_hda_codec_new(chip
->bus
, c
, &codec
);
1893 codec
->jackpoll_interval
= get_jackpoll_interval(chip
);
1894 codec
->beep_mode
= chip
->beep_mode
;
1899 dev_err(chip
->card
->dev
, "no codecs initialized\n");
1904 EXPORT_SYMBOL_GPL(azx_codec_create
);
1906 /* configure each codec instance */
1907 int azx_codec_configure(struct azx
*chip
)
1909 struct hda_codec
*codec
;
1910 list_for_each_entry(codec
, &chip
->bus
->codec_list
, list
) {
1911 snd_hda_codec_configure(codec
);
1915 EXPORT_SYMBOL_GPL(azx_codec_configure
);
1917 /* mixer creation - all stuff is implemented in hda module */
1918 int azx_mixer_create(struct azx
*chip
)
1920 return snd_hda_build_controls(chip
->bus
);
1922 EXPORT_SYMBOL_GPL(azx_mixer_create
);
1925 /* initialize SD streams */
1926 int azx_init_stream(struct azx
*chip
)
1930 /* initialize each stream (aka device)
1931 * assign the starting bdl address to each stream (device)
1934 for (i
= 0; i
< chip
->num_streams
; i
++) {
1935 struct azx_dev
*azx_dev
= &chip
->azx_dev
[i
];
1936 azx_dev
->posbuf
= (u32 __iomem
*)(chip
->posbuf
.area
+ i
* 8);
1937 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1938 azx_dev
->sd_addr
= chip
->remap_addr
+ (0x20 * i
+ 0x80);
1939 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1940 azx_dev
->sd_int_sta_mask
= 1 << i
;
1941 /* stream tag: must be non-zero and unique */
1943 azx_dev
->stream_tag
= i
+ 1;
1948 EXPORT_SYMBOL_GPL(azx_init_stream
);
1951 * reboot notifier for hang-up problem at power-down
1953 static int azx_halt(struct notifier_block
*nb
, unsigned long event
, void *buf
)
1955 struct azx
*chip
= container_of(nb
, struct azx
, reboot_notifier
);
1956 snd_hda_bus_reboot_notify(chip
->bus
);
1957 azx_stop_chip(chip
);
1961 void azx_notifier_register(struct azx
*chip
)
1963 chip
->reboot_notifier
.notifier_call
= azx_halt
;
1964 register_reboot_notifier(&chip
->reboot_notifier
);
1966 EXPORT_SYMBOL_GPL(azx_notifier_register
);
1968 void azx_notifier_unregister(struct azx
*chip
)
1970 if (chip
->reboot_notifier
.notifier_call
)
1971 unregister_reboot_notifier(&chip
->reboot_notifier
);
1973 EXPORT_SYMBOL_GPL(azx_notifier_unregister
);
1975 MODULE_LICENSE("GPL");
1976 MODULE_DESCRIPTION("Common HDA driver funcitons");