ALSA: hda_controller: Separate stream_tag for input and output streams.
LineageOS/android_kernel_motorola_exynos9610.git: sound/pci/hda/hda_controller.c
1 /*
2 *
3 * Implementation of primary alsa driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_priv.h"
34 #include "hda_controller.h"
35
36 #define CREATE_TRACE_POINTS
37 #include "hda_intel_trace.h"
38
39 /* DSP lock helpers */
40 #ifdef CONFIG_SND_HDA_DSP_LOADER
41 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
42 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
43 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
44 #define dsp_is_locked(dev) ((dev)->locked)
45 #else
46 #define dsp_lock_init(dev) do {} while (0)
47 #define dsp_lock(dev) do {} while (0)
48 #define dsp_unlock(dev) do {} while (0)
49 #define dsp_is_locked(dev) 0
50 #endif
51
52 /*
53 * AZX stream operations.
54 */
55
56 /* start a stream */
57 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
58 {
59 /*
60 * Before starting the stream, initialize the parameters
61 */
62 azx_dev->insufficient = 1;
63
64 /* enable SIE */
65 azx_writel(chip, INTCTL,
66 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
67 /* set DMA start and interrupt mask */
68 azx_sd_writeb(chip, azx_dev, SD_CTL,
69 azx_sd_readb(chip, azx_dev, SD_CTL) |
70 SD_CTL_DMA_START | SD_INT_MASK);
71 }
72
73 /* stop DMA */
74 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
75 {
76 azx_sd_writeb(chip, azx_dev, SD_CTL,
77 azx_sd_readb(chip, azx_dev, SD_CTL) &
78 ~(SD_CTL_DMA_START | SD_INT_MASK));
79 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
80 }
81
82 /* stop a stream */
83 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
84 {
85 azx_stream_clear(chip, azx_dev);
86 /* disable SIE */
87 azx_writel(chip, INTCTL,
88 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 }
90 EXPORT_SYMBOL_GPL(azx_stream_stop);
91
92 /* reset stream */
93 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
94 {
95 unsigned char val;
96 int timeout;
97
98 azx_stream_clear(chip, azx_dev);
99
100 azx_sd_writeb(chip, azx_dev, SD_CTL,
101 azx_sd_readb(chip, azx_dev, SD_CTL) |
102 SD_CTL_STREAM_RESET);
103 udelay(3);
104 timeout = 300;
105 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
106 SD_CTL_STREAM_RESET) && --timeout)
107 ;
108 val &= ~SD_CTL_STREAM_RESET;
109 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
110 udelay(3);
111
112 timeout = 300;
113 /* waiting for hardware to report that the stream is out of reset */
114 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
115 SD_CTL_STREAM_RESET) && --timeout)
116 ;
117
118 /* reset first position - may not be synced with hw at this time */
119 *azx_dev->posbuf = 0;
120 }
121
122 /*
123 * set up the SD for streaming
124 */
125 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
126 {
127 unsigned int val;
128 /* make sure the run bit is zero for SD */
129 azx_stream_clear(chip, azx_dev);
130 /* program the stream_tag */
131 val = azx_sd_readl(chip, azx_dev, SD_CTL);
132 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
133 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
134 if (!azx_snoop(chip))
135 val |= SD_CTL_TRAFFIC_PRIO;
136 azx_sd_writel(chip, azx_dev, SD_CTL, val);
137
138 /* program the length of samples in cyclic buffer */
139 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140
141 /* program the stream format */
142 /* this value needs to be the same as the one programmed on the codec side */
143 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144
145 /* program the stream LVI (last valid index) of the BDL */
146 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147
148 /* program the BDL address */
149 /* lower BDL address */
150 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
151 /* upper BDL address */
152 azx_sd_writel(chip, azx_dev, SD_BDLPU,
153 upper_32_bits(azx_dev->bdl.addr));
154
155 /* enable the position buffer */
156 if (chip->get_position[0] != azx_get_pos_lpib ||
157 chip->get_position[1] != azx_get_pos_lpib) {
158 if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
159 azx_writel(chip, DPLBASE,
160 (u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
161 }
162
163 /* set the interrupt enable bits in the descriptor control register */
164 azx_sd_writel(chip, azx_dev, SD_CTL,
165 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
166
167 return 0;
168 }
169
170 /* assign a stream for the PCM */
171 static inline struct azx_dev *
172 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
173 {
174 int dev, i, nums;
175 struct azx_dev *res = NULL;
176 /* make a non-zero unique key for the substream */
177 int key = (substream->pcm->device << 16) | (substream->number << 2) |
178 (substream->stream + 1);
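/* key layout: PCM device number from bit 16 up, substream number from
 * bit 2 up, and (stream direction + 1) in the low bits, so the key
 * can never be zero
 */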
179
180 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
181 dev = chip->playback_index_offset;
182 nums = chip->playback_streams;
183 } else {
184 dev = chip->capture_index_offset;
185 nums = chip->capture_streams;
186 }
187 for (i = 0; i < nums; i++, dev++) {
188 struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 dsp_lock(azx_dev);
190 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
191 if (azx_dev->assigned_key == key) {
192 azx_dev->opened = 1;
193 azx_dev->assigned_key = key;
194 dsp_unlock(azx_dev);
195 return azx_dev;
196 }
197 if (!res ||
198 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
199 res = azx_dev;
200 }
201 dsp_unlock(azx_dev);
202 }
203 if (res) {
204 dsp_lock(res);
205 res->opened = 1;
206 res->assigned_key = key;
207 dsp_unlock(res);
208 }
209 return res;
210 }
211
212 /* release the assigned stream */
213 static inline void azx_release_device(struct azx_dev *azx_dev)
214 {
215 azx_dev->opened = 0;
216 }
217
218 static cycle_t azx_cc_read(const struct cyclecounter *cc)
219 {
220 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
221 struct snd_pcm_substream *substream = azx_dev->substream;
222 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
223 struct azx *chip = apcm->chip;
224
225 return azx_readl(chip, WALLCLK);
226 }
227
228 static void azx_timecounter_init(struct snd_pcm_substream *substream,
229 bool force, cycle_t last)
230 {
231 struct azx_dev *azx_dev = get_azx_dev(substream);
232 struct timecounter *tc = &azx_dev->azx_tc;
233 struct cyclecounter *cc = &azx_dev->azx_cc;
234 u64 nsec;
235
236 cc->read = azx_cc_read;
237 cc->mask = CLOCKSOURCE_MASK(32);
238
239 /*
240 * Converting from 24 MHz to ns means applying a 125/3 factor.
241 * To avoid any saturation issues in intermediate operations,
242 * the 125 factor is applied first. The division is applied
243 * last after reading the timecounter value.
244 * Applying the 1/3 factor as part of the multiplication
245 * requires at least 20 bits for decent precision; however,
246 * overflows would occur after about 4 hours or less, which is not an option.
247 */
248
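/*
 * Worked example: one second of audio is 24,000,000 wall-clock ticks;
 * 24,000,000 * 125 = 3,000,000,000, and dividing by 3 when the counter
 * is read back gives 1,000,000,000 ns, i.e. exactly one second.
 */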
249 cc->mult = 125; /* saturation after 195 years */
250 cc->shift = 0;
251
252 nsec = 0; /* audio time is elapsed time since trigger */
253 timecounter_init(tc, cc, nsec);
254 if (force)
255 /*
256 * force timecounter to use predefined value,
257 * used for synchronized starts
258 */
259 tc->cycle_last = last;
260 }
261
262 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
263 u64 nsec)
264 {
265 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
266 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
267 u64 codec_frames, codec_nsecs;
268
269 if (!hinfo->ops.get_delay)
270 return nsec;
271
272 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
273 codec_nsecs = div_u64(codec_frames * 1000000000LL,
274 substream->runtime->rate);
275
276 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
277 return nsec + codec_nsecs;
278
279 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
280 }
281
282 /*
283 * set up a BDL entry
284 */
285 static int setup_bdle(struct azx *chip,
286 struct snd_dma_buffer *dmab,
287 struct azx_dev *azx_dev, u32 **bdlp,
288 int ofs, int size, int with_ioc)
289 {
290 u32 *bdl = *bdlp;
291
292 while (size > 0) {
293 dma_addr_t addr;
294 int chunk;
295
296 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
297 return -EINVAL;
298
299 addr = snd_sgbuf_get_addr(dmab, ofs);
300 /* program the address field of the BDL entry */
301 bdl[0] = cpu_to_le32((u32)addr);
302 bdl[1] = cpu_to_le32(upper_32_bits(addr));
303 /* program the size field of the BDL entry */
304 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
305 /* one BDLE cannot cross 4K boundary on CTHDA chips */
306 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
307 u32 remain = 0x1000 - (ofs & 0xfff);
308 if (chunk > remain)
309 chunk = remain;
310 }
311 bdl[2] = cpu_to_le32(chunk);
312 /* program the IOC to enable interrupt
313 * only when the whole fragment is processed
314 */
315 size -= chunk;
316 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
317 bdl += 4;
318 azx_dev->frags++;
319 ofs += chunk;
320 }
321 *bdlp = bdl;
322 return ofs;
323 }
324
325 /*
326 * set up BDL entries
327 */
328 static int azx_setup_periods(struct azx *chip,
329 struct snd_pcm_substream *substream,
330 struct azx_dev *azx_dev)
331 {
332 u32 *bdl;
333 int i, ofs, periods, period_bytes;
334 int pos_adj = 0;
335
336 /* reset BDL address */
337 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
338 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
339
340 period_bytes = azx_dev->period_bytes;
341 periods = azx_dev->bufsize / period_bytes;
342
343 /* program the initial BDL entries */
344 bdl = (u32 *)azx_dev->bdl.area;
345 ofs = 0;
346 azx_dev->frags = 0;
347
348 if (chip->bdl_pos_adj)
349 pos_adj = chip->bdl_pos_adj[chip->dev_index];
350 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
351 struct snd_pcm_runtime *runtime = substream->runtime;
352 int pos_align = pos_adj;
353 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
354 if (!pos_adj)
355 pos_adj = pos_align;
356 else
357 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
358 pos_align;
359 pos_adj = frames_to_bytes(runtime, pos_adj);
360 if (pos_adj >= period_bytes) {
361 dev_warn(chip->card->dev, "Too big adjustment %d\n",
362 pos_adj);
363 pos_adj = 0;
364 } else {
365 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
366 azx_dev,
367 &bdl, ofs, pos_adj, true);
368 if (ofs < 0)
369 goto error;
370 }
371 } else
372 pos_adj = 0;
373
374 for (i = 0; i < periods; i++) {
375 if (i == periods - 1 && pos_adj)
376 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
377 azx_dev, &bdl, ofs,
378 period_bytes - pos_adj, 0);
379 else
380 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
381 azx_dev, &bdl, ofs,
382 period_bytes,
383 !azx_dev->no_period_wakeup);
384 if (ofs < 0)
385 goto error;
386 }
387 return 0;
388
389 error:
390 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
391 azx_dev->bufsize, period_bytes);
392 return -EINVAL;
393 }
394
395 /*
396 * PCM ops
397 */
398
399 static int azx_pcm_close(struct snd_pcm_substream *substream)
400 {
401 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
402 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
403 struct azx *chip = apcm->chip;
404 struct azx_dev *azx_dev = get_azx_dev(substream);
405 unsigned long flags;
406
407 mutex_lock(&chip->open_mutex);
408 spin_lock_irqsave(&chip->reg_lock, flags);
409 azx_dev->substream = NULL;
410 azx_dev->running = 0;
411 spin_unlock_irqrestore(&chip->reg_lock, flags);
412 azx_release_device(azx_dev);
413 hinfo->ops.close(hinfo, apcm->codec, substream);
414 snd_hda_power_down(apcm->codec);
415 mutex_unlock(&chip->open_mutex);
416 return 0;
417 }
418
419 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
420 struct snd_pcm_hw_params *hw_params)
421 {
422 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
423 struct azx *chip = apcm->chip;
424 int ret;
425
426 dsp_lock(get_azx_dev(substream));
427 if (dsp_is_locked(get_azx_dev(substream))) {
428 ret = -EBUSY;
429 goto unlock;
430 }
431
432 ret = chip->ops->substream_alloc_pages(chip, substream,
433 params_buffer_bytes(hw_params));
434 unlock:
435 dsp_unlock(get_azx_dev(substream));
436 return ret;
437 }
438
439 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
440 {
441 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
442 struct azx_dev *azx_dev = get_azx_dev(substream);
443 struct azx *chip = apcm->chip;
444 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
445 int err;
446
447 /* reset BDL address */
448 dsp_lock(azx_dev);
449 if (!dsp_is_locked(azx_dev)) {
450 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
451 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
452 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
453 azx_dev->bufsize = 0;
454 azx_dev->period_bytes = 0;
455 azx_dev->format_val = 0;
456 }
457
458 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
459
460 err = chip->ops->substream_free_pages(chip, substream);
461 azx_dev->prepared = 0;
462 dsp_unlock(azx_dev);
463 return err;
464 }
465
466 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
467 {
468 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
469 struct azx *chip = apcm->chip;
470 struct azx_dev *azx_dev = get_azx_dev(substream);
471 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
472 struct snd_pcm_runtime *runtime = substream->runtime;
473 unsigned int bufsize, period_bytes, format_val, stream_tag;
474 int err;
475 struct hda_spdif_out *spdif =
476 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
477 unsigned short ctls = spdif ? spdif->ctls : 0;
478
479 dsp_lock(azx_dev);
480 if (dsp_is_locked(azx_dev)) {
481 err = -EBUSY;
482 goto unlock;
483 }
484
485 azx_stream_reset(chip, azx_dev);
486 format_val = snd_hda_calc_stream_format(apcm->codec,
487 runtime->rate,
488 runtime->channels,
489 runtime->format,
490 hinfo->maxbps,
491 ctls);
492 if (!format_val) {
493 dev_err(chip->card->dev,
494 "invalid format_val, rate=%d, ch=%d, format=%d\n",
495 runtime->rate, runtime->channels, runtime->format);
496 err = -EINVAL;
497 goto unlock;
498 }
499
500 bufsize = snd_pcm_lib_buffer_bytes(substream);
501 period_bytes = snd_pcm_lib_period_bytes(substream);
502
503 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
504 bufsize, format_val);
505
506 if (bufsize != azx_dev->bufsize ||
507 period_bytes != azx_dev->period_bytes ||
508 format_val != azx_dev->format_val ||
509 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
510 azx_dev->bufsize = bufsize;
511 azx_dev->period_bytes = period_bytes;
512 azx_dev->format_val = format_val;
513 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
514 err = azx_setup_periods(chip, substream, azx_dev);
515 if (err < 0)
516 goto unlock;
517 }
518
519 /* when LPIB delay correction gives a small negative value,
520 * we ignore it; currently set the threshold statically to
521 * 64 frames
522 */
523 if (runtime->period_size > 64)
524 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
525 else
526 azx_dev->delay_negative_threshold = 0;
527
528 /* wallclk has a 24 MHz clock source */
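/* e.g. a 1024-frame period at 48 kHz gives
 * (1024 * 24000 / 48000) * 1000 = 512000 wall-clock ticks,
 * about 21.3 ms at 24 MHz
 */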
529 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
530 runtime->rate) * 1000);
531 azx_setup_controller(chip, azx_dev);
532 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
533 azx_dev->fifo_size =
534 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
535 else
536 azx_dev->fifo_size = 0;
537
538 stream_tag = azx_dev->stream_tag;
539 /* CA-IBG chips need the playback stream starting from 1 */
540 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
541 stream_tag > chip->capture_streams)
542 stream_tag -= chip->capture_streams;
543 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
544 azx_dev->format_val, substream);
545
546 unlock:
547 if (!err)
548 azx_dev->prepared = 1;
549 dsp_unlock(azx_dev);
550 return err;
551 }
552
553 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
554 {
555 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
556 struct azx *chip = apcm->chip;
557 struct azx_dev *azx_dev;
558 struct snd_pcm_substream *s;
559 int rstart = 0, start, nsync = 0, sbits = 0;
560 int nwait, timeout;
561
562 azx_dev = get_azx_dev(substream);
563 trace_azx_pcm_trigger(chip, azx_dev, cmd);
564
565 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
566 return -EPIPE;
567
568 switch (cmd) {
569 case SNDRV_PCM_TRIGGER_START:
570 rstart = 1;
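/* fall through */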
571 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
572 case SNDRV_PCM_TRIGGER_RESUME:
573 start = 1;
574 break;
575 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
576 case SNDRV_PCM_TRIGGER_SUSPEND:
577 case SNDRV_PCM_TRIGGER_STOP:
578 start = 0;
579 break;
580 default:
581 return -EINVAL;
582 }
583
584 snd_pcm_group_for_each_entry(s, substream) {
585 if (s->pcm->card != substream->pcm->card)
586 continue;
587 azx_dev = get_azx_dev(s);
588 sbits |= 1 << azx_dev->index;
589 nsync++;
590 snd_pcm_trigger_done(s, substream);
591 }
592
593 spin_lock(&chip->reg_lock);
594
595 /* first, set SYNC bits of corresponding streams */
596 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
597 azx_writel(chip, OLD_SSYNC,
598 azx_readl(chip, OLD_SSYNC) | sbits);
599 else
600 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
601
602 snd_pcm_group_for_each_entry(s, substream) {
603 if (s->pcm->card != substream->pcm->card)
604 continue;
605 azx_dev = get_azx_dev(s);
606 if (start) {
607 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
608 if (!rstart)
609 azx_dev->start_wallclk -=
610 azx_dev->period_wallclk;
611 azx_stream_start(chip, azx_dev);
612 } else {
613 azx_stream_stop(chip, azx_dev);
614 }
615 azx_dev->running = start;
616 }
617 spin_unlock(&chip->reg_lock);
618 if (start) {
619 /* wait until all FIFOs get ready */
620 for (timeout = 5000; timeout; timeout--) {
621 nwait = 0;
622 snd_pcm_group_for_each_entry(s, substream) {
623 if (s->pcm->card != substream->pcm->card)
624 continue;
625 azx_dev = get_azx_dev(s);
626 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
627 SD_STS_FIFO_READY))
628 nwait++;
629 }
630 if (!nwait)
631 break;
632 cpu_relax();
633 }
634 } else {
635 /* wait until all RUN bits are cleared */
636 for (timeout = 5000; timeout; timeout--) {
637 nwait = 0;
638 snd_pcm_group_for_each_entry(s, substream) {
639 if (s->pcm->card != substream->pcm->card)
640 continue;
641 azx_dev = get_azx_dev(s);
642 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
643 SD_CTL_DMA_START)
644 nwait++;
645 }
646 if (!nwait)
647 break;
648 cpu_relax();
649 }
650 }
651 spin_lock(&chip->reg_lock);
652 /* reset SYNC bits */
653 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
654 azx_writel(chip, OLD_SSYNC,
655 azx_readl(chip, OLD_SSYNC) & ~sbits);
656 else
657 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
658 if (start) {
659 azx_timecounter_init(substream, 0, 0);
660 if (nsync > 1) {
661 cycle_t cycle_last;
662
663 /* same start cycle for master and group */
664 azx_dev = get_azx_dev(substream);
665 cycle_last = azx_dev->azx_tc.cycle_last;
666
667 snd_pcm_group_for_each_entry(s, substream) {
668 if (s->pcm->card != substream->pcm->card)
669 continue;
670 azx_timecounter_init(s, 1, cycle_last);
671 }
672 }
673 }
674 spin_unlock(&chip->reg_lock);
675 return 0;
676 }
677
678 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
679 {
680 return azx_sd_readl(chip, azx_dev, SD_LPIB);
681 }
682 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
683
684 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
685 {
686 return le32_to_cpu(*azx_dev->posbuf);
687 }
688 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
689
690 unsigned int azx_get_position(struct azx *chip,
691 struct azx_dev *azx_dev)
692 {
693 struct snd_pcm_substream *substream = azx_dev->substream;
694 unsigned int pos;
695 int stream = substream->stream;
696 int delay = 0;
697
698 if (chip->get_position[stream])
699 pos = chip->get_position[stream](chip, azx_dev);
700 else /* use the position buffer as default */
701 pos = azx_get_pos_posbuf(chip, azx_dev);
702
703 if (pos >= azx_dev->bufsize)
704 pos = 0;
705
706 if (substream->runtime) {
707 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
708 struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
709
710 if (chip->get_delay[stream])
711 delay += chip->get_delay[stream](chip, azx_dev, pos);
712 if (hinfo->ops.get_delay)
713 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
714 substream);
715 substream->runtime->delay = delay;
716 }
717
718 trace_azx_get_position(chip, azx_dev, pos, delay);
719 return pos;
720 }
721 EXPORT_SYMBOL_GPL(azx_get_position);
722
723 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
724 {
725 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
726 struct azx *chip = apcm->chip;
727 struct azx_dev *azx_dev = get_azx_dev(substream);
728 return bytes_to_frames(substream->runtime,
729 azx_get_position(chip, azx_dev));
730 }
731
732 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
733 struct timespec *ts)
734 {
735 struct azx_dev *azx_dev = get_azx_dev(substream);
736 u64 nsec;
737
738 nsec = timecounter_read(&azx_dev->azx_tc);
739 nsec = div_u64(nsec, 3); /* can be optimized */
740 nsec = azx_adjust_codec_delay(substream, nsec);
741
742 *ts = ns_to_timespec(nsec);
743
744 return 0;
745 }
746
747 static struct snd_pcm_hardware azx_pcm_hw = {
748 .info = (SNDRV_PCM_INFO_MMAP |
749 SNDRV_PCM_INFO_INTERLEAVED |
750 SNDRV_PCM_INFO_BLOCK_TRANSFER |
751 SNDRV_PCM_INFO_MMAP_VALID |
752 /* No full-resume yet implemented */
753 /* SNDRV_PCM_INFO_RESUME |*/
754 SNDRV_PCM_INFO_PAUSE |
755 SNDRV_PCM_INFO_SYNC_START |
756 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
757 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
758 .formats = SNDRV_PCM_FMTBIT_S16_LE,
759 .rates = SNDRV_PCM_RATE_48000,
760 .rate_min = 48000,
761 .rate_max = 48000,
762 .channels_min = 2,
763 .channels_max = 2,
764 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
765 .period_bytes_min = 128,
766 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
767 .periods_min = 2,
768 .periods_max = AZX_MAX_FRAG,
769 .fifo_size = 0,
770 };
771
772 static int azx_pcm_open(struct snd_pcm_substream *substream)
773 {
774 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
775 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
776 struct azx *chip = apcm->chip;
777 struct azx_dev *azx_dev;
778 struct snd_pcm_runtime *runtime = substream->runtime;
779 unsigned long flags;
780 int err;
781 int buff_step;
782
783 mutex_lock(&chip->open_mutex);
784 azx_dev = azx_assign_device(chip, substream);
785 if (azx_dev == NULL) {
786 mutex_unlock(&chip->open_mutex);
787 return -EBUSY;
788 }
789 runtime->hw = azx_pcm_hw;
790 runtime->hw.channels_min = hinfo->channels_min;
791 runtime->hw.channels_max = hinfo->channels_max;
792 runtime->hw.formats = hinfo->formats;
793 runtime->hw.rates = hinfo->rates;
794 snd_pcm_limit_hw_rates(runtime);
795 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
796
797 /* avoid wrap-around with wall-clock */
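/* (2^32 ticks / 24 MHz is roughly 178.9 s, hence the 178 s cap below) */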
798 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
799 20,
800 178000000);
801
802 if (chip->align_buffer_size)
803 /* constrain buffer sizes to be a multiple of 128
804 bytes. This is more efficient in terms of memory
805 access but isn't required by the HDA spec and
806 prevents users from specifying exact period/buffer
807 sizes. For example for 44.1kHz, a period size set
808 to 20ms will be rounded to 19.59ms. */
809 buff_step = 128;
810 else
811 /* Don't enforce steps on buffer sizes; they still need to
812 be a multiple of 4 bytes (HDA spec). Tested on Intel
813 HDA controllers, may not work on all devices where
814 option needs to be disabled */
815 buff_step = 4;
816
817 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
818 buff_step);
819 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
820 buff_step);
821 snd_hda_power_up_d3wait(apcm->codec);
822 err = hinfo->ops.open(hinfo, apcm->codec, substream);
823 if (err < 0) {
824 azx_release_device(azx_dev);
825 snd_hda_power_down(apcm->codec);
826 mutex_unlock(&chip->open_mutex);
827 return err;
828 }
829 snd_pcm_limit_hw_rates(runtime);
830 /* sanity check */
831 if (snd_BUG_ON(!runtime->hw.channels_min) ||
832 snd_BUG_ON(!runtime->hw.channels_max) ||
833 snd_BUG_ON(!runtime->hw.formats) ||
834 snd_BUG_ON(!runtime->hw.rates)) {
835 azx_release_device(azx_dev);
836 hinfo->ops.close(hinfo, apcm->codec, substream);
837 snd_hda_power_down(apcm->codec);
838 mutex_unlock(&chip->open_mutex);
839 return -EINVAL;
840 }
841
842 /* disable WALLCLOCK timestamps for capture streams
843 until we figure out how to handle digital inputs */
844 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
845 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
846
847 spin_lock_irqsave(&chip->reg_lock, flags);
848 azx_dev->substream = substream;
849 azx_dev->running = 0;
850 spin_unlock_irqrestore(&chip->reg_lock, flags);
851
852 runtime->private_data = azx_dev;
853 snd_pcm_set_sync(substream);
854 mutex_unlock(&chip->open_mutex);
855 return 0;
856 }
857
858 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
859 struct vm_area_struct *area)
860 {
861 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
862 struct azx *chip = apcm->chip;
863 if (chip->ops->pcm_mmap_prepare)
864 chip->ops->pcm_mmap_prepare(substream, area);
865 return snd_pcm_lib_default_mmap(substream, area);
866 }
867
868 static struct snd_pcm_ops azx_pcm_ops = {
869 .open = azx_pcm_open,
870 .close = azx_pcm_close,
871 .ioctl = snd_pcm_lib_ioctl,
872 .hw_params = azx_pcm_hw_params,
873 .hw_free = azx_pcm_hw_free,
874 .prepare = azx_pcm_prepare,
875 .trigger = azx_pcm_trigger,
876 .pointer = azx_pcm_pointer,
877 .wall_clock = azx_get_wallclock_tstamp,
878 .mmap = azx_pcm_mmap,
879 .page = snd_pcm_sgbuf_ops_page,
880 };
881
882 static void azx_pcm_free(struct snd_pcm *pcm)
883 {
884 struct azx_pcm *apcm = pcm->private_data;
885 if (apcm) {
886 list_del(&apcm->list);
887 kfree(apcm);
888 }
889 }
890
891 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
892
893 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
894 struct hda_pcm *cpcm)
895 {
896 struct azx *chip = bus->private_data;
897 struct snd_pcm *pcm;
898 struct azx_pcm *apcm;
899 int pcm_dev = cpcm->device;
900 unsigned int size;
901 int s, err;
902
903 list_for_each_entry(apcm, &chip->pcm_list, list) {
904 if (apcm->pcm->device == pcm_dev) {
905 dev_err(chip->card->dev, "PCM %d already exists\n",
906 pcm_dev);
907 return -EBUSY;
908 }
909 }
910 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
911 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
912 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
913 &pcm);
914 if (err < 0)
915 return err;
916 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
917 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
918 if (apcm == NULL)
919 return -ENOMEM;
920 apcm->chip = chip;
921 apcm->pcm = pcm;
922 apcm->codec = codec;
923 pcm->private_data = apcm;
924 pcm->private_free = azx_pcm_free;
925 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
926 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
927 list_add_tail(&apcm->list, &chip->pcm_list);
928 cpcm->pcm = pcm;
929 for (s = 0; s < 2; s++) {
930 apcm->hinfo[s] = &cpcm->stream[s];
931 if (cpcm->stream[s].substreams)
932 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
933 }
934 /* buffer pre-allocation */
935 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
936 if (size > MAX_PREALLOC_SIZE)
937 size = MAX_PREALLOC_SIZE;
938 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
939 chip->card->dev,
940 size, MAX_PREALLOC_SIZE);
941 /* link to codec */
942 pcm->dev = &codec->dev;
943 return 0;
944 }
945
946 /*
947 * CORB / RIRB interface
948 */
949 static int azx_alloc_cmd_io(struct azx *chip)
950 {
951 int err;
952
953 /* a single page (at least 4096 bytes) must suffice for both ringbuffers */
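/* (CORB: 256 entries * 4 bytes in the first half of the page,
 * RIRB: 256 entries * 8 bytes in the second half)
 */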
954 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
955 PAGE_SIZE, &chip->rb);
956 if (err < 0)
957 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
958 return err;
959 }
960 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
961
962 static void azx_init_cmd_io(struct azx *chip)
963 {
964 int timeout;
965
966 spin_lock_irq(&chip->reg_lock);
967 /* CORB set up */
968 chip->corb.addr = chip->rb.addr;
969 chip->corb.buf = (u32 *)chip->rb.area;
970 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
971 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
972
973 /* set the corb size to 256 entries (ULI requires this explicitly) */
974 azx_writeb(chip, CORBSIZE, 0x02);
975 /* set the corb write pointer to 0 */
976 azx_writew(chip, CORBWP, 0);
977
978 /* reset the corb hw read pointer */
979 azx_writew(chip, CORBRP, AZX_CORBRP_RST);
980 if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
981 for (timeout = 1000; timeout > 0; timeout--) {
982 if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
983 break;
984 udelay(1);
985 }
986 if (timeout <= 0)
987 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
988 azx_readw(chip, CORBRP));
989
990 azx_writew(chip, CORBRP, 0);
991 for (timeout = 1000; timeout > 0; timeout--) {
992 if (azx_readw(chip, CORBRP) == 0)
993 break;
994 udelay(1);
995 }
996 if (timeout <= 0)
997 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
998 azx_readw(chip, CORBRP));
999 }
1000
1001 /* enable corb dma */
1002 azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);
1003
1004 /* RIRB set up */
1005 chip->rirb.addr = chip->rb.addr + 2048;
1006 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1007 chip->rirb.wp = chip->rirb.rp = 0;
1008 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1009 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1010 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1011
1012 /* set the rirb size to 256 entries (ULI requires this explicitly) */
1013 azx_writeb(chip, RIRBSIZE, 0x02);
1014 /* reset the rirb hw write pointer */
1015 azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
1016 /* set N=1, get RIRB response interrupt for new entry */
1017 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1018 azx_writew(chip, RINTCNT, 0xc0);
1019 else
1020 azx_writew(chip, RINTCNT, 1);
1021 /* enable rirb dma and response irq */
1022 azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
1023 spin_unlock_irq(&chip->reg_lock);
1024 }
1025 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1026
1027 static void azx_free_cmd_io(struct azx *chip)
1028 {
1029 spin_lock_irq(&chip->reg_lock);
1030 /* disable ringbuffer DMAs */
1031 azx_writeb(chip, RIRBCTL, 0);
1032 azx_writeb(chip, CORBCTL, 0);
1033 spin_unlock_irq(&chip->reg_lock);
1034 }
1035 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1036
1037 static unsigned int azx_command_addr(u32 cmd)
1038 {
1039 unsigned int addr = cmd >> 28;
1040
1041 if (addr >= AZX_MAX_CODECS) {
1042 snd_BUG();
1043 addr = 0;
1044 }
1045
1046 return addr;
1047 }
1048
1049 /* send a command */
1050 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1051 {
1052 struct azx *chip = bus->private_data;
1053 unsigned int addr = azx_command_addr(val);
1054 unsigned int wp, rp;
1055
1056 spin_lock_irq(&chip->reg_lock);
1057
1058 /* add command to corb */
1059 wp = azx_readw(chip, CORBWP);
1060 if (wp == 0xffff) {
1061 /* something wrong, controller likely turned to D3 */
1062 spin_unlock_irq(&chip->reg_lock);
1063 return -EIO;
1064 }
1065 wp++;
1066 wp %= AZX_MAX_CORB_ENTRIES;
1067
1068 rp = azx_readw(chip, CORBRP);
1069 if (wp == rp) {
1070 /* oops, it's full */
1071 spin_unlock_irq(&chip->reg_lock);
1072 return -EAGAIN;
1073 }
1074
1075 chip->rirb.cmds[addr]++;
1076 chip->corb.buf[wp] = cpu_to_le32(val);
1077 azx_writew(chip, CORBWP, wp);
1078
1079 spin_unlock_irq(&chip->reg_lock);
1080
1081 return 0;
1082 }
1083
1084 #define AZX_RIRB_EX_UNSOL_EV (1<<4)
1085
1086 /* retrieve RIRB entry - called from interrupt handler */
1087 static void azx_update_rirb(struct azx *chip)
1088 {
1089 unsigned int rp, wp;
1090 unsigned int addr;
1091 u32 res, res_ex;
1092
1093 wp = azx_readw(chip, RIRBWP);
1094 if (wp == 0xffff) {
1095 /* something wrong, controller likely turned to D3 */
1096 return;
1097 }
1098
1099 if (wp == chip->rirb.wp)
1100 return;
1101 chip->rirb.wp = wp;
1102
1103 while (chip->rirb.rp != wp) {
1104 chip->rirb.rp++;
1105 chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
1106
1107 rp = chip->rirb.rp << 1; /* an RIRB entry is 8 bytes */
1108 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1109 res = le32_to_cpu(chip->rirb.buf[rp]);
1110 addr = res_ex & 0xf;
1111 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1112 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1113 res, res_ex,
1114 chip->rirb.rp, wp);
1115 snd_BUG();
1116 } else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
1117 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1118 else if (chip->rirb.cmds[addr]) {
1119 chip->rirb.res[addr] = res;
1120 smp_wmb();
1121 chip->rirb.cmds[addr]--;
1122 } else if (printk_ratelimit()) {
1123 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1124 res, res_ex,
1125 chip->last_cmd[addr]);
1126 }
1127 }
1128 }
1129
1130 /* receive a response */
1131 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1132 unsigned int addr)
1133 {
1134 struct azx *chip = bus->private_data;
1135 unsigned long timeout;
1136 unsigned long loopcounter;
1137 int do_poll = 0;
1138
1139 again:
1140 timeout = jiffies + msecs_to_jiffies(1000);
1141
1142 for (loopcounter = 0;; loopcounter++) {
1143 if (chip->polling_mode || do_poll) {
1144 spin_lock_irq(&chip->reg_lock);
1145 azx_update_rirb(chip);
1146 spin_unlock_irq(&chip->reg_lock);
1147 }
1148 if (!chip->rirb.cmds[addr]) {
1149 smp_rmb();
1150 bus->rirb_error = 0;
1151
1152 if (!do_poll)
1153 chip->poll_count = 0;
1154 return chip->rirb.res[addr]; /* the last value */
1155 }
1156 if (time_after(jiffies, timeout))
1157 break;
1158 if (bus->needs_damn_long_delay || loopcounter > 3000)
1159 msleep(2); /* temporary workaround */
1160 else {
1161 udelay(10);
1162 cond_resched();
1163 }
1164 }
1165
1166 if (bus->no_response_fallback)
1167 return -1;
1168
1169 if (!chip->polling_mode && chip->poll_count < 2) {
1170 dev_dbg(chip->card->dev,
1171 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1172 chip->last_cmd[addr]);
1173 do_poll = 1;
1174 chip->poll_count++;
1175 goto again;
1176 }
1177
1178
1179 if (!chip->polling_mode) {
1180 dev_warn(chip->card->dev,
1181 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1182 chip->last_cmd[addr]);
1183 chip->polling_mode = 1;
1184 goto again;
1185 }
1186
1187 if (chip->msi) {
1188 dev_warn(chip->card->dev,
1189 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1190 chip->last_cmd[addr]);
1191 if (chip->ops->disable_msi_reset_irq &&
1192 chip->ops->disable_msi_reset_irq(chip) < 0) {
1193 bus->rirb_error = 1;
1194 return -1;
1195 }
1196 goto again;
1197 }
1198
1199 if (chip->probing) {
1200 /* If this critical timeout happens during the codec probing
1201 * phase, this is likely an access to a non-existing codec
1202 * slot. Better to return an error and reset the system.
1203 */
1204 return -1;
1205 }
1206
1207 /* a fatal communication error; need either to reset or to fallback
1208 * to the single_cmd mode
1209 */
1210 bus->rirb_error = 1;
1211 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1212 bus->response_reset = 1;
1213 return -1; /* give a chance to retry */
1214 }
1215
1216 dev_err(chip->card->dev,
1217 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1218 chip->last_cmd[addr]);
1219 chip->single_cmd = 1;
1220 bus->response_reset = 0;
1221 /* release CORB/RIRB */
1222 azx_free_cmd_io(chip);
1223 /* disable unsolicited responses */
1224 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1225 return -1;
1226 }
1227
1228 /*
1229 * Use the single immediate command instead of CORB/RIRB for simplicity
1230 *
1231 * Note: according to Intel, this is not the preferred usage. The command was
1232 * intended for the BIOS only, and may get confused with unsolicited
1233 * responses. So, we shouldn't use it for normal operation from the
1234 * driver.
1235 * I left the code, however, for debugging/testing purposes.
1236 */
1237
1238 /* receive a response */
1239 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1240 {
1241 int timeout = 50;
1242
1243 while (timeout--) {
1244 /* check the IRV (immediate result valid) bit */
1245 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
1246 /* reuse rirb.res as the response return value */
1247 chip->rirb.res[addr] = azx_readl(chip, IR);
1248 return 0;
1249 }
1250 udelay(1);
1251 }
1252 if (printk_ratelimit())
1253 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1254 azx_readw(chip, IRS));
1255 chip->rirb.res[addr] = -1;
1256 return -EIO;
1257 }
1258
1259 /* send a command */
1260 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1261 {
1262 struct azx *chip = bus->private_data;
1263 unsigned int addr = azx_command_addr(val);
1264 int timeout = 50;
1265
1266 bus->rirb_error = 0;
1267 while (timeout--) {
1268 /* check ICB busy bit */
1269 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
1270 /* Clear IRV valid bit */
1271 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1272 AZX_IRS_VALID);
1273 azx_writel(chip, IC, val);
1274 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1275 AZX_IRS_BUSY);
1276 return azx_single_wait_for_response(chip, addr);
1277 }
1278 udelay(1);
1279 }
1280 if (printk_ratelimit())
1281 dev_dbg(chip->card->dev,
1282 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1283 azx_readw(chip, IRS), val);
1284 return -EIO;
1285 }
1286
1287 /* receive a response */
1288 static unsigned int azx_single_get_response(struct hda_bus *bus,
1289 unsigned int addr)
1290 {
1291 struct azx *chip = bus->private_data;
1292 return chip->rirb.res[addr];
1293 }
1294
1295 /*
1296 * The below are the main callbacks from hda_codec.
1297 *
1298 * They are just the skeleton to call sub-callbacks according to the
1299 * current setting of chip->single_cmd.
1300 */
1301
1302 /* send a command */
1303 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1304 {
1305 struct azx *chip = bus->private_data;
1306
1307 if (chip->disabled)
1308 return 0;
1309 chip->last_cmd[azx_command_addr(val)] = val;
1310 if (chip->single_cmd)
1311 return azx_single_send_cmd(bus, val);
1312 else
1313 return azx_corb_send_cmd(bus, val);
1314 }
1315 EXPORT_SYMBOL_GPL(azx_send_cmd);
1316
1317 /* get a response */
1318 static unsigned int azx_get_response(struct hda_bus *bus,
1319 unsigned int addr)
1320 {
1321 struct azx *chip = bus->private_data;
1322 if (chip->disabled)
1323 return 0;
1324 if (chip->single_cmd)
1325 return azx_single_get_response(bus, addr);
1326 else
1327 return azx_rirb_get_response(bus, addr);
1328 }
1329 EXPORT_SYMBOL_GPL(azx_get_response);
1330
1331 #ifdef CONFIG_SND_HDA_DSP_LOADER
1332 /*
1333 * DSP loading code (e.g. for CA0132)
1334 */
1335
1336 /* use the first stream for loading DSP */
1337 static struct azx_dev *
1338 azx_get_dsp_loader_dev(struct azx *chip)
1339 {
1340 return &chip->azx_dev[chip->playback_index_offset];
1341 }
1342
1343 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1344 unsigned int byte_size,
1345 struct snd_dma_buffer *bufp)
1346 {
1347 u32 *bdl;
1348 struct azx *chip = bus->private_data;
1349 struct azx_dev *azx_dev;
1350 int err;
1351
1352 azx_dev = azx_get_dsp_loader_dev(chip);
1353
1354 dsp_lock(azx_dev);
1355 spin_lock_irq(&chip->reg_lock);
1356 if (azx_dev->running || azx_dev->locked) {
1357 spin_unlock_irq(&chip->reg_lock);
1358 err = -EBUSY;
1359 goto unlock;
1360 }
1361 azx_dev->prepared = 0;
1362 chip->saved_azx_dev = *azx_dev;
1363 azx_dev->locked = 1;
1364 spin_unlock_irq(&chip->reg_lock);
1365
1366 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1367 byte_size, bufp);
1368 if (err < 0)
1369 goto err_alloc;
1370
1371 azx_dev->bufsize = byte_size;
1372 azx_dev->period_bytes = byte_size;
1373 azx_dev->format_val = format;
1374
1375 azx_stream_reset(chip, azx_dev);
1376
1377 /* reset BDL address */
1378 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1379 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1380
1381 azx_dev->frags = 0;
1382 bdl = (u32 *)azx_dev->bdl.area;
1383 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1384 if (err < 0)
1385 goto error;
1386
1387 azx_setup_controller(chip, azx_dev);
1388 dsp_unlock(azx_dev);
1389 return azx_dev->stream_tag;
1390
1391 error:
1392 chip->ops->dma_free_pages(chip, bufp);
1393 err_alloc:
1394 spin_lock_irq(&chip->reg_lock);
1395 if (azx_dev->opened)
1396 *azx_dev = chip->saved_azx_dev;
1397 azx_dev->locked = 0;
1398 spin_unlock_irq(&chip->reg_lock);
1399 unlock:
1400 dsp_unlock(azx_dev);
1401 return err;
1402 }
1403
1404 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1405 {
1406 struct azx *chip = bus->private_data;
1407 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1408
1409 if (start)
1410 azx_stream_start(chip, azx_dev);
1411 else
1412 azx_stream_stop(chip, azx_dev);
1413 azx_dev->running = start;
1414 }
1415
1416 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1417 struct snd_dma_buffer *dmab)
1418 {
1419 struct azx *chip = bus->private_data;
1420 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1421
1422 if (!dmab->area || !azx_dev->locked)
1423 return;
1424
1425 dsp_lock(azx_dev);
1426 /* reset BDL address */
1427 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1428 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1429 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1430 azx_dev->bufsize = 0;
1431 azx_dev->period_bytes = 0;
1432 azx_dev->format_val = 0;
1433
1434 chip->ops->dma_free_pages(chip, dmab);
1435 dmab->area = NULL;
1436
1437 spin_lock_irq(&chip->reg_lock);
1438 if (azx_dev->opened)
1439 *azx_dev = chip->saved_azx_dev;
1440 azx_dev->locked = 0;
1441 spin_unlock_irq(&chip->reg_lock);
1442 dsp_unlock(azx_dev);
1443 }
1444 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1445
1446 int azx_alloc_stream_pages(struct azx *chip)
1447 {
1448 int i, err;
1449 struct snd_card *card = chip->card;
1450
1451 for (i = 0; i < chip->num_streams; i++) {
1452 dsp_lock_init(&chip->azx_dev[i]);
1453 /* allocate memory for the BDL for each stream */
1454 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1455 BDL_SIZE,
1456 &chip->azx_dev[i].bdl);
1457 if (err < 0) {
1458 dev_err(card->dev, "cannot allocate BDL\n");
1459 return -ENOMEM;
1460 }
1461 }
1462 /* allocate memory for the position buffer */
1463 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1464 chip->num_streams * 8, &chip->posbuf);
1465 if (err < 0) {
1466 dev_err(card->dev, "cannot allocate posbuf\n");
1467 return -ENOMEM;
1468 }
1469
1470 /* allocate CORB/RIRB */
1471 err = azx_alloc_cmd_io(chip);
1472 if (err < 0)
1473 return err;
1474 return 0;
1475 }
1476 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1477
1478 void azx_free_stream_pages(struct azx *chip)
1479 {
1480 int i;
1481 if (chip->azx_dev) {
1482 for (i = 0; i < chip->num_streams; i++)
1483 if (chip->azx_dev[i].bdl.area)
1484 chip->ops->dma_free_pages(
1485 chip, &chip->azx_dev[i].bdl);
1486 }
1487 if (chip->rb.area)
1488 chip->ops->dma_free_pages(chip, &chip->rb);
1489 if (chip->posbuf.area)
1490 chip->ops->dma_free_pages(chip, &chip->posbuf);
1491 }
1492 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1493
1494 /*
1495 * Lowlevel interface
1496 */
1497
1498 /* enter link reset */
1499 void azx_enter_link_reset(struct azx *chip)
1500 {
1501 unsigned long timeout;
1502
1503 /* reset controller */
1504 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);
1505
1506 timeout = jiffies + msecs_to_jiffies(100);
1507 while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
1508 time_before(jiffies, timeout))
1509 usleep_range(500, 1000);
1510 }
1511 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1512
1513 /* exit link reset */
1514 static void azx_exit_link_reset(struct azx *chip)
1515 {
1516 unsigned long timeout;
1517
1518 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);
1519
1520 timeout = jiffies + msecs_to_jiffies(100);
1521 while (!azx_readb(chip, GCTL) &&
1522 time_before(jiffies, timeout))
1523 usleep_range(500, 1000);
1524 }
1525
1526 /* reset codec link */
1527 static int azx_reset(struct azx *chip, bool full_reset)
1528 {
1529 if (!full_reset)
1530 goto __skip;
1531
1532 /* clear STATESTS */
1533 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1534
1535 /* reset controller */
1536 azx_enter_link_reset(chip);
1537
1538 /* delay for >= 100us for codec PLL to settle per spec
1539 * Rev 0.9 section 5.5.1
1540 */
1541 usleep_range(500, 1000);
1542
1543 /* Bring controller out of reset */
1544 azx_exit_link_reset(chip);
1545
1546 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1547 usleep_range(1000, 1200);
1548
1549 __skip:
1550 /* check to see if controller is ready */
1551 if (!azx_readb(chip, GCTL)) {
1552 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1553 return -EBUSY;
1554 }
1555
1556 /* Accept unsolicited responses */
1557 if (!chip->single_cmd)
1558 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1559 AZX_GCTL_UNSOL);
1560
1561 /* detect codecs */
1562 if (!chip->codec_mask) {
1563 chip->codec_mask = azx_readw(chip, STATESTS);
1564 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1565 chip->codec_mask);
1566 }
1567
1568 return 0;
1569 }
1570
1571 /* enable interrupts */
1572 static void azx_int_enable(struct azx *chip)
1573 {
1574 /* enable controller CIE and GIE */
1575 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1576 AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
1577 }
1578
1579 /* disable interrupts */
1580 static void azx_int_disable(struct azx *chip)
1581 {
1582 int i;
1583
1584 /* disable interrupts in stream descriptor */
1585 for (i = 0; i < chip->num_streams; i++) {
1586 struct azx_dev *azx_dev = &chip->azx_dev[i];
1587 azx_sd_writeb(chip, azx_dev, SD_CTL,
1588 azx_sd_readb(chip, azx_dev, SD_CTL) &
1589 ~SD_INT_MASK);
1590 }
1591
1592 /* disable SIE for all streams */
1593 azx_writeb(chip, INTCTL, 0);
1594
1595 /* disable controller CIE and GIE */
1596 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1597 ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
1598 }
1599
1600 /* clear interrupts */
1601 static void azx_int_clear(struct azx *chip)
1602 {
1603 int i;
1604
1605 /* clear stream status */
1606 for (i = 0; i < chip->num_streams; i++) {
1607 struct azx_dev *azx_dev = &chip->azx_dev[i];
1608 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1609 }
1610
1611 /* clear STATESTS */
1612 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1613
1614 /* clear rirb status */
1615 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1616
1617 /* clear int status */
1618 azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
1619 }
1620
1621 /*
1622 * reset and start the controller registers
1623 */
1624 void azx_init_chip(struct azx *chip, bool full_reset)
1625 {
1626 if (chip->initialized)
1627 return;
1628
1629 /* reset controller */
1630 azx_reset(chip, full_reset);
1631
1632 /* initialize interrupts */
1633 azx_int_clear(chip);
1634 azx_int_enable(chip);
1635
1636 /* initialize the codec command I/O */
1637 if (!chip->single_cmd)
1638 azx_init_cmd_io(chip);
1639
1640 /* program the position buffer */
1641 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1642 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1643
1644 chip->initialized = 1;
1645 }
1646 EXPORT_SYMBOL_GPL(azx_init_chip);
1647
1648 void azx_stop_chip(struct azx *chip)
1649 {
1650 if (!chip->initialized)
1651 return;
1652
1653 /* disable interrupts */
1654 azx_int_disable(chip);
1655 azx_int_clear(chip);
1656
1657 /* disable CORB/RIRB */
1658 azx_free_cmd_io(chip);
1659
1660 /* disable position buffer */
1661 azx_writel(chip, DPLBASE, 0);
1662 azx_writel(chip, DPUBASE, 0);
1663
1664 chip->initialized = 0;
1665 }
1666 EXPORT_SYMBOL_GPL(azx_stop_chip);
1667
1668 /*
1669 * interrupt handler
1670 */
1671 irqreturn_t azx_interrupt(int irq, void *dev_id)
1672 {
1673 struct azx *chip = dev_id;
1674 struct azx_dev *azx_dev;
1675 u32 status;
1676 u8 sd_status;
1677 int i;
1678
1679 #ifdef CONFIG_PM
1680 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1681 if (!pm_runtime_active(chip->card->dev))
1682 return IRQ_NONE;
1683 #endif
1684
1685 spin_lock(&chip->reg_lock);
1686
1687 if (chip->disabled) {
1688 spin_unlock(&chip->reg_lock);
1689 return IRQ_NONE;
1690 }
1691
1692 status = azx_readl(chip, INTSTS);
1693 if (status == 0 || status == 0xffffffff) {
1694 spin_unlock(&chip->reg_lock);
1695 return IRQ_NONE;
1696 }
1697
1698 for (i = 0; i < chip->num_streams; i++) {
1699 azx_dev = &chip->azx_dev[i];
1700 if (status & azx_dev->sd_int_sta_mask) {
1701 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1702 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1703 if (!azx_dev->substream || !azx_dev->running ||
1704 !(sd_status & SD_INT_COMPLETE))
1705 continue;
1706 /* check whether this IRQ is really acceptable */
1707 if (!chip->ops->position_check ||
1708 chip->ops->position_check(chip, azx_dev)) {
1709 spin_unlock(&chip->reg_lock);
1710 snd_pcm_period_elapsed(azx_dev->substream);
1711 spin_lock(&chip->reg_lock);
1712 }
1713 }
1714 }
1715
1716 /* clear rirb int */
1717 status = azx_readb(chip, RIRBSTS);
1718 if (status & RIRB_INT_MASK) {
1719 if (status & RIRB_INT_RESPONSE) {
1720 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1721 udelay(80);
1722 azx_update_rirb(chip);
1723 }
1724 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1725 }
1726
1727 spin_unlock(&chip->reg_lock);
1728
1729 return IRQ_HANDLED;
1730 }
1731 EXPORT_SYMBOL_GPL(azx_interrupt);
1732
1733 /*
1734 * Codec interface
1735 */
1736
1737 /*
1738 * Probe the given codec address
1739 */
1740 static int probe_codec(struct azx *chip, int addr)
1741 {
1742 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1743 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1744 unsigned int res;
1745
1746 mutex_lock(&chip->bus->cmd_mutex);
1747 chip->probing = 1;
1748 azx_send_cmd(chip->bus, cmd);
1749 res = azx_get_response(chip->bus, addr);
1750 chip->probing = 0;
1751 mutex_unlock(&chip->bus->cmd_mutex);
1752 if (res == -1)
1753 return -EIO;
1754 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1755 return 0;
1756 }
1757
1758 static void azx_bus_reset(struct hda_bus *bus)
1759 {
1760 struct azx *chip = bus->private_data;
1761
1762 bus->in_reset = 1;
1763 azx_stop_chip(chip);
1764 azx_init_chip(chip, true);
1765 #ifdef CONFIG_PM
1766 if (chip->initialized) {
1767 struct azx_pcm *p;
1768 list_for_each_entry(p, &chip->pcm_list, list)
1769 snd_pcm_suspend_all(p->pcm);
1770 snd_hda_suspend(chip->bus);
1771 snd_hda_resume(chip->bus);
1772 }
1773 #endif
1774 bus->in_reset = 0;
1775 }
1776
1777 #ifdef CONFIG_PM
1778 /* power-up/down the controller */
1779 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1780 {
1781 struct azx *chip = bus->private_data;
1782
1783 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1784 return;
1785
1786 if (power_up)
1787 pm_runtime_get_sync(chip->card->dev);
1788 else
1789 pm_runtime_put_sync(chip->card->dev);
1790 }
1791 #endif
1792
1793 static int get_jackpoll_interval(struct azx *chip)
1794 {
1795 int i;
1796 unsigned int j;
1797
1798 if (!chip->jackpoll_ms)
1799 return 0;
1800
1801 i = chip->jackpoll_ms[chip->dev_index];
1802 if (i == 0)
1803 return 0;
1804 if (i < 50 || i > 60000)
1805 j = 0;
1806 else
1807 j = msecs_to_jiffies(i);
1808 if (j == 0)
1809 dev_warn(chip->card->dev,
1810 "jackpoll_ms value out of range: %d\n", i);
1811 return j;
1812 }
1813
1814 /* Codec initialization */
1815 int azx_codec_create(struct azx *chip, const char *model,
1816 unsigned int max_slots,
1817 int *power_save_to)
1818 {
1819 struct hda_bus_template bus_temp;
1820 int c, codecs, err;
1821
1822 memset(&bus_temp, 0, sizeof(bus_temp));
1823 bus_temp.private_data = chip;
1824 bus_temp.modelname = model;
1825 bus_temp.pci = chip->pci;
1826 bus_temp.ops.command = azx_send_cmd;
1827 bus_temp.ops.get_response = azx_get_response;
1828 bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1829 bus_temp.ops.bus_reset = azx_bus_reset;
1830 #ifdef CONFIG_PM
1831 bus_temp.power_save = power_save_to;
1832 bus_temp.ops.pm_notify = azx_power_notify;
1833 #endif
1834 #ifdef CONFIG_SND_HDA_DSP_LOADER
1835 bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1836 bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1837 bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1838 #endif
1839
1840 err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
1841 if (err < 0)
1842 return err;
1843
1844 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1845 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1846 chip->bus->needs_damn_long_delay = 1;
1847 }
1848
1849 codecs = 0;
1850 if (!max_slots)
1851 max_slots = AZX_DEFAULT_CODECS;
1852
1853 /* First try to probe all given codec slots */
1854 for (c = 0; c < max_slots; c++) {
1855 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1856 if (probe_codec(chip, c) < 0) {
1857 /* Some BIOSen give you wrong codec addresses
1858 * that don't exist
1859 */
1860 dev_warn(chip->card->dev,
1861 "Codec #%d probe error; disabling it...\n", c);
1862 chip->codec_mask &= ~(1 << c);
1863 /* Worse, accessing a non-existing
1864 * codec often screws up the controller chip,
1865 * and disturbs further communication.
1866 * Thus if an error occurs during probing,
1867 * better to reset the controller chip to
1868 * get back to the sanity state.
1869 */
1870 azx_stop_chip(chip);
1871 azx_init_chip(chip, true);
1872 }
1873 }
1874 }
1875
1876 /* AMD chipsets often cause communication stalls upon certain
1877 * sequences such as pin-detection. It seems that forcing synced
1878 * access works around the stall. Grrr...
1879 */
1880 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1881 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1882 chip->bus->sync_write = 1;
1883 chip->bus->allow_bus_reset = 1;
1884 }
1885
1886 /* Then create codec instances */
1887 for (c = 0; c < max_slots; c++) {
1888 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1889 struct hda_codec *codec;
1890 err = snd_hda_codec_new(chip->bus, c, &codec);
1891 if (err < 0)
1892 continue;
1893 codec->jackpoll_interval = get_jackpoll_interval(chip);
1894 codec->beep_mode = chip->beep_mode;
1895 codecs++;
1896 }
1897 }
1898 if (!codecs) {
1899 dev_err(chip->card->dev, "no codecs initialized\n");
1900 return -ENXIO;
1901 }
1902 return 0;
1903 }
1904 EXPORT_SYMBOL_GPL(azx_codec_create);
1905
1906 /* configure each codec instance */
1907 int azx_codec_configure(struct azx *chip)
1908 {
1909 struct hda_codec *codec;
1910 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1911 snd_hda_codec_configure(codec);
1912 }
1913 return 0;
1914 }
1915 EXPORT_SYMBOL_GPL(azx_codec_configure);
1916
1917 /* mixer creation - all stuff is implemented in hda module */
1918 int azx_mixer_create(struct azx *chip)
1919 {
1920 return snd_hda_build_controls(chip->bus);
1921 }
1922 EXPORT_SYMBOL_GPL(azx_mixer_create);
1923
1924
1925 static bool is_input_stream(struct azx *chip, unsigned char index)
1926 {
1927 return (index >= chip->capture_index_offset &&
1928 index < chip->capture_index_offset + chip->capture_streams);
1929 }
1930
1931 /* initialize SD streams */
1932 int azx_init_stream(struct azx *chip)
1933 {
1934 int i;
1935 int in_stream_tag = 0;
1936 int out_stream_tag = 0;
1937
1938 /* initialize each stream (aka device):
1939 * assign the starting bdl address and the stream tag
1940 * to each stream (device)
1941 */
1942 for (i = 0; i < chip->num_streams; i++) {
1943 struct azx_dev *azx_dev = &chip->azx_dev[i];
1944 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
1945 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1946 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1947 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1948 azx_dev->sd_int_sta_mask = 1 << i;
1949 azx_dev->index = i;
1950
1951 /* stream tag must be unique throughout
1952 * the stream direction group,
1953 * valid values 1...15
1954 * use separate stream tags if the flag
1955 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
1956 */
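/* e.g. with 4 SDI and 4 SDO streams, the inputs get tags 1..4
 * and the outputs independently get tags 1..4 as well
 */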
1957 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1958 azx_dev->stream_tag =
1959 is_input_stream(chip, i) ?
1960 ++in_stream_tag :
1961 ++out_stream_tag;
1962 else
1963 azx_dev->stream_tag = i + 1;
1964 }
1965
1966 return 0;
1967 }
1968 EXPORT_SYMBOL_GPL(azx_init_stream);
1969
1970 /*
1971 * reboot notifier for hang-up problem at power-down
1972 */
1973 static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
1974 {
1975 struct azx *chip = container_of(nb, struct azx, reboot_notifier);
1976 snd_hda_bus_reboot_notify(chip->bus);
1977 azx_stop_chip(chip);
1978 return NOTIFY_OK;
1979 }
1980
1981 void azx_notifier_register(struct azx *chip)
1982 {
1983 chip->reboot_notifier.notifier_call = azx_halt;
1984 register_reboot_notifier(&chip->reboot_notifier);
1985 }
1986 EXPORT_SYMBOL_GPL(azx_notifier_register);
1987
1988 void azx_notifier_unregister(struct azx *chip)
1989 {
1990 if (chip->reboot_notifier.notifier_call)
1991 unregister_reboot_notifier(&chip->reboot_notifier);
1992 }
1993 EXPORT_SYMBOL_GPL(azx_notifier_unregister);
1994
1995 MODULE_LICENSE("GPL");
1996 MODULE_DESCRIPTION("Common HDA driver functions");