1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Implementation of primary alsa driver code base for Intel HD Audio.
5 *
6 * Copyright(c) 2004 Intel Corporation
7 *
8 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9 * PeiSen Hou <pshou@realtek.com.tw>
10 */
11
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19
20 #ifdef CONFIG_X86
21 /* for art-tsc conversion */
22 #include <asm/tsc.h>
23 #endif
24
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include <sound/pcm_params.h>
28 #include "hda_controller.h"
29 #include "hda_local.h"
30
31 #define CREATE_TRACE_POINTS
32 #include "hda_controller_trace.h"
33
34 /* DSP lock helpers */
35 #define dsp_lock(dev) snd_hdac_dsp_lock(azx_stream(dev))
36 #define dsp_unlock(dev) snd_hdac_dsp_unlock(azx_stream(dev))
37 #define dsp_is_locked(dev) snd_hdac_stream_is_locked(azx_stream(dev))
38
39 /* assign a stream for the PCM */
40 static inline struct azx_dev *
41 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
42 {
43 struct hdac_stream *s;
44
45 s = snd_hdac_stream_assign(azx_bus(chip), substream);
46 if (!s)
47 return NULL;
48 return stream_to_azx_dev(s);
49 }
50
51 /* release the assigned stream */
52 static inline void azx_release_device(struct azx_dev *azx_dev)
53 {
54 snd_hdac_stream_release(azx_stream(azx_dev));
55 }
56
57 static inline struct hda_pcm_stream *
58 to_hda_pcm_stream(struct snd_pcm_substream *substream)
59 {
60 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
61 return &apcm->info->stream[substream->stream];
62 }
63
64 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
65 u64 nsec)
66 {
67 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
68 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
69 u64 codec_frames, codec_nsecs;
70
71 if (!hinfo->ops.get_delay)
72 return nsec;
73
74 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
75 codec_nsecs = div_u64(codec_frames * 1000000000LL,
76 substream->runtime->rate);
77
78 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
79 return nsec + codec_nsecs;
80
81 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
82 }
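/*
 * Worked example with hypothetical figures: if ops.get_delay reports
 * 96 codec frames for a 48 kHz stream, codec_nsecs = 96 * 10^9 / 48000
 * = 2,000,000 ns (2 ms); it is added to the timestamp for capture and
 * subtracted (clamped at 0) for playback, as implemented above.
 */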
83
84 /*
85 * PCM ops
86 */
87
88 static int azx_pcm_close(struct snd_pcm_substream *substream)
89 {
90 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
91 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
92 struct azx *chip = apcm->chip;
93 struct azx_dev *azx_dev = get_azx_dev(substream);
94
95 trace_azx_pcm_close(chip, azx_dev);
96 mutex_lock(&chip->open_mutex);
97 azx_release_device(azx_dev);
98 if (hinfo->ops.close)
99 hinfo->ops.close(hinfo, apcm->codec, substream);
100 snd_hda_power_down(apcm->codec);
101 mutex_unlock(&chip->open_mutex);
102 snd_hda_codec_pcm_put(apcm->info);
103 return 0;
104 }
105
106 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
107 struct snd_pcm_hw_params *hw_params)
108 {
109 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
110 struct azx *chip = apcm->chip;
111 struct azx_dev *azx_dev = get_azx_dev(substream);
112 struct hdac_stream *hdas = azx_stream(azx_dev);
113 int ret = 0;
114
115 trace_azx_pcm_hw_params(chip, azx_dev);
116 dsp_lock(azx_dev);
117 if (dsp_is_locked(azx_dev)) {
118 ret = -EBUSY;
119 goto unlock;
120 }
121
122 /* Set up BDLEs here, return -ENOMEM if too many BDLEs are required */
123 hdas->bufsize = params_buffer_bytes(hw_params);
124 hdas->period_bytes = params_period_bytes(hw_params);
125 hdas->format_val = 0;
126 hdas->no_period_wakeup =
127 (hw_params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
128 (hw_params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
129 if (snd_hdac_stream_setup_periods(hdas) < 0)
130 ret = -ENOMEM;
131
132 unlock:
133 dsp_unlock(azx_dev);
134 return ret;
135 }
136
137 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
138 {
139 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
140 struct azx_dev *azx_dev = get_azx_dev(substream);
141 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
142
143 /* reset BDL address */
144 dsp_lock(azx_dev);
145 if (!dsp_is_locked(azx_dev))
146 snd_hdac_stream_cleanup(azx_stream(azx_dev));
147
148 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
149
150 azx_stream(azx_dev)->prepared = 0;
151 dsp_unlock(azx_dev);
152 return 0;
153 }
154
155 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
156 {
157 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
158 struct azx *chip = apcm->chip;
159 struct azx_dev *azx_dev = get_azx_dev(substream);
160 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
161 struct snd_pcm_runtime *runtime = substream->runtime;
162 unsigned int format_val, stream_tag, bits;
163 int err;
164 struct hda_spdif_out *spdif =
165 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
166 unsigned short ctls = spdif ? spdif->ctls : 0;
167
168 trace_azx_pcm_prepare(chip, azx_dev);
169 dsp_lock(azx_dev);
170 if (dsp_is_locked(azx_dev)) {
171 err = -EBUSY;
172 goto unlock;
173 }
174
175 snd_hdac_stream_reset(azx_stream(azx_dev));
176 bits = snd_hdac_stream_format_bits(runtime->format, SNDRV_PCM_SUBFORMAT_STD, hinfo->maxbps);
177
178 format_val = snd_hdac_spdif_stream_format(runtime->channels, bits, runtime->rate, ctls);
179 if (!format_val) {
180 dev_err(chip->card->dev,
181 "invalid format_val, rate=%d, ch=%d, format=%d\n",
182 runtime->rate, runtime->channels, runtime->format);
183 err = -EINVAL;
184 goto unlock;
185 }
186
187 err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
188 if (err < 0)
189 goto unlock;
190
191 snd_hdac_stream_setup(azx_stream(azx_dev), false);
192
193 stream_tag = azx_dev->core.stream_tag;
194 /* CA-IBG chips need the playback stream starting from 1 */
195 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
196 stream_tag > chip->capture_streams)
197 stream_tag -= chip->capture_streams;
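/*
 * Hypothetical example of the workaround above: with 4 capture streams,
 * a playback stream carrying stream_tag 5 is reported to the codec as
 * tag 1 (5 - 4).
 */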
198 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
199 azx_dev->core.format_val, substream);
200
201 unlock:
202 if (!err)
203 azx_stream(azx_dev)->prepared = 1;
204 dsp_unlock(azx_dev);
205 return err;
206 }
207
208 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
209 {
210 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
211 struct azx *chip = apcm->chip;
212 struct hdac_bus *bus = azx_bus(chip);
213 struct azx_dev *azx_dev;
214 struct snd_pcm_substream *s;
215 struct hdac_stream *hstr;
216 bool start;
217 int sbits = 0;
218 int sync_reg;
219
220 azx_dev = get_azx_dev(substream);
221 trace_azx_pcm_trigger(chip, azx_dev, cmd);
222
223 hstr = azx_stream(azx_dev);
224 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
225 sync_reg = AZX_REG_OLD_SSYNC;
226 else
227 sync_reg = AZX_REG_SSYNC;
228
229 if (dsp_is_locked(azx_dev) || !hstr->prepared)
230 return -EPIPE;
231
232 switch (cmd) {
233 case SNDRV_PCM_TRIGGER_START:
234 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
235 case SNDRV_PCM_TRIGGER_RESUME:
236 start = true;
237 break;
238 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
239 case SNDRV_PCM_TRIGGER_SUSPEND:
240 case SNDRV_PCM_TRIGGER_STOP:
241 start = false;
242 break;
243 default:
244 return -EINVAL;
245 }
246
247 snd_pcm_group_for_each_entry(s, substream) {
248 if (s->pcm->card != substream->pcm->card)
249 continue;
250 azx_dev = get_azx_dev(s);
251 sbits |= 1 << azx_dev->core.index;
252 snd_pcm_trigger_done(s, substream);
253 }
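/*
 * Example with assumed indices: if two linked substreams map to
 * hdac_stream indices 4 and 5, sbits becomes (1 << 4) | (1 << 5) = 0x30,
 * which is then applied to the SSYNC register below.
 */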
254
255 spin_lock(&bus->reg_lock);
256
257 /* first, set SYNC bits of corresponding streams */
258 snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
259
260 snd_pcm_group_for_each_entry(s, substream) {
261 if (s->pcm->card != substream->pcm->card)
262 continue;
263 azx_dev = get_azx_dev(s);
264 if (start) {
265 azx_dev->insufficient = 1;
266 snd_hdac_stream_start(azx_stream(azx_dev));
267 } else {
268 snd_hdac_stream_stop(azx_stream(azx_dev));
269 }
270 }
271 spin_unlock(&bus->reg_lock);
272
273 snd_hdac_stream_sync(hstr, start, sbits);
274
275 spin_lock(&bus->reg_lock);
276 /* reset SYNC bits */
277 snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
278 snd_hdac_stream_timecounter_init(hstr, sbits, start);
279 spin_unlock(&bus->reg_lock);
280 return 0;
281 }
282
283 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
284 {
285 return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
286 }
287 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
288
289 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
290 {
291 return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
292 }
293 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
294
295 unsigned int azx_get_position(struct azx *chip,
296 struct azx_dev *azx_dev)
297 {
298 struct snd_pcm_substream *substream = azx_dev->core.substream;
299 unsigned int pos;
300 int stream = substream->stream;
301 int delay = 0;
302
303 if (chip->get_position[stream])
304 pos = chip->get_position[stream](chip, azx_dev);
305 else /* use the position buffer as default */
306 pos = azx_get_pos_posbuf(chip, azx_dev);
307
308 if (pos >= azx_dev->core.bufsize)
309 pos = 0;
310
311 if (substream->runtime) {
312 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
313 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
314
315 if (chip->get_delay[stream])
316 delay += chip->get_delay[stream](chip, azx_dev, pos);
317 if (hinfo->ops.get_delay)
318 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
319 substream);
320 substream->runtime->delay = delay;
321 }
322
323 trace_azx_get_position(chip, azx_dev, pos, delay);
324 return pos;
325 }
326 EXPORT_SYMBOL_GPL(azx_get_position);
327
328 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
329 {
330 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
331 struct azx *chip = apcm->chip;
332 struct azx_dev *azx_dev = get_azx_dev(substream);
333 return bytes_to_frames(substream->runtime,
334 azx_get_position(chip, azx_dev));
335 }
336
337 /*
338 * azx_scale64: Scale base by mult/div while not overflowing sanely
339 *
340 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
341 *
342 * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K
343 * seconds, which is about 384307 s, i.e. ~4.5 days.
344 *
345 * This scales the calculation so that overflow only happens after
346 * 2^64 / 48000 secs, which is pretty large!
347 *
348 * In the calculation below:
349 * base may overflow, but since there isn't any additional division
350 * performed on base it's OK
351 * rem can't overflow because both are 32-bit values
352 */
353
354 #ifdef CONFIG_X86
355 static u64 azx_scale64(u64 base, u32 num, u32 den)
356 {
357 u64 rem;
358
359 rem = do_div(base, den);
360
361 base *= num;
362 rem *= num;
363
364 do_div(rem, den);
365
366 return base + rem;
367 }
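/*
 * Worked example with made-up inputs: azx_scale64(1234567, NSEC_PER_SEC,
 * 48000) splits 1234567 into 25 * 48000 + 34567 and returns
 * 25 * 10^9 + (34567 * 10^9) / 48000 = 25,720,145,833 ns -- the same value
 * as the naive base * num / den, but without the intermediate overflow
 * risk for large counters.
 */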
368
369 static int azx_get_sync_time(ktime_t *device,
370 struct system_counterval_t *system, void *ctx)
371 {
372 struct snd_pcm_substream *substream = ctx;
373 struct azx_dev *azx_dev = get_azx_dev(substream);
374 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
375 struct azx *chip = apcm->chip;
376 struct snd_pcm_runtime *runtime;
377 u64 ll_counter, ll_counter_l, ll_counter_h;
378 u64 tsc_counter, tsc_counter_l, tsc_counter_h;
379 u32 wallclk_ctr, wallclk_cycles;
380 bool direction;
381 u32 dma_select;
382 u32 timeout;
383 u32 retry_count = 0;
384
385 runtime = substream->runtime;
386
387 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
388 direction = 1;
389 else
390 direction = 0;
391
392 /* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
393 do {
394 timeout = 100;
395 dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
396 (azx_dev->core.stream_tag - 1);
397 snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
398
399 /* Enable the capture */
400 snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
401
402 while (timeout) {
403 if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
404 GTSCC_TSCCD_MASK)
405 break;
406
407 timeout--;
408 }
409
410 if (!timeout) {
411 dev_err(chip->card->dev, "GTSCC capture timed out!\n");
412 return -EIO;
413 }
414
415 /* Read wall clock counter */
416 wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
417
418 /* Read TSC counter */
419 tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
420 tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
421
422 /* Read Link counter */
423 ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
424 ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
425
426 /* Ack: registers read done */
427 snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
428
429 tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
430 tsc_counter_l;
431
432 ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
433 wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
434
435 /*
436 * An error can occur near frame "rollover". The clocks-in-frame
437 * value indicates whether this error may have occurred.
438 * Here we use a margin of 10 cycles, i.e.
439 * HDA_MAX_CYCLE_OFFSET.
440 */
441 if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
442 && wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
443 break;
444
445 /*
446 * Sleep before we read again, else we may again get a
447 * value close to MAX_CYCLE. Sleep for a different amount
448 * of time on each retry so we don't hit the same number again.
449 */
450 udelay(retry_count++);
451
452 } while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
453
454 if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
455 dev_err_ratelimited(chip->card->dev,
456 "Error in WALFCC cycle count\n");
457 return -EIO;
458 }
459
460 *device = ns_to_ktime(azx_scale64(ll_counter,
461 NSEC_PER_SEC, runtime->rate));
462 *device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
463 ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
464
465 system->cycles = tsc_counter;
466 system->cs_id = CSID_X86_ART;
467
468 return 0;
469 }
470
471 #else
472 static int azx_get_sync_time(ktime_t *device,
473 struct system_counterval_t *system, void *ctx)
474 {
475 return -ENXIO;
476 }
477 #endif
478
479 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
480 struct system_device_crosststamp *xtstamp)
481 {
482 return get_device_system_crosststamp(azx_get_sync_time,
483 substream, NULL, xtstamp);
484 }
485
486 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
487 struct snd_pcm_audio_tstamp_config *ts)
488 {
489 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
490 if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
491 return true;
492
493 return false;
494 }
495
496 static int azx_get_time_info(struct snd_pcm_substream *substream,
497 struct timespec64 *system_ts, struct timespec64 *audio_ts,
498 struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
499 struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
500 {
501 struct azx_dev *azx_dev = get_azx_dev(substream);
502 struct snd_pcm_runtime *runtime = substream->runtime;
503 struct system_device_crosststamp xtstamp;
504 int ret;
505 u64 nsec;
506
507 if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
508 (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
509
510 snd_pcm_gettime(substream->runtime, system_ts);
511
512 nsec = timecounter_read(&azx_dev->core.tc);
513 if (audio_tstamp_config->report_delay)
514 nsec = azx_adjust_codec_delay(substream, nsec);
515
516 *audio_ts = ns_to_timespec64(nsec);
517
518 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
519 audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
520 audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
521
522 } else if (is_link_time_supported(runtime, audio_tstamp_config)) {
523
524 ret = azx_get_crosststamp(substream, &xtstamp);
525 if (ret)
526 return ret;
527
528 switch (runtime->tstamp_type) {
529 case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
530 return -EINVAL;
531
532 case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
533 *system_ts = ktime_to_timespec64(xtstamp.sys_monoraw);
534 break;
535
536 default:
537 *system_ts = ktime_to_timespec64(xtstamp.sys_realtime);
538 break;
539
540 }
541
542 *audio_ts = ktime_to_timespec64(xtstamp.device);
543
544 audio_tstamp_report->actual_type =
545 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
546 audio_tstamp_report->accuracy_report = 1;
547 /* 24 MHz WallClock == 42ns resolution */
548 audio_tstamp_report->accuracy = 42;
549
550 } else {
551 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
552 }
553
554 return 0;
555 }
556
557 static const struct snd_pcm_hardware azx_pcm_hw = {
558 .info = (SNDRV_PCM_INFO_MMAP |
559 SNDRV_PCM_INFO_INTERLEAVED |
560 SNDRV_PCM_INFO_BLOCK_TRANSFER |
561 SNDRV_PCM_INFO_MMAP_VALID |
562 /* No full-resume yet implemented */
563 /* SNDRV_PCM_INFO_RESUME |*/
564 SNDRV_PCM_INFO_PAUSE |
565 SNDRV_PCM_INFO_SYNC_START |
566 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
567 SNDRV_PCM_INFO_HAS_LINK_ATIME |
568 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
569 .formats = SNDRV_PCM_FMTBIT_S16_LE,
570 .rates = SNDRV_PCM_RATE_48000,
571 .rate_min = 48000,
572 .rate_max = 48000,
573 .channels_min = 2,
574 .channels_max = 2,
575 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
576 .period_bytes_min = 128,
577 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
578 .periods_min = 2,
579 .periods_max = AZX_MAX_FRAG,
580 .fifo_size = 0,
581 };
582
583 static int azx_pcm_open(struct snd_pcm_substream *substream)
584 {
585 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
586 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
587 struct azx *chip = apcm->chip;
588 struct azx_dev *azx_dev;
589 struct snd_pcm_runtime *runtime = substream->runtime;
590 int err;
591 int buff_step;
592
593 snd_hda_codec_pcm_get(apcm->info);
594 mutex_lock(&chip->open_mutex);
595 azx_dev = azx_assign_device(chip, substream);
596 trace_azx_pcm_open(chip, azx_dev);
597 if (azx_dev == NULL) {
598 err = -EBUSY;
599 goto unlock;
600 }
601 runtime->private_data = azx_dev;
602
603 runtime->hw = azx_pcm_hw;
604 if (chip->gts_present)
605 runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
606 runtime->hw.channels_min = hinfo->channels_min;
607 runtime->hw.channels_max = hinfo->channels_max;
608 runtime->hw.formats = hinfo->formats;
609 runtime->hw.rates = hinfo->rates;
610 snd_pcm_limit_hw_rates(runtime);
611 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
612
613 /* avoid wrap-around of the 24 MHz wall clock (32-bit, wraps after ~178.9 s) */
614 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
615 20,
616 178000000);
617
618 if (chip->align_buffer_size)
619 /* constrain buffer sizes to be a multiple of 128
620 bytes. This is more efficient in terms of memory
621 access but isn't required by the HDA spec and
622 prevents users from specifying exact period/buffer
623 sizes. For example, at 44.1 kHz a period size set
624 to 20 ms will be rounded to 19.59 ms. */
625 buff_step = 128;
626 else
627 /* Don't enforce steps on buffer sizes; they still need
628 to be a multiple of 4 bytes (HDA spec). Tested on Intel
629 HDA controllers; may not work on all devices, in which
630 case this option needs to be disabled. */
631 buff_step = 4;
632
633 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
634 buff_step);
635 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
636 buff_step);
637 snd_hda_power_up(apcm->codec);
638 if (hinfo->ops.open)
639 err = hinfo->ops.open(hinfo, apcm->codec, substream);
640 else
641 err = -ENODEV;
642 if (err < 0) {
643 azx_release_device(azx_dev);
644 goto powerdown;
645 }
646 snd_pcm_limit_hw_rates(runtime);
647 /* sanity check */
648 if (snd_BUG_ON(!runtime->hw.channels_min) ||
649 snd_BUG_ON(!runtime->hw.channels_max) ||
650 snd_BUG_ON(!runtime->hw.formats) ||
651 snd_BUG_ON(!runtime->hw.rates)) {
652 azx_release_device(azx_dev);
653 if (hinfo->ops.close)
654 hinfo->ops.close(hinfo, apcm->codec, substream);
655 err = -EINVAL;
656 goto powerdown;
657 }
658
659 /* disable LINK_ATIME timestamps for capture streams
660 until we figure out how to handle digital inputs */
661 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
662 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
663 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
664 }
665
666 snd_pcm_set_sync(substream);
667 mutex_unlock(&chip->open_mutex);
668 return 0;
669
670 powerdown:
671 snd_hda_power_down(apcm->codec);
672 unlock:
673 mutex_unlock(&chip->open_mutex);
674 snd_hda_codec_pcm_put(apcm->info);
675 return err;
676 }
677
678 static const struct snd_pcm_ops azx_pcm_ops = {
679 .open = azx_pcm_open,
680 .close = azx_pcm_close,
681 .hw_params = azx_pcm_hw_params,
682 .hw_free = azx_pcm_hw_free,
683 .prepare = azx_pcm_prepare,
684 .trigger = azx_pcm_trigger,
685 .pointer = azx_pcm_pointer,
686 .get_time_info = azx_get_time_info,
687 };
688
689 static void azx_pcm_free(struct snd_pcm *pcm)
690 {
691 struct azx_pcm *apcm = pcm->private_data;
692 if (apcm) {
693 list_del(&apcm->list);
694 apcm->info->pcm = NULL;
695 kfree(apcm);
696 }
697 }
698
699 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
700
701 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
702 struct hda_pcm *cpcm)
703 {
704 struct hdac_bus *bus = &_bus->core;
705 struct azx *chip = bus_to_azx(bus);
706 struct snd_pcm *pcm;
707 struct azx_pcm *apcm;
708 int pcm_dev = cpcm->device;
709 unsigned int size;
710 int s, err;
711 int type = SNDRV_DMA_TYPE_DEV_SG;
712
713 list_for_each_entry(apcm, &chip->pcm_list, list) {
714 if (apcm->pcm->device == pcm_dev) {
715 dev_err(chip->card->dev, "PCM %d already exists\n",
716 pcm_dev);
717 return -EBUSY;
718 }
719 }
720 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
721 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
722 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
723 &pcm);
724 if (err < 0)
725 return err;
726 strscpy(pcm->name, cpcm->name, sizeof(pcm->name));
727 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
728 if (apcm == NULL) {
729 snd_device_free(chip->card, pcm);
730 return -ENOMEM;
731 }
732 apcm->chip = chip;
733 apcm->pcm = pcm;
734 apcm->codec = codec;
735 apcm->info = cpcm;
736 pcm->private_data = apcm;
737 pcm->private_free = azx_pcm_free;
738 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
739 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
740 list_add_tail(&apcm->list, &chip->pcm_list);
741 cpcm->pcm = pcm;
742 for (s = 0; s < 2; s++) {
743 if (cpcm->stream[s].substreams)
744 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
745 }
746 /* buffer pre-allocation */
747 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
748 if (size > MAX_PREALLOC_SIZE)
749 size = MAX_PREALLOC_SIZE;
750 if (chip->uc_buffer)
751 type = SNDRV_DMA_TYPE_DEV_WC_SG;
752 snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
753 size, MAX_PREALLOC_SIZE);
754 return 0;
755 }
756
757 static unsigned int azx_command_addr(u32 cmd)
758 {
759 unsigned int addr = cmd >> 28;
760
761 if (addr >= AZX_MAX_CODECS) {
762 snd_BUG();
763 addr = 0;
764 }
765
766 return addr;
767 }
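/*
 * Example: the codec address lives in bits 31:28 of the verb, so a
 * (hypothetical) cmd of 0x201F0500 yields addr 0x2; out-of-range
 * addresses are treated as a bug and mapped to 0.
 */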
768
769 /* receive a response */
770 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
771 unsigned int *res)
772 {
773 struct azx *chip = bus_to_azx(bus);
774 struct hda_bus *hbus = &chip->bus;
775 int err;
776
777 again:
778 err = snd_hdac_bus_get_response(bus, addr, res);
779 if (!err)
780 return 0;
781
782 if (hbus->no_response_fallback)
783 return -EIO;
784
785 if (!bus->polling_mode) {
786 dev_warn(chip->card->dev,
787 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
788 bus->last_cmd[addr]);
789 bus->polling_mode = 1;
790 goto again;
791 }
792
793 if (chip->msi) {
794 dev_warn(chip->card->dev,
795 "No response from codec, disabling MSI: last cmd=0x%08x\n",
796 bus->last_cmd[addr]);
797 if (chip->ops->disable_msi_reset_irq &&
798 chip->ops->disable_msi_reset_irq(chip) < 0)
799 return -EIO;
800 goto again;
801 }
802
803 if (chip->probing) {
804 /* If this critical timeout happens during the codec probing
805 * phase, this is likely an access to a non-existing codec
806 * slot. Better to return an error and reset the system.
807 */
808 return -EIO;
809 }
810
811 /* no fallback mechanism? */
812 if (!chip->fallback_to_single_cmd)
813 return -EIO;
814
815 /* a fatal communication error; need either to reset or to fall back
816 * to the single_cmd mode
817 */
818 if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
819 hbus->response_reset = 1;
820 dev_err(chip->card->dev,
821 "No response from codec, resetting bus: last cmd=0x%08x\n",
822 bus->last_cmd[addr]);
823 return -EAGAIN; /* give a chance to retry */
824 }
825
826 dev_err(chip->card->dev,
827 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
828 bus->last_cmd[addr]);
829 chip->single_cmd = 1;
830 hbus->response_reset = 0;
831 snd_hdac_bus_stop_cmd_io(bus);
832 return -EIO;
833 }
834
835 /*
836 * Use the single immediate command instead of CORB/RIRB for simplicity
837 *
838 * Note: according to Intel, this is not the preferred use. The command was
839 * intended for the BIOS only, and may get confused with unsolicited
840 * responses. So we shouldn't use it for normal operation from the
841 * driver.
842 * The code is left here, however, for debugging/testing purposes.
843 */
844
845 /* receive a response */
846 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
847 {
848 int timeout = 50;
849
850 while (timeout--) {
851 /* check IRV busy bit */
852 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
853 /* reuse rirb.res as the response return value */
854 azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
855 return 0;
856 }
857 udelay(1);
858 }
859 if (printk_ratelimit())
860 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
861 azx_readw(chip, IRS));
862 azx_bus(chip)->rirb.res[addr] = -1;
863 return -EIO;
864 }
865
866 /* send a command */
867 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
868 {
869 struct azx *chip = bus_to_azx(bus);
870 unsigned int addr = azx_command_addr(val);
871 int timeout = 50;
872
873 bus->last_cmd[azx_command_addr(val)] = val;
874 while (timeout--) {
875 /* check ICB busy bit */
876 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
877 /* Clear IRV valid bit */
878 azx_writew(chip, IRS, azx_readw(chip, IRS) |
879 AZX_IRS_VALID);
880 azx_writel(chip, IC, val);
881 azx_writew(chip, IRS, azx_readw(chip, IRS) |
882 AZX_IRS_BUSY);
883 return azx_single_wait_for_response(chip, addr);
884 }
885 udelay(1);
886 }
887 if (printk_ratelimit())
888 dev_dbg(chip->card->dev,
889 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
890 azx_readw(chip, IRS), val);
891 return -EIO;
892 }
893
894 /* receive a response */
895 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
896 unsigned int *res)
897 {
898 if (res)
899 *res = bus->rirb.res[addr];
900 return 0;
901 }
902
903 /*
904 * The below are the main callbacks from hda_codec.
905 *
906 * They are just the skeleton to call sub-callbacks according to the
907 * current setting of chip->single_cmd.
908 */
909
910 /* send a command */
911 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
912 {
913 struct azx *chip = bus_to_azx(bus);
914
915 if (chip->disabled)
916 return 0;
917 if (chip->single_cmd || bus->use_pio_for_commands)
918 return azx_single_send_cmd(bus, val);
919 else
920 return snd_hdac_bus_send_cmd(bus, val);
921 }
922
923 /* get a response */
924 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
925 unsigned int *res)
926 {
927 struct azx *chip = bus_to_azx(bus);
928
929 if (chip->disabled)
930 return 0;
931 if (chip->single_cmd || bus->use_pio_for_commands)
932 return azx_single_get_response(bus, addr, res);
933 else
934 return azx_rirb_get_response(bus, addr, res);
935 }
936
937 static const struct hdac_bus_ops bus_core_ops = {
938 .command = azx_send_cmd,
939 .get_response = azx_get_response,
940 };
941
942 #ifdef CONFIG_SND_HDA_DSP_LOADER
943 /*
944 * DSP loading code (e.g. for CA0132)
945 */
946
947 /* use the first stream for loading DSP */
948 static struct azx_dev *
949 azx_get_dsp_loader_dev(struct azx *chip)
950 {
951 struct hdac_bus *bus = azx_bus(chip);
952 struct hdac_stream *s;
953
954 list_for_each_entry(s, &bus->stream_list, list)
955 if (s->index == chip->playback_index_offset)
956 return stream_to_azx_dev(s);
957
958 return NULL;
959 }
960
961 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
962 unsigned int byte_size,
963 struct snd_dma_buffer *bufp)
964 {
965 struct hdac_bus *bus = &codec->bus->core;
966 struct azx *chip = bus_to_azx(bus);
967 struct azx_dev *azx_dev;
968 struct hdac_stream *hstr;
969 bool saved = false;
970 int err;
971
972 azx_dev = azx_get_dsp_loader_dev(chip);
973 hstr = azx_stream(azx_dev);
974 spin_lock_irq(&bus->reg_lock);
975 if (hstr->opened) {
976 chip->saved_azx_dev = *azx_dev;
977 saved = true;
978 }
979 spin_unlock_irq(&bus->reg_lock);
980
981 err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
982 if (err < 0) {
983 spin_lock_irq(&bus->reg_lock);
984 if (saved)
985 *azx_dev = chip->saved_azx_dev;
986 spin_unlock_irq(&bus->reg_lock);
987 return err;
988 }
989
990 hstr->prepared = 0;
991 return err;
992 }
993 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
994
995 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
996 {
997 struct hdac_bus *bus = &codec->bus->core;
998 struct azx *chip = bus_to_azx(bus);
999 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1000
1001 snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
1002 }
1003 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
1004
1005 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1006 struct snd_dma_buffer *dmab)
1007 {
1008 struct hdac_bus *bus = &codec->bus->core;
1009 struct azx *chip = bus_to_azx(bus);
1010 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1011 struct hdac_stream *hstr = azx_stream(azx_dev);
1012
1013 if (!dmab->area || !hstr->locked)
1014 return;
1015
1016 snd_hdac_dsp_cleanup(hstr, dmab);
1017 spin_lock_irq(&bus->reg_lock);
1018 if (hstr->opened)
1019 *azx_dev = chip->saved_azx_dev;
1020 hstr->locked = false;
1021 spin_unlock_irq(&bus->reg_lock);
1022 }
1023 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1024 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1025
1026 /*
1027 * reset and start the controller registers
1028 */
1029 void azx_init_chip(struct azx *chip, bool full_reset)
1030 {
1031 if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1032 /* correct RINTCNT for CXT */
1033 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1034 azx_writew(chip, RINTCNT, 0xc0);
1035 }
1036 }
1037 EXPORT_SYMBOL_GPL(azx_init_chip);
1038
1039 void azx_stop_all_streams(struct azx *chip)
1040 {
1041 struct hdac_bus *bus = azx_bus(chip);
1042
1043 snd_hdac_stop_streams(bus);
1044 }
1045 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1046
1047 void azx_stop_chip(struct azx *chip)
1048 {
1049 snd_hdac_bus_stop_chip(azx_bus(chip));
1050 }
1051 EXPORT_SYMBOL_GPL(azx_stop_chip);
1052
1053 /*
1054 * interrupt handler
1055 */
1056 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1057 {
1058 struct azx *chip = bus_to_azx(bus);
1059 struct azx_dev *azx_dev = stream_to_azx_dev(s);
1060
1061 /* check whether this IRQ is really acceptable */
1062 if (!chip->ops->position_check ||
1063 chip->ops->position_check(chip, azx_dev)) {
1064 spin_unlock(&bus->reg_lock);
1065 snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1066 spin_lock(&bus->reg_lock);
1067 }
1068 }
1069
1070 irqreturn_t azx_interrupt(int irq, void *dev_id)
1071 {
1072 struct azx *chip = dev_id;
1073 struct hdac_bus *bus = azx_bus(chip);
1074 u32 status;
1075 bool active, handled = false;
1076 int repeat = 0; /* count for avoiding endless loop */
1077
1078 if (azx_has_pm_runtime(chip))
1079 if (!pm_runtime_active(chip->card->dev))
1080 return IRQ_NONE;
1081
1082 spin_lock(&bus->reg_lock);
1083
1084 if (chip->disabled)
1085 goto unlock;
1086
1087 do {
1088 status = azx_readl(chip, INTSTS);
1089 if (status == 0 || status == 0xffffffff)
1090 break;
1091
1092 handled = true;
1093 active = false;
1094 if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1095 active = true;
1096
1097 status = azx_readb(chip, RIRBSTS);
1098 if (status & RIRB_INT_MASK) {
1099 /*
1100 * Clearing the interrupt status here ensures that no
1101 * interrupt gets masked after the RIRB wp is read in
1102 * snd_hdac_bus_update_rirb. This avoids a possible
1103 * race condition where codec response in RIRB may
1104 * remain unserviced by IRQ, eventually falling back
1105 * to polling mode in azx_rirb_get_response.
1106 */
1107 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1108 active = true;
1109 if (status & RIRB_INT_RESPONSE) {
1110 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1111 udelay(80);
1112 snd_hdac_bus_update_rirb(bus);
1113 }
1114 }
1115 } while (active && ++repeat < 10);
1116
1117 unlock:
1118 spin_unlock(&bus->reg_lock);
1119
1120 return IRQ_RETVAL(handled);
1121 }
1122 EXPORT_SYMBOL_GPL(azx_interrupt);
1123
1124 /*
1125 * Codec interface
1126 */
1127
1128 /*
1129 * Probe the given codec address
1130 */
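/*
 * For illustration, probing codec address 2 sends the verb
 * (2 << 28) | (AC_NODE_ROOT << 20) | (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID,
 * i.e. "read the vendor/device ID parameter of the root node", and an
 * error or an unchanged (-1) response is taken to mean "no codec in
 * that slot".
 */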
1131 static int probe_codec(struct azx *chip, int addr)
1132 {
1133 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1134 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1135 struct hdac_bus *bus = azx_bus(chip);
1136 int err;
1137 unsigned int res = -1;
1138
1139 mutex_lock(&bus->cmd_mutex);
1140 chip->probing = 1;
1141 azx_send_cmd(bus, cmd);
1142 err = azx_get_response(bus, addr, &res);
1143 chip->probing = 0;
1144 mutex_unlock(&bus->cmd_mutex);
1145 if (err < 0 || res == -1)
1146 return -EIO;
1147 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1148 return 0;
1149 }
1150
1151 void snd_hda_bus_reset(struct hda_bus *bus)
1152 {
1153 struct azx *chip = bus_to_azx(&bus->core);
1154
1155 bus->in_reset = 1;
1156 azx_stop_chip(chip);
1157 azx_init_chip(chip, true);
1158 if (bus->core.chip_init)
1159 snd_hda_bus_reset_codecs(bus);
1160 bus->in_reset = 0;
1161 }
1162
1163 /* HD-audio bus initialization */
1164 int azx_bus_init(struct azx *chip, const char *model)
1165 {
1166 struct hda_bus *bus = &chip->bus;
1167 int err;
1168
1169 err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
1170 if (err < 0)
1171 return err;
1172
1173 bus->card = chip->card;
1174 mutex_init(&bus->prepare_mutex);
1175 bus->pci = chip->pci;
1176 bus->modelname = model;
1177 bus->mixer_assigned = -1;
1178 bus->core.snoop = azx_snoop(chip);
1179 if (chip->get_position[0] != azx_get_pos_lpib ||
1180 chip->get_position[1] != azx_get_pos_lpib)
1181 bus->core.use_posbuf = true;
1182 bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1183 if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1184 bus->core.corbrp_self_clear = true;
1185
1186 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1187 bus->core.align_bdle_4k = true;
1188
1189 if (chip->driver_caps & AZX_DCAPS_PIO_COMMANDS)
1190 bus->core.use_pio_for_commands = true;
1191
1192 /* enable the sync_write flag by default for stable communication */
1193 bus->core.sync_write = 1;
1194
1195 return 0;
1196 }
1197 EXPORT_SYMBOL_GPL(azx_bus_init);
1198
1199 /* Probe codecs */
1200 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1201 {
1202 struct hdac_bus *bus = azx_bus(chip);
1203 int c, codecs, err;
1204
1205 codecs = 0;
1206 if (!max_slots)
1207 max_slots = AZX_DEFAULT_CODECS;
1208
1209 /* First try to probe all given codec slots */
1210 for (c = 0; c < max_slots; c++) {
1211 if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1212 if (probe_codec(chip, c) < 0) {
1213 /* Some BIOSen give you wrong codec addresses
1214 * that don't exist
1215 */
1216 dev_warn(chip->card->dev,
1217 "Codec #%d probe error; disabling it...\n", c);
1218 bus->codec_mask &= ~(1 << c);
1219 /* no codecs */
1220 if (bus->codec_mask == 0)
1221 break;
1222 /* Worse, accessing a non-existing
1223 * codec often screws up the controller chip
1224 * and disturbs further communication.
1225 * Thus if an error occurs during probing,
1226 * it is better to reset the controller chip
1227 * to get back to a sane state.
1228 */
1229 azx_stop_chip(chip);
1230 azx_init_chip(chip, true);
1231 }
1232 }
1233 }
1234
1235 /* Then create codec instances */
1236 for (c = 0; c < max_slots; c++) {
1237 if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1238 struct hda_codec *codec;
1239 err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1240 if (err < 0)
1241 continue;
1242 codec->jackpoll_interval = chip->jackpoll_interval;
1243 codec->beep_mode = chip->beep_mode;
1244 codec->ctl_dev_id = chip->ctl_dev_id;
1245 codecs++;
1246 }
1247 }
1248 if (!codecs) {
1249 dev_err(chip->card->dev, "no codecs initialized\n");
1250 return -ENXIO;
1251 }
1252 return 0;
1253 }
1254 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1255
1256 /* configure each codec instance */
1257 int azx_codec_configure(struct azx *chip)
1258 {
1259 struct hda_codec *codec, *next;
1260 int success = 0;
1261
1262 list_for_each_codec(codec, &chip->bus) {
1263 if (!snd_hda_codec_configure(codec))
1264 success++;
1265 }
1266
1267 if (success) {
1268 /* unregister failed codecs if any codec has been probed */
1269 list_for_each_codec_safe(codec, next, &chip->bus) {
1270 if (!codec->configured) {
1271 codec_err(codec, "Unable to configure, disabling\n");
1272 snd_hdac_device_unregister(&codec->core);
1273 }
1274 }
1275 }
1276
1277 return success ? 0 : -ENODEV;
1278 }
1279 EXPORT_SYMBOL_GPL(azx_codec_configure);
1280
1281 static int stream_direction(struct azx *chip, unsigned char index)
1282 {
1283 if (index >= chip->capture_index_offset &&
1284 index < chip->capture_index_offset + chip->capture_streams)
1285 return SNDRV_PCM_STREAM_CAPTURE;
1286 return SNDRV_PCM_STREAM_PLAYBACK;
1287 }
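/*
 * Example layout (actual values come from the controller's capabilities):
 * with capture_index_offset = 0 and capture_streams = 4, stream indices
 * 0-3 are capture and indices 4 and up are playback.
 */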
1288
1289 /* initialize SD streams */
1290 int azx_init_streams(struct azx *chip)
1291 {
1292 int i;
1293 int stream_tags[2] = { 0, 0 };
1294
1295 /* initialize each stream (aka device):
1296 * assign the starting BDL address to each stream (device)
1297 * and initialize it
1298 */
1299 for (i = 0; i < chip->num_streams; i++) {
1300 struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1301 int dir, tag;
1302
1303 if (!azx_dev)
1304 return -ENOMEM;
1305
1306 dir = stream_direction(chip, i);
1307 /* the stream tag must be unique within its
1308 * stream direction group, valid values 1...15;
1309 * use per-direction stream tags if the flag
1310 * AZX_DCAPS_SEPARATE_STREAM_TAG is set
1311 * (see the example after this loop)
1312 */
1313 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1314 tag = ++stream_tags[dir];
1315 else
1316 tag = i + 1;
1317 snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1318 i, dir, tag);
1319 }
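/*
 * Example: with AZX_DCAPS_SEPARATE_STREAM_TAG and, say, 4 capture plus
 * 4 playback streams, each direction gets tags 1-4 independently;
 * without the flag the tag is simply the stream index + 1.
 */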
1320
1321 return 0;
1322 }
1323 EXPORT_SYMBOL_GPL(azx_init_streams);
1324
1325 void azx_free_streams(struct azx *chip)
1326 {
1327 struct hdac_bus *bus = azx_bus(chip);
1328 struct hdac_stream *s;
1329
1330 while (!list_empty(&bus->stream_list)) {
1331 s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1332 list_del(&s->list);
1333 kfree(stream_to_azx_dev(s));
1334 }
1335 }
1336 EXPORT_SYMBOL_GPL(azx_free_streams);
1337