// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//
// Special thanks to:
//    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
//    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
//    Filip Proborszcz
//
// for sharing Intel AudioDSP expertise and helping shape the very
// foundation of this driver
//

#include <linux/module.h>
#include <linux/pci.h>
#include <sound/hda_codec.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hdaudio_ext.h>
#include <sound/intel-dsp-config.h>
#include <sound/intel-nhlt.h>
#include "../../codecs/hda.h"
#include "avs.h"
#include "cldma.h"
#include "messages.h"

static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
module_param(pgctl_mask, uint, 0444);
MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");

static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
module_param(cgctl_mask, uint, 0444);
MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");

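/* Read-modify-write a dword in the HDAudio controller's PCI config space. */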
static void
avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
{
	struct pci_dev *pci = to_pci_dev(bus->dev);
	u32 data;

	pci_read_config_dword(pci, reg, &data);
	data &= ~mask;
	data |= (value & mask);
	pci_write_config_dword(pci, reg, data);
}

void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
{
	u32 value = enable ? 0 : pgctl_mask;

	avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
}

static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
{
	u32 value = enable ? cgctl_mask : 0;

	avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
}

void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
{
	avs_hdac_clock_gating_enable(&adev->base.core, enable);
}

void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
	u32 value = enable ? AZX_VS_EM2_L1SEN : 0;

	snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value);
}

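/*
 * Read GCAP to learn how many capture and playback streams the controller
 * provides, initialize them and allocate BDL and position-buffer pages.
 */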
static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
{
	unsigned int cp_streams, pb_streams;
	unsigned int gcap;

	gcap = snd_hdac_chip_readw(bus, GCAP);
	cp_streams = (gcap >> 8) & 0x0F;
	pb_streams = (gcap >> 12) & 0x0F;
	bus->num_streams = cp_streams + pb_streams;

	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);

	return snd_hdac_bus_alloc_stream_pages(bus);
}

static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
	struct hdac_ext_link *hlink;
	bool ret;

	avs_hdac_clock_gating_enable(bus, false);
	ret = snd_hdac_bus_init_chip(bus, full_reset);

	/* Reset stream-to-link mapping */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

	avs_hdac_clock_gating_enable(bus, true);

	/*
	 * Set the DUM bit to address incorrect position reporting for capture
	 * streams. To do so, the controller needs to be out of reset.
	 */
	snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);

	return ret;
}

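/*
 * Probe a single codec address by requesting its vendor ID over CORB/RIRB;
 * a missing response means the slot is empty.
 */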
static int probe_codec(struct hdac_bus *bus, int addr)
{
	struct hda_codec *codec;
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1;
	int ret;

	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	if (res == -1)
		return -EIO;

	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
	if (IS_ERR(codec)) {
		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
		return PTR_ERR(codec);
	}
	/*
	 * Allow avs_core to suspend by forcing the suspended state on all
	 * of its codec child devices. Any component interested in dealing
	 * with HDA codecs directly takes over the PM responsibilities.
	 */
	pm_runtime_set_suspended(hda_codec_dev(codec));

	/* configuring the codec effectively creates a new ASoC component */
	ret = snd_hda_codec_configure(codec);
	if (ret < 0) {
		dev_err(bus->dev, "failed to configure codec: %d\n", ret);
		return ret;
	}

	return 0;
}

static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
{
	int c;

	/* First try to probe all given codec slots */
	for (c = 0; c < HDA_MAX_CODECS; c++) {
		if (!(bus->codec_mask & BIT(c)))
			continue;

		if (!probe_codec(bus, c))
			/* success, continue probing */
			continue;

		/* Some BIOSes report codec addresses that do not exist */
		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
		bus->codec_mask &= ~BIT(c);
		/*
		 * Worse, accessing a non-existent codec often confuses the
		 * controller bus and disturbs further communication. If an
		 * error occurs during probing, it is better to reset the
		 * controller bus to get back to a sane state.
		 */
		snd_hdac_bus_stop_chip(bus);
		avs_hdac_bus_init_chip(bus, true);
	}
}

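/*
 * Deferred part of the PCI probe: bring up the controller, enumerate HDA
 * codecs, boot the DSP firmware, register machine boards and enable
 * runtime PM.
 */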
static void avs_hda_probe_work(struct work_struct *work)
{
	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);
	avs_hdac_bus_probe_codecs(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* with all codecs probed, links can be powered down */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_first_boot_firmware(adev);
	if (ret < 0)
		return;

	adev->nhlt = intel_nhlt_init(adev->dev);
	if (!adev->nhlt)
		dev_info(bus->dev, "platform has no NHLT\n");
	avs_debugfs_init(adev);

	avs_register_all_boards(adev);

	/* configure PM */
	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
	pm_runtime_use_autosuspend(bus->dev);
	pm_runtime_mark_last_busy(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);
	pm_runtime_allow(bus->dev);
}

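/*
 * Advance the stream's linear position, accounting for the DMA position
 * wrapping around the end of the ring buffer.
 */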
static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
{
	u64 prev_pos, pos, num_bytes;

	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
	pos = snd_hdac_stream_get_pos_posbuf(stream);

	if (pos < prev_pos)
		num_bytes = (buffer_size - prev_pos) + pos;
	else
		num_bytes = pos - prev_pos;

	stream->curr_pos += num_bytes;
}

/* called from IRQ */
static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
{
	if (stream->substream) {
		snd_pcm_period_elapsed(stream->substream);
	} else if (stream->cstream) {
		u64 buffer_size = stream->cstream->runtime->buffer_size;

		hdac_stream_update_pos(stream, buffer_size);
		snd_compr_fragment_elapsed(stream->cstream);
	}
}

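/*
 * Top half: acknowledge RIRB interrupts and, if any stream raised an
 * interrupt, mask stream interrupts and defer handling to the IRQ thread.
 */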
static irqreturn_t hdac_bus_irq_handler(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 mask, int_enable;
	u32 status;
	int ret = IRQ_NONE;

	if (!pm_runtime_active(bus->dev))
		return ret;

	spin_lock(&bus->reg_lock);

	status = snd_hdac_chip_readl(bus, INTSTS);
	if (status == 0 || status == UINT_MAX) {
		spin_unlock(&bus->reg_lock);
		return ret;
	}

	/* clear rirb int */
	status = snd_hdac_chip_readb(bus, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE)
			snd_hdac_bus_update_rirb(bus);
		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
	}

	mask = (0x1 << bus->num_streams) - 1;

	status = snd_hdac_chip_readl(bus, INTSTS);
	status &= mask;
	if (status) {
		/* Disable stream interrupts; Re-enable in bottom half */
		int_enable = snd_hdac_chip_readl(bus, INTCTL);
		snd_hdac_chip_writel(bus, INTCTL, (int_enable & (~mask)));
		ret = IRQ_WAKE_THREAD;
	} else {
		ret = IRQ_HANDLED;
	}

	spin_unlock(&bus->reg_lock);
	return ret;
}

static irqreturn_t hdac_bus_irq_thread(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 status;
	u32 int_enable;
	u32 mask;
	unsigned long flags;

	status = snd_hdac_chip_readl(bus, INTSTS);

	snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream);

	/* Re-enable stream interrupts */
	mask = (0x1 << bus->num_streams) - 1;
	spin_lock_irqsave(&bus->reg_lock, flags);
	int_enable = snd_hdac_chip_readl(bus, INTCTL);
	snd_hdac_chip_writel(bus, INTCTL, (int_enable | mask));
	spin_unlock_irqrestore(&bus->reg_lock, flags);

	return IRQ_HANDLED;
}

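/*
 * A single MSI/legacy vector is shared by two IRQ actions: HDA stream
 * handling (dev_id = bus) and DSP IPC handling (dev_id = adev).
 */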
static int avs_hdac_acquire_irq(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int ret;

	/* request one and check that we only got one interrupt */
	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret != 1) {
		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
		return ret;
	}

	ret = pci_request_irq(pci, 0, hdac_bus_irq_handler, hdac_bus_irq_thread, bus,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
		goto free_vector;
	}

	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
		goto free_stream_irq;
	}

	return 0;

free_stream_irq:
	pci_free_irq(pci, 0, bus);
free_vector:
	pci_free_irq_vectors(pci);
	return ret;
}

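/* Initialize the HDAudio bus, the IPC machinery and the avs_dev bookkeeping. */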
static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hda_bus *bus = &adev->base;
	struct avs_ipc *ipc;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
	if (ret < 0)
		return ret;

	bus->core.use_posbuf = 1;
	bus->core.bdl_pos_adj = 0;
	bus->core.sync_write = 1;
	bus->pci = pci;
	bus->mixer_assigned = -1;
	mutex_init(&bus->prepare_mutex);

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;
	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!adev->modcfg_buf)
		return -ENOMEM;

	adev->dev = dev;
	adev->spec = (const struct avs_spec *)id->driver_data;
	adev->ipc = ipc;
	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
	INIT_LIST_HEAD(&adev->comp_list);
	INIT_LIST_HEAD(&adev->path_list);
	INIT_LIST_HEAD(&adev->fw_list);
	init_completion(&adev->fw_ready);
	spin_lock_init(&adev->path_list_lock);
	mutex_init(&adev->modres_mutex);
	mutex_init(&adev->comp_list_mutex);
	mutex_init(&adev->path_mutex);

	return 0;
}

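/*
 * PCI probe: map the HDAudio (BAR0) and DSP (BAR4) registers, set up
 * streams and interrupts, then defer the remaining init to probe_work.
 */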
static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hdac_bus *bus;
	struct avs_dev *adev;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_intel_dsp_driver_probe(pci);
	if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS)
		return -ENODEV;

	ret = pcim_enable_device(pci);
	if (ret < 0)
		return ret;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	ret = avs_bus_init(adev, pci, id);
	if (ret < 0) {
		dev_err(dev, "failed to init avs bus: %d\n", ret);
		return ret;
	}

	ret = pci_request_regions(pci, "AVS HDAudio");
	if (ret < 0)
		return ret;

	bus = &adev->base.core;
	bus->addr = pci_resource_start(pci, 0);
	bus->remap_addr = pci_ioremap_bar(pci, 0);
	if (!bus->remap_addr) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar0;
	}

	adev->dsp_ba = pci_ioremap_bar(pci, 4);
	if (!adev->dsp_ba) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar4;
	}

	snd_hdac_bus_parse_capabilities(bus);
	if (bus->mlcap)
		snd_hdac_ext_bus_get_ml_capabilities(bus);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	dma_set_max_seg_size(dev, UINT_MAX);

	ret = avs_hdac_bus_init_streams(bus);
	if (ret < 0) {
		dev_err(dev, "failed to init streams: %d\n", ret);
		goto err_init_streams;
	}

	ret = avs_hdac_acquire_irq(adev);
	if (ret < 0) {
		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
		goto err_acquire_irq;
	}

	pci_set_master(pci);
	pci_set_drvdata(pci, bus);
	device_disable_async_suspend(dev);

	ret = snd_hdac_i915_init(bus);
	if (ret == -EPROBE_DEFER)
		goto err_i915_init;
	else if (ret < 0)
		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

	schedule_work(&adev->probe_work);

	return 0;

err_i915_init:
	pci_clear_master(pci);
	pci_set_drvdata(pci, NULL);
err_acquire_irq:
	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
err_init_streams:
	iounmap(adev->dsp_ba);
err_remap_bar4:
	iounmap(bus->remap_addr);
err_remap_bar0:
	pci_release_regions(pci);
	return ret;
}

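/* Quiesce DMA, interrupts and the link controller before power-off or reboot. */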
static void avs_pci_shutdown(struct pci_dev *pci)
{
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	snd_hdac_stop_streams(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
	snd_hdac_ext_bus_link_power_down_all(bus);

	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	if (avs_platattr_test(adev, CLDMA))
		pci_free_irq(pci, 0, &code_loader);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
}

static void avs_pci_remove(struct pci_dev *pci)
{
	struct hdac_device *hdev, *save;
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	avs_unregister_all_boards(adev);

	avs_debugfs_exit(adev);
	if (adev->nhlt)
		intel_nhlt_free(adev->nhlt);

	if (avs_platattr_test(adev, CLDMA))
		hda_cldma_free(&code_loader);

	snd_hdac_stop_streams_and_chip(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	/* it is safe to remove all codecs from the system now */
	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
	/* reverse ml_capabilities */
	snd_hdac_ext_link_free_all(bus);
	snd_hdac_ext_bus_exit(bus);

	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	/* snd_hdac_stop_streams_and_chip does that already? */
	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
	if (bus->audio_component)
		snd_hdac_i915_exit(bus);

	avs_module_info_free(adev);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	iounmap(bus->remap_addr);
	iounmap(adev->dsp_ba);
	pci_release_regions(pci);

	/* Firmware is not needed anymore */
	avs_release_firmwares(adev);

	/* pm_runtime_forbid() can rpm_resume() which we do not want */
	pm_runtime_disable(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	pm_runtime_enable(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);
}

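/*
 * Standby variant used when low-power paths are active: the DSP firmware
 * keeps running; only arm the PCI IRQ as a wakeup source and save PCI state.
 */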
static int avs_suspend_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	if (bus->cmd_dma_state)
		snd_hdac_bus_stop_cmd_io(bus);

	snd_hdac_ext_bus_link_power_down_all(bus);

	enable_irq_wake(pci->irq);
	pci_save_state(pci);

	return 0;
}

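/*
 * Full suspend path: notify firmware of the power-down (Dx) transition,
 * disable the DSP cores and put the link controller into reset.
 */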
static int __maybe_unused avs_suspend_common(struct avs_dev *adev, bool low_power)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	flush_work(&adev->probe_work);
	if (low_power && adev->num_lp_paths)
		return avs_suspend_standby(adev);

	snd_hdac_ext_bus_link_power_down_all(bus);

	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
	/*
	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
	 * Do not block the entire system from suspending if that's the case.
	 */
	if (ret && ret != -EPERM) {
		dev_err(adev->dev, "set dx failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	avs_ipc_block(adev->ipc);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	if (ret < 0) {
		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
		return ret;
	}

	snd_hdac_ext_bus_ppcap_enable(bus, false);
	/* disable LP SRAM retention */
	avs_hda_power_gating_enable(adev, false);
	snd_hdac_bus_stop_chip(bus);
	/* disable CG when putting the controller into reset */
	avs_hdac_clock_gating_enable(bus, false);
	snd_hdac_bus_enter_link_reset(bus);
	avs_hdac_clock_gating_enable(bus, true);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	return 0;
}

static int avs_resume_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	pci_restore_state(pci);
	disable_irq_wake(pci->irq);

	snd_hdac_ext_bus_link_power_up_all(bus);

	if (bus->cmd_dma_state)
		snd_hdac_bus_init_cmd_io(bus);

	return 0;
}

static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	if (low_power && adev->num_lp_paths)
		return avs_resume_standby(adev);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_boot_firmware(adev, purge);
	if (ret < 0) {
		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
		return ret;
	}

	return 0;
}

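/*
 * System sleep, hibernation and runtime PM all funnel through the common
 * suspend/resume helpers; the flags select the standby (low-power) path
 * and whether the firmware is purge-booted on resume.
 */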
static int __maybe_unused avs_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

static int __maybe_unused avs_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, true);
}

static int __maybe_unused avs_runtime_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

static int __maybe_unused avs_runtime_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, false);
}

static int __maybe_unused avs_freeze(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}

static int __maybe_unused avs_thaw(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

static int __maybe_unused avs_poweroff(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}

static int __maybe_unused avs_restore(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

static const struct dev_pm_ops avs_dev_pm = {
	.suspend = avs_suspend,
	.resume = avs_resume,
	.freeze = avs_freeze,
	.thaw = avs_thaw,
	.poweroff = avs_poweroff,
	.restore = avs_restore,
	SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};

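/* Per-platform descriptors: minimum firmware version, DSP ops and SRAM layout. */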
static const struct avs_spec skl_desc = {
	.name = "skl",
	.min_fw_version = {
		.major = 9,
		.minor = 21,
		.hotfix = 0,
		.build = 4732,
	},
	.dsp_ops = &skl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_CLDMA,
	.sram_base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = SKL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_spec apl_desc = {
	.name = "apl",
	.min_fw_version = {
		.major = 9,
		.minor = 22,
		.hotfix = 1,
		.build = 4323,
	},
	.dsp_ops = &apl_dsp_ops,
	.core_init_mask = 3,
	.attributes = AVS_PLATATTR_IMR,
	.sram_base_offset = APL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = APL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = APL_ADSP_SRAM_BASE_OFFSET,
};

static const struct pci_device_id avs_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
	{ PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);

static struct pci_driver avs_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = avs_ids,
	.probe = avs_pci_probe,
	.remove = avs_pci_remove,
	.shutdown = avs_pci_shutdown,
	.driver = {
		.pm = &avs_dev_pm,
	},
};
module_pci_driver(avs_pci_driver);

MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");