xref: /freebsd/sys/dev/sound/pci/hda/hdac.c (revision fb1028dc)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
5  * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
6  * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Intel High Definition Audio (Controller) driver for FreeBSD.
33  */
34 
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_snd.h"
37 #endif
38 
39 #include <dev/sound/pcm/sound.h>
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42 
43 #include <sys/ctype.h>
44 #include <sys/endian.h>
45 #include <sys/taskqueue.h>
46 
47 #include <dev/sound/pci/hda/hdac_private.h>
48 #include <dev/sound/pci/hda/hdac_reg.h>
49 #include <dev/sound/pci/hda/hda_reg.h>
50 #include <dev/sound/pci/hda/hdac.h>
51 
52 #define HDA_DRV_TEST_REV	"20120126_0002"
53 
54 #define hdac_lock(sc)		snd_mtxlock((sc)->lock)
55 #define hdac_unlock(sc)		snd_mtxunlock((sc)->lock)
56 #define hdac_lockassert(sc)	snd_mtxassert((sc)->lock)
57 
58 #define HDAC_QUIRK_64BIT	(1 << 0)
59 #define HDAC_QUIRK_DMAPOS	(1 << 1)
60 #define HDAC_QUIRK_MSI		(1 << 2)
61 
62 static const struct {
63 	const char *key;
64 	uint32_t value;
65 } hdac_quirks_tab[] = {
66 	{ "64bit", HDAC_QUIRK_64BIT },
67 	{ "dmapos", HDAC_QUIRK_DMAPOS },
68 	{ "msi", HDAC_QUIRK_MSI },
69 };
70 
71 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");
72 
73 static const struct {
74 	uint32_t	model;
75 	const char	*desc;
76 	char		quirks_on;
77 	char		quirks_off;
78 } hdac_devices[] = {
79 	{ HDA_INTEL_OAK,     "Intel Oaktrail",	0, 0 },
80 	{ HDA_INTEL_CMLKLP,  "Intel Comet Lake-LP",	0, 0 },
81 	{ HDA_INTEL_CMLKH,   "Intel Comet Lake-H",	0, 0 },
82 	{ HDA_INTEL_BAY,     "Intel BayTrail",	0, 0 },
83 	{ HDA_INTEL_HSW1,    "Intel Haswell",	0, 0 },
84 	{ HDA_INTEL_HSW2,    "Intel Haswell",	0, 0 },
85 	{ HDA_INTEL_HSW3,    "Intel Haswell",	0, 0 },
86 	{ HDA_INTEL_BDW1,    "Intel Broadwell",	0, 0 },
87 	{ HDA_INTEL_BDW2,    "Intel Broadwell",	0, 0 },
88 	{ HDA_INTEL_BXTNT,   "Intel Broxton-T",	0, 0 },
89 	{ HDA_INTEL_CPT,     "Intel Cougar Point",	0, 0 },
90 	{ HDA_INTEL_PATSBURG,"Intel Patsburg",  0, 0 },
91 	{ HDA_INTEL_PPT1,    "Intel Panther Point",	0, 0 },
92 	{ HDA_INTEL_BR,      "Intel Braswell",	0, 0 },
93 	{ HDA_INTEL_LPT1,    "Intel Lynx Point",	0, 0 },
94 	{ HDA_INTEL_LPT2,    "Intel Lynx Point",	0, 0 },
95 	{ HDA_INTEL_WCPT,    "Intel Wildcat Point",	0, 0 },
96 	{ HDA_INTEL_WELLS1,  "Intel Wellsburg",	0, 0 },
97 	{ HDA_INTEL_WELLS2,  "Intel Wellsburg",	0, 0 },
98 	{ HDA_INTEL_LPTLP1,  "Intel Lynx Point-LP",	0, 0 },
99 	{ HDA_INTEL_LPTLP2,  "Intel Lynx Point-LP",	0, 0 },
100 	{ HDA_INTEL_SRPTLP,  "Intel Sunrise Point-LP",	0, 0 },
101 	{ HDA_INTEL_KBLKLP,  "Intel Kaby Lake-LP",	0, 0 },
102 	{ HDA_INTEL_SRPT,    "Intel Sunrise Point",	0, 0 },
103 	{ HDA_INTEL_KBLK,    "Intel Kaby Lake",	0, 0 },
104 	{ HDA_INTEL_KBLKH,   "Intel Kaby Lake-H",	0, 0 },
105 	{ HDA_INTEL_CFLK,    "Intel Coffee Lake",	0, 0 },
106 	{ HDA_INTEL_CMLKS,   "Intel Comet Lake-S",	0, 0 },
107 	{ HDA_INTEL_CNLK,    "Intel Cannon Lake",	0, 0 },
108 	{ HDA_INTEL_ICLK,    "Intel Ice Lake",		0, 0 },
109 	{ HDA_INTEL_CMLKLP,  "Intel Comet Lake-LP",	0, 0 },
110 	{ HDA_INTEL_CMLKH,   "Intel Comet Lake-H",	0, 0 },
111 	{ HDA_INTEL_TGLK,    "Intel Tiger Lake",	0, 0 },
112 	{ HDA_INTEL_TGLKH,   "Intel Tiger Lake-H",	0, 0 },
113 	{ HDA_INTEL_GMLK,    "Intel Gemini Lake",	0, 0 },
114 	{ HDA_INTEL_ALLK,    "Intel Alder Lake",	0, 0 },
115 	{ HDA_INTEL_ALLKM,   "Intel Alder Lake-M",	0, 0 },
116 	{ HDA_INTEL_ALLKN,   "Intel Alder Lake-N",	0, 0 },
117 	{ HDA_INTEL_ALLKP1,  "Intel Alder Lake-P",	0, 0 },
118 	{ HDA_INTEL_ALLKP2,  "Intel Alder Lake-P",	0, 0 },
119 	{ HDA_INTEL_ALLKPS,  "Intel Alder Lake-PS",	0, 0 },
120 	{ HDA_INTEL_RPTLK1,  "Intel Raptor Lake-P",	0, 0 },
121 	{ HDA_INTEL_RPTLK2,  "Intel Raptor Lake-P",	0, 0 },
122 	{ HDA_INTEL_MTL,     "Intel Meteor Lake-P",	0, 0 },
123 	{ HDA_INTEL_ARLS,    "Intel Arrow Lake-S",	0, 0 },
124 	{ HDA_INTEL_ARL,     "Intel Arrow Lake",	0, 0 },
125 	{ HDA_INTEL_LNLP,    "Intel Lunar Lake-P",	0, 0 },
126 	{ HDA_INTEL_82801F,  "Intel 82801F",	0, 0 },
127 	{ HDA_INTEL_63XXESB, "Intel 631x/632xESB",	0, 0 },
128 	{ HDA_INTEL_82801G,  "Intel 82801G",	0, 0 },
129 	{ HDA_INTEL_82801H,  "Intel 82801H",	0, 0 },
130 	{ HDA_INTEL_82801I,  "Intel 82801I",	0, 0 },
131 	{ HDA_INTEL_JLK,     "Intel Jasper Lake",	0, 0 },
132 	{ HDA_INTEL_82801JI, "Intel 82801JI",	0, 0 },
133 	{ HDA_INTEL_82801JD, "Intel 82801JD",	0, 0 },
134 	{ HDA_INTEL_PCH,     "Intel Ibex Peak",	0, 0 },
135 	{ HDA_INTEL_PCH2,    "Intel Ibex Peak",	0, 0 },
136 	{ HDA_INTEL_ELLK,    "Intel Elkhart Lake",	0, 0 },
137 	{ HDA_INTEL_JLK2,    "Intel Jasper Lake",	0, 0 },
138 	{ HDA_INTEL_BXTNP,   "Intel Broxton-P",	0, 0 },
139 	{ HDA_INTEL_SCH,     "Intel SCH",	0, 0 },
140 	{ HDA_NVIDIA_MCP51,  "NVIDIA MCP51",	0, HDAC_QUIRK_MSI },
141 	{ HDA_NVIDIA_MCP55,  "NVIDIA MCP55",	0, HDAC_QUIRK_MSI },
142 	{ HDA_NVIDIA_MCP61_1, "NVIDIA MCP61",	0, 0 },
143 	{ HDA_NVIDIA_MCP61_2, "NVIDIA MCP61",	0, 0 },
144 	{ HDA_NVIDIA_MCP65_1, "NVIDIA MCP65",	0, 0 },
145 	{ HDA_NVIDIA_MCP65_2, "NVIDIA MCP65",	0, 0 },
146 	{ HDA_NVIDIA_MCP67_1, "NVIDIA MCP67",	0, 0 },
147 	{ HDA_NVIDIA_MCP67_2, "NVIDIA MCP67",	0, 0 },
148 	{ HDA_NVIDIA_MCP73_1, "NVIDIA MCP73",	0, 0 },
149 	{ HDA_NVIDIA_MCP73_2, "NVIDIA MCP73",	0, 0 },
150 	{ HDA_NVIDIA_MCP78_1, "NVIDIA MCP78",	0, HDAC_QUIRK_64BIT },
151 	{ HDA_NVIDIA_MCP78_2, "NVIDIA MCP78",	0, HDAC_QUIRK_64BIT },
152 	{ HDA_NVIDIA_MCP78_3, "NVIDIA MCP78",	0, HDAC_QUIRK_64BIT },
153 	{ HDA_NVIDIA_MCP78_4, "NVIDIA MCP78",	0, HDAC_QUIRK_64BIT },
154 	{ HDA_NVIDIA_MCP79_1, "NVIDIA MCP79",	0, 0 },
155 	{ HDA_NVIDIA_MCP79_2, "NVIDIA MCP79",	0, 0 },
156 	{ HDA_NVIDIA_MCP79_3, "NVIDIA MCP79",	0, 0 },
157 	{ HDA_NVIDIA_MCP79_4, "NVIDIA MCP79",	0, 0 },
158 	{ HDA_NVIDIA_MCP89_1, "NVIDIA MCP89",	0, 0 },
159 	{ HDA_NVIDIA_MCP89_2, "NVIDIA MCP89",	0, 0 },
160 	{ HDA_NVIDIA_MCP89_3, "NVIDIA MCP89",	0, 0 },
161 	{ HDA_NVIDIA_MCP89_4, "NVIDIA MCP89",	0, 0 },
162 	{ HDA_NVIDIA_0BE2,   "NVIDIA (0x0be2)",	0, HDAC_QUIRK_MSI },
163 	{ HDA_NVIDIA_0BE3,   "NVIDIA (0x0be3)",	0, HDAC_QUIRK_MSI },
164 	{ HDA_NVIDIA_0BE4,   "NVIDIA (0x0be4)",	0, HDAC_QUIRK_MSI },
165 	{ HDA_NVIDIA_GT100,  "NVIDIA GT100",	0, HDAC_QUIRK_MSI },
166 	{ HDA_NVIDIA_GT104,  "NVIDIA GT104",	0, HDAC_QUIRK_MSI },
167 	{ HDA_NVIDIA_GT106,  "NVIDIA GT106",	0, HDAC_QUIRK_MSI },
168 	{ HDA_NVIDIA_GT108,  "NVIDIA GT108",	0, HDAC_QUIRK_MSI },
169 	{ HDA_NVIDIA_GT116,  "NVIDIA GT116",	0, HDAC_QUIRK_MSI },
170 	{ HDA_NVIDIA_GF119,  "NVIDIA GF119",	0, 0 },
171 	{ HDA_NVIDIA_GF110_1, "NVIDIA GF110",	0, HDAC_QUIRK_MSI },
172 	{ HDA_NVIDIA_GF110_2, "NVIDIA GF110",	0, HDAC_QUIRK_MSI },
173 	{ HDA_ATI_SB450,     "ATI SB450",	0, 0 },
174 	{ HDA_ATI_SB600,     "ATI SB600",	0, 0 },
175 	{ HDA_ATI_RS600,     "ATI RS600",	0, 0 },
176 	{ HDA_ATI_RS690,     "ATI RS690",	0, 0 },
177 	{ HDA_ATI_RS780,     "ATI RS780",	0, 0 },
178 	{ HDA_ATI_RS880,     "ATI RS880",	0, 0 },
179 	{ HDA_ATI_R600,      "ATI R600",	0, 0 },
180 	{ HDA_ATI_RV610,     "ATI RV610",	0, 0 },
181 	{ HDA_ATI_RV620,     "ATI RV620",	0, 0 },
182 	{ HDA_ATI_RV630,     "ATI RV630",	0, 0 },
183 	{ HDA_ATI_RV635,     "ATI RV635",	0, 0 },
184 	{ HDA_ATI_RV710,     "ATI RV710",	0, 0 },
185 	{ HDA_ATI_RV730,     "ATI RV730",	0, 0 },
186 	{ HDA_ATI_RV740,     "ATI RV740",	0, 0 },
187 	{ HDA_ATI_RV770,     "ATI RV770",	0, 0 },
188 	{ HDA_ATI_RV810,     "ATI RV810",	0, 0 },
189 	{ HDA_ATI_RV830,     "ATI RV830",	0, 0 },
190 	{ HDA_ATI_RV840,     "ATI RV840",	0, 0 },
191 	{ HDA_ATI_RV870,     "ATI RV870",	0, 0 },
192 	{ HDA_ATI_RV910,     "ATI RV910",	0, 0 },
193 	{ HDA_ATI_RV930,     "ATI RV930",	0, 0 },
194 	{ HDA_ATI_RV940,     "ATI RV940",	0, 0 },
195 	{ HDA_ATI_RV970,     "ATI RV970",	0, 0 },
196 	{ HDA_ATI_R1000,     "ATI R1000",	0, 0 },
197 	{ HDA_ATI_OLAND,     "ATI Oland",	0, 0 },
198 	{ HDA_ATI_KABINI,    "ATI Kabini",	0, 0 },
199 	{ HDA_ATI_TRINITY,   "ATI Trinity",	0, 0 },
200 	{ HDA_AMD_X370,      "AMD X370",	0, 0 },
201 	{ HDA_AMD_X570,      "AMD X570",	0, 0 },
202 	{ HDA_AMD_STONEY,    "AMD Stoney",	0, 0 },
203 	{ HDA_AMD_RAVEN,     "AMD Raven",	0, 0 },
204 	{ HDA_AMD_HUDSON2,   "AMD Hudson-2",	0, 0 },
205 	{ HDA_RDC_M3010,     "RDC M3010",	0, 0 },
206 	{ HDA_VIA_VT82XX,    "VIA VT8251/8237A",0, 0 },
207 	{ HDA_VMWARE,        "VMware",		0, 0 },
208 	{ HDA_SIS_966,       "SiS 966/968",	0, 0 },
209 	{ HDA_ULI_M5461,     "ULI M5461",	0, 0 },
210 	{ HDA_CREATIVE_SB1570,	"Creative SB Audigy FX", 0, HDAC_QUIRK_64BIT },
211 	/* Unknown */
212 	{ HDA_INTEL_ALL,  "Intel",		0, 0 },
213 	{ HDA_NVIDIA_ALL, "NVIDIA",		0, 0 },
214 	{ HDA_ATI_ALL,    "ATI",		0, 0 },
215 	{ HDA_AMD_ALL,    "AMD",		0, 0 },
216 	{ HDA_CREATIVE_ALL,    "Creative",	0, 0 },
217 	{ HDA_VIA_ALL,    "VIA",		0, 0 },
218 	{ HDA_VMWARE_ALL, "VMware",		0, 0 },
219 	{ HDA_SIS_ALL,    "SiS",		0, 0 },
220 	{ HDA_ULI_ALL,    "ULI",		0, 0 },
221 };
222 
223 static const struct {
224 	uint16_t vendor;
225 	uint8_t reg;
226 	uint8_t mask;
227 	uint8_t enable;
228 } hdac_pcie_snoop[] = {
229 	{  INTEL_VENDORID, 0x00, 0x00, 0x00 },
230 	{    ATI_VENDORID, 0x42, 0xf8, 0x02 },
231 	{    AMD_VENDORID, 0x42, 0xf8, 0x02 },
232 	{ NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f },
233 };
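/*
 * Each entry names the vendor-specific PCI config register used to enable
 * PCIe snooping: the register is read, masked with 'mask', OR'ed with
 * 'enable' and written back.  A reg of 0x00 (Intel) means snooping already
 * works without any config write; see the snoop loop in hdac_attach().
 */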
234 
235 /****************************************************************************
236  * Function prototypes
237  ****************************************************************************/
238 static void	hdac_intr_handler(void *);
239 static int	hdac_reset(struct hdac_softc *, bool);
240 static int	hdac_get_capabilities(struct hdac_softc *);
241 static void	hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
242 static int	hdac_dma_alloc(struct hdac_softc *,
243 					struct hdac_dma *, bus_size_t);
244 static void	hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
245 static int	hdac_mem_alloc(struct hdac_softc *);
246 static void	hdac_mem_free(struct hdac_softc *);
247 static int	hdac_irq_alloc(struct hdac_softc *);
248 static void	hdac_irq_free(struct hdac_softc *);
249 static void	hdac_corb_init(struct hdac_softc *);
250 static void	hdac_rirb_init(struct hdac_softc *);
251 static void	hdac_corb_start(struct hdac_softc *);
252 static void	hdac_rirb_start(struct hdac_softc *);
253 
254 static void	hdac_attach2(void *);
255 
256 static uint32_t	hdac_send_command(struct hdac_softc *, nid_t, uint32_t);
257 
258 static int	hdac_probe(device_t);
259 static int	hdac_attach(device_t);
260 static int	hdac_detach(device_t);
261 static int	hdac_suspend(device_t);
262 static int	hdac_resume(device_t);
263 
264 static int	hdac_rirb_flush(struct hdac_softc *sc);
265 static int	hdac_unsolq_flush(struct hdac_softc *sc);
266 
267 /* This function is surely going to make its way into an upper level someday. */
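/*
 * The quirk keywords in hdac_quirks_tab above can be combined in the
 * per-device "config" hint that this function parses; for example
 * (assuming device unit 0):
 *	hint.hdac.0.config="msi,no64bit"
 * turns the MSI quirk on and the 64-bit DMA quirk off.
 */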
268 static void
269 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
270 {
271 	const char *res = NULL;
272 	int i = 0, j, k, len, inv;
273 
274 	if (resource_string_value(device_get_name(sc->dev),
275 	    device_get_unit(sc->dev), "config", &res) != 0)
276 		return;
277 	if (!(res != NULL && strlen(res) > 0))
278 		return;
279 	HDA_BOOTVERBOSE(
280 		device_printf(sc->dev, "Config options:");
281 	);
282 	for (;;) {
283 		while (res[i] != '\0' &&
284 		    (res[i] == ',' || isspace(res[i]) != 0))
285 			i++;
286 		if (res[i] == '\0') {
287 			HDA_BOOTVERBOSE(
288 				printf("\n");
289 			);
290 			return;
291 		}
292 		j = i;
293 		while (res[j] != '\0' &&
294 		    !(res[j] == ',' || isspace(res[j]) != 0))
295 			j++;
296 		len = j - i;
297 		if (len > 2 && strncmp(res + i, "no", 2) == 0)
298 			inv = 2;
299 		else
300 			inv = 0;
301 		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
302 			if (strncmp(res + i + inv,
303 			    hdac_quirks_tab[k].key, len - inv) != 0)
304 				continue;
305 			if (len - inv != strlen(hdac_quirks_tab[k].key))
306 				continue;
307 			HDA_BOOTVERBOSE(
308 				printf(" %s%s", (inv != 0) ? "no" : "",
309 				    hdac_quirks_tab[k].key);
310 			);
311 			if (inv == 0) {
312 				*on |= hdac_quirks_tab[k].value;
313 				*off &= ~hdac_quirks_tab[k].value;
314 			} else if (inv != 0) {
315 				*off |= hdac_quirks_tab[k].value;
316 				*on &= ~hdac_quirks_tab[k].value;
317 			}
318 			break;
319 		}
320 		i = j;
321 	}
322 }
323 
324 static void
325 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
326 {
327 	device_t dev;
328 	uint8_t rirbsts;
329 	int i;
330 
331 	/* Was this a controller interrupt? */
332 	if (intsts & HDAC_INTSTS_CIS) {
333 		/*
334 		 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then
335 		 * we will need to check and clear HDAC_STATESTS.
336 		 * That event is used to report codec status changes such as
337 		 * a reset or a wake-up event.
338 		 */
339 		/*
340 		 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we
341 		 * will need to check and clear HDAC_CORBSTS_CMEI in
342 		 * HDAC_CORBSTS.
343 		 * That event is used to report CORB memory errors.
344 		 */
345 		/*
346 		 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we
347 		 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in
348 		 * HDAC_RIRBSTS.
349 		 * That event is used to report response FIFO overruns.
350 		 */
351 
352 		/* Get as many responses as we can */
353 		rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
354 		while (rirbsts & HDAC_RIRBSTS_RINTFL) {
355 			HDAC_WRITE_1(&sc->mem,
356 			    HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
357 			hdac_rirb_flush(sc);
358 			rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
359 		}
360 		if (sc->unsolq_rp != sc->unsolq_wp)
361 			taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
362 	}
363 
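	/*
	 * Each hardware stream owns one of the low INTSTS bits.  Acknowledge
	 * the stream's SDSTS status bits and notify whatever child device
	 * currently owns that stream.
	 */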
364 	if (intsts & HDAC_INTSTS_SIS_MASK) {
365 		for (i = 0; i < sc->num_ss; i++) {
366 			if ((intsts & (1 << i)) == 0)
367 				continue;
368 			HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
369 			    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
370 			if ((dev = sc->streams[i].dev) != NULL) {
371 				HDAC_STREAM_INTR(dev,
372 				    sc->streams[i].dir, sc->streams[i].stream);
373 			}
374 		}
375 	}
376 }
377 
378 /****************************************************************************
379  * void hdac_intr_handler(void *)
380  *
381  * Interrupt handler. Processes interrupts received from the hdac.
382  ****************************************************************************/
383 static void
384 hdac_intr_handler(void *context)
385 {
386 	struct hdac_softc *sc;
387 	uint32_t intsts;
388 
389 	sc = (struct hdac_softc *)context;
390 
391 	/*
392 	 * Loop until HDAC_INTSTS_GIS gets clear.
393 	 * It is plausible that hardware interrupts a host only when GIS goes
394 	 * from zero to one.  GIS is formed by OR-ing multiple hardware
395 	 * statuses, so it's possible that a previously cleared status gets set
396 	 * again while another status has not been cleared yet.  Thus, there
397 	 * would be no new interrupt, as GIS stays set.  If we don't
398 	 * re-examine GIS then we can leave it set and never get an interrupt
399 	 * again.
400 	 */
401 	hdac_lock(sc);
402 	intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
403 	while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) {
404 		hdac_one_intr(sc, intsts);
405 		intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
406 	}
407 	hdac_unlock(sc);
408 }
409 
410 static void
411 hdac_poll_callback(void *arg)
412 {
413 	struct hdac_softc *sc = arg;
414 
415 	if (sc == NULL)
416 		return;
417 
418 	hdac_lock(sc);
419 	if (sc->polling == 0) {
420 		hdac_unlock(sc);
421 		return;
422 	}
423 	callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc);
424 	hdac_unlock(sc);
425 
426 	hdac_intr_handler(sc);
427 }
428 
429 /****************************************************************************
430  * int hdac_reset(hdac_softc *, bool)
431  *
432  * Reset the hdac to a quiescent and known state.
433  ****************************************************************************/
434 static int
435 hdac_reset(struct hdac_softc *sc, bool wakeup)
436 {
437 	uint32_t gctl;
438 	int count, i;
439 
440 	/*
441 	 * Stop all Streams DMA engine
442 	 */
443 	for (i = 0; i < sc->num_iss; i++)
444 		HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0);
445 	for (i = 0; i < sc->num_oss; i++)
446 		HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0);
447 	for (i = 0; i < sc->num_bss; i++)
448 		HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0);
449 
450 	/*
451 	 * Stop Control DMA engines.
452 	 */
453 	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0);
454 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0);
455 
456 	/*
457 	 * Reset DMA position buffer.
458 	 */
459 	HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0);
460 	HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0);
461 
462 	/*
463 	 * Reset the controller. The reset must remain asserted for
464 	 * a minimum of 100us.
465 	 */
466 	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
467 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST);
468 	count = 10000;
469 	do {
470 		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
471 		if (!(gctl & HDAC_GCTL_CRST))
472 			break;
473 		DELAY(10);
474 	} while (--count);
475 	if (gctl & HDAC_GCTL_CRST) {
476 		device_printf(sc->dev, "Unable to put hdac in reset\n");
477 		return (ENXIO);
478 	}
479 
480 	/* If wakeup is not requested - leave the controller in reset state. */
481 	if (!wakeup)
482 		return (0);
483 
484 	DELAY(100);
485 	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
486 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
487 	count = 10000;
488 	do {
489 		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
490 		if (gctl & HDAC_GCTL_CRST)
491 			break;
492 		DELAY(10);
493 	} while (--count);
494 	if (!(gctl & HDAC_GCTL_CRST)) {
495 		device_printf(sc->dev, "Device stuck in reset\n");
496 		return (ENXIO);
497 	}
498 
499 	/*
500 	 * Wait for codecs to finish their own reset sequence. The delay here
501 	 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
502 	 */
503 	DELAY(1000);
504 
505 	return (0);
506 }
507 
508 /****************************************************************************
509  * int hdac_get_capabilities(struct hdac_softc *);
510  *
511  * Retrieve the general capabilities of the hdac:
512  *	Number of Input Streams
513  *	Number of Output Streams
514  *	Number of bidirectional Streams
515  *	64bit ready
516  *	CORB and RIRB sizes
517  ****************************************************************************/
518 static int
519 hdac_get_capabilities(struct hdac_softc *sc)
520 {
521 	uint16_t gcap;
522 	uint8_t corbsize, rirbsize;
523 
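	/*
	 * GCAP packs the stream and SDO counts into bit fields (per the HDA
	 * specification: 64OK in bit 0, NSDO in bits 2:1, BSS in bits 7:3,
	 * ISS in bits 11:8 and OSS in bits 15:12); the HDAC_GCAP_* macros
	 * extract them below.
	 */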
524 	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
525 	sc->num_iss = HDAC_GCAP_ISS(gcap);
526 	sc->num_oss = HDAC_GCAP_OSS(gcap);
527 	sc->num_bss = HDAC_GCAP_BSS(gcap);
528 	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
529 	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
530 	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
531 	if (sc->quirks_on & HDAC_QUIRK_64BIT)
532 		sc->support_64bit = 1;
533 	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
534 		sc->support_64bit = 0;
535 
536 	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
537 	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
538 	    HDAC_CORBSIZE_CORBSZCAP_256)
539 		sc->corb_size = 256;
540 	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
541 	    HDAC_CORBSIZE_CORBSZCAP_16)
542 		sc->corb_size = 16;
543 	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
544 	    HDAC_CORBSIZE_CORBSZCAP_2)
545 		sc->corb_size = 2;
546 	else {
547 		device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
548 		    __func__, corbsize);
549 		return (ENXIO);
550 	}
551 
552 	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
553 	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
554 	    HDAC_RIRBSIZE_RIRBSZCAP_256)
555 		sc->rirb_size = 256;
556 	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
557 	    HDAC_RIRBSIZE_RIRBSZCAP_16)
558 		sc->rirb_size = 16;
559 	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
560 	    HDAC_RIRBSIZE_RIRBSZCAP_2)
561 		sc->rirb_size = 2;
562 	else {
563 		device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
564 		    __func__, rirbsize);
565 		return (ENXIO);
566 	}
567 
568 	HDA_BOOTVERBOSE(
569 		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
570 		    "NSDO %d%s, CORB %d, RIRB %d\n",
571 		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
572 		    sc->support_64bit ? ", 64bit" : "",
573 		    sc->corb_size, sc->rirb_size);
574 	);
575 
576 	return (0);
577 }
578 
579 /****************************************************************************
580  * void hdac_dma_cb
581  *
582  * This function is called by bus_dmamap_load when the mapping has been
583  * established. We just record the physical address of the mapping into
584  * the struct hdac_dma passed in.
585  ****************************************************************************/
586 static void
587 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
588 {
589 	struct hdac_dma *dma;
590 
591 	if (error == 0) {
592 		dma = (struct hdac_dma *)callback_arg;
593 		dma->dma_paddr = segs[0].ds_addr;
594 	}
595 }
596 
597 /****************************************************************************
598  * int hdac_dma_alloc
599  *
600  * This function allocates and sets up a DMA region (struct hdac_dma).
601  * It must be freed by a corresponding hdac_dma_free.
602  ****************************************************************************/
603 static int
604 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
605 {
606 	bus_size_t roundsz;
607 	int result;
608 
609 	roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
610 	bzero(dma, sizeof(*dma));
611 
612 	/*
613 	 * Create a DMA tag
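	 * describing a single physically contiguous buffer, kept below 4 GB
	 * unless the controller (or the 64bit quirk) allows 64-bit DMA.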
614 	 */
615 	result = bus_dma_tag_create(
616 	    bus_get_dma_tag(sc->dev),		/* parent */
617 	    HDA_DMA_ALIGNMENT,			/* alignment */
618 	    0,					/* boundary */
619 	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
620 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
621 	    BUS_SPACE_MAXADDR,			/* highaddr */
622 	    NULL,				/* filtfunc */
623 	    NULL,				/* filtfuncarg */
624 	    roundsz,				/* maxsize */
625 	    1,					/* nsegments */
626 	    roundsz,				/* maxsegsz */
627 	    0,					/* flags */
628 	    NULL,				/* lockfunc */
629 	    NULL,				/* lockfuncarg */
630 	    &dma->dma_tag);			/* dmat */
631 	if (result != 0) {
632 		device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
633 		    __func__, result);
634 		goto hdac_dma_alloc_fail;
635 	}
636 
637 	/*
638 	 * Allocate DMA memory
639 	 */
640 	result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
641 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
642 	    ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
643 	     BUS_DMA_COHERENT),
644 	    &dma->dma_map);
645 	if (result != 0) {
646 		device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
647 		    __func__, result);
648 		goto hdac_dma_alloc_fail;
649 	}
650 
651 	dma->dma_size = roundsz;
652 
653 	/*
654 	 * Map the memory
655 	 */
656 	result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
657 	    (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
658 	if (result != 0 || dma->dma_paddr == 0) {
659 		if (result == 0)
660 			result = ENOMEM;
661 		device_printf(sc->dev, "%s: bus_dmamap_load failed (%d)\n",
662 		    __func__, result);
663 		goto hdac_dma_alloc_fail;
664 	}
665 
666 	HDA_BOOTHVERBOSE(
667 		device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
668 		    __func__, (uintmax_t)size, (uintmax_t)roundsz);
669 	);
670 
671 	return (0);
672 
673 hdac_dma_alloc_fail:
674 	hdac_dma_free(sc, dma);
675 
676 	return (result);
677 }
678 
679 /****************************************************************************
680  * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
681  *
682  * Free a struct hdac_dma that has been previously allocated via the
683  * hdac_dma_alloc function.
684  ****************************************************************************/
685 static void
686 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
687 {
688 	if (dma->dma_paddr != 0) {
689 		/* Flush caches */
690 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
691 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
692 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
693 		dma->dma_paddr = 0;
694 	}
695 	if (dma->dma_vaddr != NULL) {
696 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
697 		dma->dma_vaddr = NULL;
698 	}
699 	if (dma->dma_tag != NULL) {
700 		bus_dma_tag_destroy(dma->dma_tag);
701 		dma->dma_tag = NULL;
702 	}
703 	dma->dma_size = 0;
704 }
705 
706 /****************************************************************************
707  * int hdac_mem_alloc(struct hdac_softc *)
708  *
709  * Allocate all the bus resources necessary to speak with the physical
710  * controller.
711  ****************************************************************************/
712 static int
713 hdac_mem_alloc(struct hdac_softc *sc)
714 {
715 	struct hdac_mem *mem;
716 
717 	mem = &sc->mem;
718 	mem->mem_rid = PCIR_BAR(0);
719 	mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
720 	    &mem->mem_rid, RF_ACTIVE);
721 	if (mem->mem_res == NULL) {
722 		device_printf(sc->dev,
723 		    "%s: Unable to allocate memory resource\n", __func__);
724 		return (ENOMEM);
725 	}
726 	mem->mem_tag = rman_get_bustag(mem->mem_res);
727 	mem->mem_handle = rman_get_bushandle(mem->mem_res);
728 
729 	return (0);
730 }
731 
732 /****************************************************************************
733  * void hdac_mem_free(struct hdac_softc *)
734  *
735  * Free up resources previously allocated by hdac_mem_alloc.
736  ****************************************************************************/
737 static void
738 hdac_mem_free(struct hdac_softc *sc)
739 {
740 	struct hdac_mem *mem;
741 
742 	mem = &sc->mem;
743 	if (mem->mem_res != NULL)
744 		bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid,
745 		    mem->mem_res);
746 	mem->mem_res = NULL;
747 }
748 
749 /****************************************************************************
750  * int hdac_irq_alloc(struct hdac_softc *)
751  *
752  * Allocate and setup the resources necessary for interrupt handling.
753  ****************************************************************************/
754 static int
755 hdac_irq_alloc(struct hdac_softc *sc)
756 {
757 	struct hdac_irq *irq;
758 	int result;
759 
760 	irq = &sc->irq;
761 	irq->irq_rid = 0x0;
762 
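	/*
	 * Prefer a single MSI vector when the device exposes one and the MSI
	 * quirk is not disabled; the allocated MSI message shows up as
	 * SYS_RES_IRQ rid 1, while rid 0 is the legacy INTx line.
	 */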
763 	if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 &&
764 	    (result = pci_msi_count(sc->dev)) == 1 &&
765 	    pci_alloc_msi(sc->dev, &result) == 0)
766 		irq->irq_rid = 0x1;
767 
768 	irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
769 	    &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE);
770 	if (irq->irq_res == NULL) {
771 		device_printf(sc->dev, "%s: Unable to allocate irq\n",
772 		    __func__);
773 		goto hdac_irq_alloc_fail;
774 	}
775 	result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV,
776 	    NULL, hdac_intr_handler, sc, &irq->irq_handle);
777 	if (result != 0) {
778 		device_printf(sc->dev,
779 		    "%s: Unable to setup interrupt handler (%d)\n",
780 		    __func__, result);
781 		goto hdac_irq_alloc_fail;
782 	}
783 
784 	return (0);
785 
786 hdac_irq_alloc_fail:
787 	hdac_irq_free(sc);
788 
789 	return (ENXIO);
790 }
791 
792 /****************************************************************************
793  * void hdac_irq_free(struct hdac_softc *)
794  *
795  * Free up resources previously allocated by hdac_irq_alloc.
796  ****************************************************************************/
797 static void
798 hdac_irq_free(struct hdac_softc *sc)
799 {
800 	struct hdac_irq *irq;
801 
802 	irq = &sc->irq;
803 	if (irq->irq_res != NULL && irq->irq_handle != NULL)
804 		bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle);
805 	if (irq->irq_res != NULL)
806 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid,
807 		    irq->irq_res);
808 	if (irq->irq_rid == 0x1)
809 		pci_release_msi(sc->dev);
810 	irq->irq_handle = NULL;
811 	irq->irq_res = NULL;
812 	irq->irq_rid = 0x0;
813 }
814 
815 /****************************************************************************
816  * void hdac_corb_init(struct hdac_softc *)
817  *
818  * Initialize the corb registers for operation but do not start it up yet.
819  * The CORB engine must not be running when this function is called.
820  ****************************************************************************/
821 static void
822 hdac_corb_init(struct hdac_softc *sc)
823 {
824 	uint8_t corbsize;
825 	uint64_t corbpaddr;
826 
827 	/* Setup the CORB size. */
828 	switch (sc->corb_size) {
829 	case 256:
830 		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
831 		break;
832 	case 16:
833 		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16);
834 		break;
835 	case 2:
836 		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2);
837 		break;
838 	default:
839 		panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size);
840 	}
841 	HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);
842 
843 	/* Setup the CORB Address in the hdac */
844 	corbpaddr = (uint64_t)sc->corb_dma.dma_paddr;
845 	HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr);
846 	HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32));
847 
848 	/* Set the WP and RP */
849 	sc->corb_wp = 0;
850 	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
851 	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
852 	/*
853 	 * The HDA specification indicates that the CORBRPRST bit will always
854 	 * read as zero. Unfortunately, it seems that at least the 82801G
855 	 * doesn't reset the bit to zero, which stalls the corb engine, so
856 	 * manually reset the bit to zero before continuing.
857 	 */
858 	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);
859 
860 	/* Enable CORB error reporting */
861 #if 0
862 	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
863 #endif
864 }
865 
866 /****************************************************************************
867  * void hdac_rirb_init(struct hdac_softc *)
868  *
869  * Initialize the rirb registers for operation but do not start it up yet.
870  * The RIRB engine must not be running when this function is called.
871  ****************************************************************************/
872 static void
873 hdac_rirb_init(struct hdac_softc *sc)
874 {
875 	uint8_t rirbsize;
876 	uint64_t rirbpaddr;
877 
878 	/* Setup the RIRB size. */
879 	switch (sc->rirb_size) {
880 	case 256:
881 		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
882 		break;
883 	case 16:
884 		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
885 		break;
886 	case 2:
887 		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
888 		break;
889 	default:
890 		panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size);
891 	}
892 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);
893 
894 	/* Setup the RIRB Address in the hdac */
895 	rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
896 	HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
897 	HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));
898 
899 	/* Setup the WP and RP */
900 	sc->rirb_rp = 0;
901 	HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);
902 
903 	/* Setup the interrupt threshold */
904 	HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);
905 
906 	/* Enable Overrun and response received reporting */
907 #if 0
908 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
909 	    HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
910 #else
911 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
912 #endif
913 
914 	/*
915 	 * Make sure that the Host CPU cache doesn't contain any dirty
916 	 * cache lines that fall within the rirb. If I understood correctly, it
917 	 * should be sufficient to do this only once as the rirb is purely
918 	 * read-only from now on.
919 	 */
920 	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
921 	    BUS_DMASYNC_PREREAD);
922 }
923 
924 /****************************************************************************
925  * void hdac_corb_start(hdac_softc *)
926  *
927  * Startup the corb DMA engine
928  ****************************************************************************/
929 static void
930 hdac_corb_start(struct hdac_softc *sc)
931 {
932 	uint32_t corbctl;
933 
934 	corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL);
935 	corbctl |= HDAC_CORBCTL_CORBRUN;
936 	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl);
937 }
938 
939 /****************************************************************************
940  * void hdac_rirb_start(hdac_softc *)
941  *
942  * Startup the rirb DMA engine
943  ****************************************************************************/
944 static void
945 hdac_rirb_start(struct hdac_softc *sc)
946 {
947 	uint32_t rirbctl;
948 
949 	rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL);
950 	rirbctl |= HDAC_RIRBCTL_RIRBDMAEN;
951 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl);
952 }
953 
954 static int
955 hdac_rirb_flush(struct hdac_softc *sc)
956 {
957 	struct hdac_rirb *rirb_base, *rirb;
958 	nid_t cad;
959 	uint32_t resp, resp_ex;
960 	uint8_t rirbwp;
961 	int ret;
962 
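	/*
	 * Walk the RIRB from our software read pointer up to the hardware
	 * write pointer.  Unsolicited responses are queued (response plus
	 * codec address) for the unsolq task, while solicited responses
	 * complete the oldest pending command for that codec address.
	 */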
963 	rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr;
964 	rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP);
965 	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
966 	    BUS_DMASYNC_POSTREAD);
967 
968 	ret = 0;
969 	while (sc->rirb_rp != rirbwp) {
970 		sc->rirb_rp++;
971 		sc->rirb_rp %= sc->rirb_size;
972 		rirb = &rirb_base[sc->rirb_rp];
973 		resp = le32toh(rirb->response);
974 		resp_ex = le32toh(rirb->response_ex);
975 		cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex);
976 		if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) {
977 			sc->unsolq[sc->unsolq_wp++] = resp;
978 			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
979 			sc->unsolq[sc->unsolq_wp++] = cad;
980 			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
981 		} else if (sc->codecs[cad].pending <= 0) {
982 			device_printf(sc->dev, "Unexpected unsolicited "
983 			    "response from address %d: %08x\n", cad, resp);
984 		} else {
985 			sc->codecs[cad].response = resp;
986 			sc->codecs[cad].pending--;
987 		}
988 		ret++;
989 	}
990 
991 	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
992 	    BUS_DMASYNC_PREREAD);
993 	return (ret);
994 }
995 
996 static int
997 hdac_unsolq_flush(struct hdac_softc *sc)
998 {
999 	device_t child;
1000 	nid_t cad;
1001 	uint32_t resp;
1002 	int ret = 0;
1003 
1004 	if (sc->unsolq_st == HDAC_UNSOLQ_READY) {
1005 		sc->unsolq_st = HDAC_UNSOLQ_BUSY;
1006 		while (sc->unsolq_rp != sc->unsolq_wp) {
1007 			resp = sc->unsolq[sc->unsolq_rp++];
1008 			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
1009 			cad = sc->unsolq[sc->unsolq_rp++];
1010 			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
1011 			if ((child = sc->codecs[cad].dev) != NULL &&
1012 			    device_is_attached(child))
1013 				HDAC_UNSOL_INTR(child, resp);
1014 			ret++;
1015 		}
1016 		sc->unsolq_st = HDAC_UNSOLQ_READY;
1017 	}
1018 
1019 	return (ret);
1020 }
1021 
1022 /****************************************************************************
1023  * uint32_t hdac_send_command
1024  *
1025  * Wrapper function that sends only one command to a given codec
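 * The caller must hold sc->lock.  A typical use (see hdac_attach2()) is:
 *	resp = hdac_send_command(sc, cad,
 *	    HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_VENDOR_ID));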
1026  ****************************************************************************/
1027 static uint32_t
1028 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb)
1029 {
1030 	int timeout;
1031 	uint32_t *corb;
1032 
1033 	hdac_lockassert(sc);
1034 	verb &= ~HDA_CMD_CAD_MASK;
1035 	verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT;
1036 	sc->codecs[cad].response = HDA_INVALID;
1037 
1038 	sc->codecs[cad].pending++;
1039 	sc->corb_wp++;
1040 	sc->corb_wp %= sc->corb_size;
1041 	corb = (uint32_t *)sc->corb_dma.dma_vaddr;
1042 	bus_dmamap_sync(sc->corb_dma.dma_tag,
1043 	    sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE);
1044 	corb[sc->corb_wp] = htole32(verb);
1045 	bus_dmamap_sync(sc->corb_dma.dma_tag,
1046 	    sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE);
1047 	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
1048 
1049 	timeout = 10000;
1050 	do {
1051 		if (hdac_rirb_flush(sc) == 0)
1052 			DELAY(10);
1053 	} while (sc->codecs[cad].pending != 0 && --timeout);
1054 
1055 	if (sc->codecs[cad].pending != 0) {
1056 		device_printf(sc->dev, "Command 0x%08x timeout on address %d\n",
1057 		    verb, cad);
1058 		sc->codecs[cad].pending = 0;
1059 	}
1060 
1061 	if (sc->unsolq_rp != sc->unsolq_wp)
1062 		taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
1063 	return (sc->codecs[cad].response);
1064 }
1065 
1066 /****************************************************************************
1067  * Device Methods
1068  ****************************************************************************/
1069 
1070 /****************************************************************************
1071  * int hdac_probe(device_t)
1072  *
1073  * Probe for the presence of an hdac. If none is found, check for a generic
1074  * match using the subclass of the device.
1075  ****************************************************************************/
1076 static int
1077 hdac_probe(device_t dev)
1078 {
1079 	int i, result;
1080 	uint32_t model;
1081 	uint16_t class, subclass;
1082 	char desc[64];
1083 
1084 	model = (uint32_t)pci_get_device(dev) << 16;
1085 	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1086 	class = pci_get_class(dev);
1087 	subclass = pci_get_subclass(dev);
1088 
1089 	bzero(desc, sizeof(desc));
1090 	result = ENXIO;
1091 	for (i = 0; i < nitems(hdac_devices); i++) {
1092 		if (hdac_devices[i].model == model) {
1093 			strlcpy(desc, hdac_devices[i].desc, sizeof(desc));
1094 			result = BUS_PROBE_DEFAULT;
1095 			break;
1096 		}
1097 		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1098 		    class == PCIC_MULTIMEDIA &&
1099 		    subclass == PCIS_MULTIMEDIA_HDA) {
1100 			snprintf(desc, sizeof(desc), "%s (0x%04x)",
1101 			    hdac_devices[i].desc, pci_get_device(dev));
1102 			result = BUS_PROBE_GENERIC;
1103 			break;
1104 		}
1105 	}
1106 	if (result == ENXIO && class == PCIC_MULTIMEDIA &&
1107 	    subclass == PCIS_MULTIMEDIA_HDA) {
1108 		snprintf(desc, sizeof(desc), "Generic (0x%08x)", model);
1109 		result = BUS_PROBE_GENERIC;
1110 	}
1111 	if (result != ENXIO)
1112 		device_set_descf(dev, "%s HDA Controller", desc);
1113 
1114 	return (result);
1115 }
1116 
1117 static void
1118 hdac_unsolq_task(void *context, int pending)
1119 {
1120 	struct hdac_softc *sc;
1121 
1122 	sc = (struct hdac_softc *)context;
1123 
1124 	hdac_lock(sc);
1125 	hdac_unsolq_flush(sc);
1126 	hdac_unlock(sc);
1127 }
1128 
1129 /****************************************************************************
1130  * int hdac_attach(device_t)
1131  *
1132  * Attach the device into the kernel. Interrupts usually won't be enabled
1133  * when this function is called. Setup everything that doesn't require
1134  * interrupts and defer probing of codecs until interrupts are enabled.
1135  ****************************************************************************/
1136 static int
1137 hdac_attach(device_t dev)
1138 {
1139 	struct hdac_softc *sc;
1140 	int result;
1141 	int i, devid = -1;
1142 	uint32_t model;
1143 	uint16_t class, subclass;
1144 	uint16_t vendor;
1145 	uint8_t v;
1146 
1147 	sc = device_get_softc(dev);
1148 	HDA_BOOTVERBOSE(
1149 		device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
1150 		    pci_get_subvendor(dev), pci_get_subdevice(dev));
1151 		device_printf(dev, "HDA Driver Revision: %s\n",
1152 		    HDA_DRV_TEST_REV);
1153 	);
1154 
1155 	model = (uint32_t)pci_get_device(dev) << 16;
1156 	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1157 	class = pci_get_class(dev);
1158 	subclass = pci_get_subclass(dev);
1159 
1160 	for (i = 0; i < nitems(hdac_devices); i++) {
1161 		if (hdac_devices[i].model == model) {
1162 			devid = i;
1163 			break;
1164 		}
1165 		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1166 		    class == PCIC_MULTIMEDIA &&
1167 		    subclass == PCIS_MULTIMEDIA_HDA) {
1168 			devid = i;
1169 			break;
1170 		}
1171 	}
1172 
1173 	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
1174 	sc->dev = dev;
1175 	TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
1176 	callout_init(&sc->poll_callout, 1);
1177 	for (i = 0; i < HDAC_CODEC_MAX; i++)
1178 		sc->codecs[i].dev = NULL;
1179 	if (devid >= 0) {
1180 		sc->quirks_on = hdac_devices[devid].quirks_on;
1181 		sc->quirks_off = hdac_devices[devid].quirks_off;
1182 	} else {
1183 		sc->quirks_on = 0;
1184 		sc->quirks_off = 0;
1185 	}
1186 	if (resource_int_value(device_get_name(dev),
1187 	    device_get_unit(dev), "msi", &i) == 0) {
1188 		if (i == 0)
1189 			sc->quirks_off |= HDAC_QUIRK_MSI;
1190 		else {
1191 			sc->quirks_on |= HDAC_QUIRK_MSI;
1192 			sc->quirks_off &= ~HDAC_QUIRK_MSI;
1193 		}
1194 	}
1195 	hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
1196 	HDA_BOOTVERBOSE(
1197 		device_printf(sc->dev,
1198 		    "Config options: on=0x%08x off=0x%08x\n",
1199 		    sc->quirks_on, sc->quirks_off);
1200 	);
1201 	sc->poll_ival = hz;
1202 	if (resource_int_value(device_get_name(dev),
1203 	    device_get_unit(dev), "polling", &i) == 0 && i != 0)
1204 		sc->polling = 1;
1205 	else
1206 		sc->polling = 0;
1207 
1208 	pci_enable_busmaster(dev);
1209 
1210 	vendor = pci_get_vendor(dev);
1211 	if (vendor == INTEL_VENDORID) {
1212 		/* TCSEL -> TC0 */
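		/*
		 * Writing the low TCSEL bits as zero keeps HDA DMA on PCI
		 * traffic class 0; leaving another class selected is known
		 * to cause audio glitches on some Intel chipsets.
		 */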
1213 		v = pci_read_config(dev, 0x44, 1);
1214 		pci_write_config(dev, 0x44, v & 0xf8, 1);
1215 		HDA_BOOTHVERBOSE(
1216 			device_printf(dev, "TCSEL: 0x%02d -> 0x%02d\n", v,
1217 			    pci_read_config(dev, 0x44, 1));
1218 		);
1219 	}
1220 
1221 #if defined(__i386__) || defined(__amd64__)
1222 	sc->flags |= HDAC_F_DMA_NOCACHE;
1223 
1224 	if (resource_int_value(device_get_name(dev),
1225 	    device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
1226 #else
1227 	sc->flags &= ~HDAC_F_DMA_NOCACHE;
1228 #endif
1229 		/*
1230 		 * Try to enable PCIe snoop to avoid messing around with
1231 		 * the uncacheable DMA attribute. Since the PCIe snoop register
1232 		 * config is pretty much vendor specific, there is no
1233 		 * general way to enable it, forcing us (and even
1234 		 * Microsoft) to fall back to uncacheable or write-combined
1235 		 * DMA by default.
1236 		 *
1237 		 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx
1238 		 */
1239 		for (i = 0; i < nitems(hdac_pcie_snoop); i++) {
1240 			if (hdac_pcie_snoop[i].vendor != vendor)
1241 				continue;
1242 			sc->flags &= ~HDAC_F_DMA_NOCACHE;
1243 			if (hdac_pcie_snoop[i].reg == 0x00)
1244 				break;
1245 			v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1246 			if ((v & hdac_pcie_snoop[i].enable) ==
1247 			    hdac_pcie_snoop[i].enable)
1248 				break;
1249 			v &= hdac_pcie_snoop[i].mask;
1250 			v |= hdac_pcie_snoop[i].enable;
1251 			pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1);
1252 			v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1253 			if ((v & hdac_pcie_snoop[i].enable) !=
1254 			    hdac_pcie_snoop[i].enable) {
1255 				HDA_BOOTVERBOSE(
1256 					device_printf(dev,
1257 					    "WARNING: Failed to enable PCIe "
1258 					    "snoop!\n");
1259 				);
1260 #if defined(__i386__) || defined(__amd64__)
1261 				sc->flags |= HDAC_F_DMA_NOCACHE;
1262 #endif
1263 			}
1264 			break;
1265 		}
1266 #if defined(__i386__) || defined(__amd64__)
1267 	}
1268 #endif
1269 
1270 	HDA_BOOTHVERBOSE(
1271 		device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n",
1272 		    (sc->flags & HDAC_F_DMA_NOCACHE) ?
1273 		    "Uncacheable" : "PCIe snoop", vendor);
1274 	);
1275 
1276 	/* Allocate resources */
1277 	result = hdac_mem_alloc(sc);
1278 	if (result != 0)
1279 		goto hdac_attach_fail;
1280 
1281 	/* Get Capabilities */
1282 	result = hdac_get_capabilities(sc);
1283 	if (result != 0)
1284 		goto hdac_attach_fail;
1285 
1286 	/* Allocate CORB, RIRB, POS and BDLs dma memory */
1287 	result = hdac_dma_alloc(sc, &sc->corb_dma,
1288 	    sc->corb_size * sizeof(uint32_t));
1289 	if (result != 0)
1290 		goto hdac_attach_fail;
1291 	result = hdac_dma_alloc(sc, &sc->rirb_dma,
1292 	    sc->rirb_size * sizeof(struct hdac_rirb));
1293 	if (result != 0)
1294 		goto hdac_attach_fail;
1295 	sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss,
1296 	    M_HDAC, M_ZERO | M_WAITOK);
1297 	for (i = 0; i < sc->num_ss; i++) {
1298 		result = hdac_dma_alloc(sc, &sc->streams[i].bdl,
1299 		    sizeof(struct hdac_bdle) * HDA_BDL_MAX);
1300 		if (result != 0)
1301 			goto hdac_attach_fail;
1302 	}
1303 	if (sc->quirks_on & HDAC_QUIRK_DMAPOS) {
1304 		if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) {
1305 			HDA_BOOTVERBOSE(
1306 				device_printf(dev, "Failed to "
1307 				    "allocate DMA pos buffer "
1308 				    "(non-fatal)\n");
1309 			);
1310 		} else {
1311 			uint64_t addr = sc->pos_dma.dma_paddr;
1312 
1313 			HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32);
1314 			HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE,
1315 			    (addr & HDAC_DPLBASE_DPLBASE_MASK) |
1316 			    HDAC_DPLBASE_DPLBASE_DMAPBE);
1317 		}
1318 	}
1319 
1320 	result = bus_dma_tag_create(
1321 	    bus_get_dma_tag(sc->dev),		/* parent */
1322 	    HDA_DMA_ALIGNMENT,			/* alignment */
1323 	    0,					/* boundary */
1324 	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
1325 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1326 	    BUS_SPACE_MAXADDR,			/* highaddr */
1327 	    NULL,				/* filtfunc */
1328 	    NULL,				/* fistfuncarg */
1329 	    HDA_BUFSZ_MAX,			/* maxsize */
1330 	    1,					/* nsegments */
1331 	    HDA_BUFSZ_MAX,			/* maxsegsz */
1332 	    0,					/* flags */
1333 	    NULL,				/* lockfunc */
1334 	    NULL,				/* lockfuncarg */
1335 	    &sc->chan_dmat);			/* dmat */
1336 	if (result != 0) {
1337 		device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n",
1338 		     __func__, result);
1339 		goto hdac_attach_fail;
1340 	}
1341 
1342 	/* Quiesce everything */
1343 	HDA_BOOTHVERBOSE(
1344 		device_printf(dev, "Reset controller...\n");
1345 	);
1346 	hdac_reset(sc, true);
1347 
1348 	/* Initialize the CORB and RIRB */
1349 	hdac_corb_init(sc);
1350 	hdac_rirb_init(sc);
1351 
1352 	result = hdac_irq_alloc(sc);
1353 	if (result != 0)
1354 		goto hdac_attach_fail;
1355 
1356 	/* Defer the remainder of initialization until interrupts are enabled */
1357 	sc->intrhook.ich_func = hdac_attach2;
1358 	sc->intrhook.ich_arg = (void *)sc;
1359 	if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
1360 		sc->intrhook.ich_func = NULL;
1361 		hdac_attach2((void *)sc);
1362 	}
1363 
1364 	return (0);
1365 
1366 hdac_attach_fail:
1367 	hdac_irq_free(sc);
1368 	if (sc->streams != NULL)
1369 		for (i = 0; i < sc->num_ss; i++)
1370 			hdac_dma_free(sc, &sc->streams[i].bdl);
1371 	free(sc->streams, M_HDAC);
1372 	hdac_dma_free(sc, &sc->rirb_dma);
1373 	hdac_dma_free(sc, &sc->corb_dma);
1374 	hdac_mem_free(sc);
1375 	snd_mtxfree(sc->lock);
1376 
1377 	return (ENXIO);
1378 }
1379 
1380 static int
1381 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS)
1382 {
1383 	struct hdac_softc *sc;
1384 	device_t *devlist;
1385 	device_t dev;
1386 	int devcount, i, err, val;
1387 
1388 	dev = oidp->oid_arg1;
1389 	sc = device_get_softc(dev);
1390 	if (sc == NULL)
1391 		return (EINVAL);
1392 	val = 0;
1393 	err = sysctl_handle_int(oidp, &val, 0, req);
1394 	if (err != 0 || req->newptr == NULL || val == 0)
1395 		return (err);
1396 
1397 	/* XXX: Temporary. For debugging. */
1398 	if (val == 100) {
1399 		hdac_suspend(dev);
1400 		return (0);
1401 	} else if (val == 101) {
1402 		hdac_resume(dev);
1403 		return (0);
1404 	}
1405 
1406 	bus_topo_lock();
1407 
1408 	if ((err = device_get_children(dev, &devlist, &devcount)) != 0) {
1409 		bus_topo_unlock();
1410 		return (err);
1411 	}
1412 
1413 	hdac_lock(sc);
1414 	for (i = 0; i < devcount; i++)
1415 		HDAC_PINDUMP(devlist[i]);
1416 	hdac_unlock(sc);
1417 
1418 	bus_topo_unlock();
1419 
1420 	free(devlist, M_TEMP);
1421 	return (0);
1422 }
1423 
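/*
 * Estimate a stream's data rate from its 16-bit HDA stream format word
 * (bit 14 selects the 44.1 kHz base rate, bits 13:11 the rate multiplier,
 * bits 10:8 the divisor, bits 6:4 the sample container size and bits 3:0
 * the channel count minus one).  hdac_mdata_rate() yields the bits per
 * second moved for the stream and is used below to size the polling
 * interval.
 */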
1424 static int
1425 hdac_mdata_rate(uint16_t fmt)
1426 {
1427 	static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
1428 	int rate, bits;
1429 
1430 	if (fmt & (1 << 14))
1431 		rate = 44100;
1432 	else
1433 		rate = 48000;
1434 	rate *= ((fmt >> 11) & 0x07) + 1;
1435 	rate /= ((fmt >> 8) & 0x07) + 1;
1436 	bits = mbits[(fmt >> 4) & 0x03];
1437 	bits *= (fmt & 0x0f) + 1;
1438 	return (rate * bits);
1439 }
1440 
1441 static int
1442 hdac_bdata_rate(uint16_t fmt, int output)
1443 {
1444 	static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
1445 	int rate, bits;
1446 
1447 	rate = 48000;
1448 	rate *= ((fmt >> 11) & 0x07) + 1;
1449 	bits = bbits[(fmt >> 4) & 0x03];
1450 	bits *= (fmt & 0x0f) + 1;
1451 	if (!output)
1452 		bits = ((bits + 7) & ~0x07) + 10;
1453 	return (rate * bits);
1454 }
1455 
1456 static void
1457 hdac_poll_reinit(struct hdac_softc *sc)
1458 {
1459 	int i, pollticks, min = 1000000;
1460 	struct hdac_stream *s;
1461 
1462 	if (sc->polling == 0)
1463 		return;
1464 	if (sc->unsol_registered > 0)
1465 		min = hz / 2;
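	/*
	 * Poll roughly twice per block: for each running stream compute the
	 * time (in ticks) needed to consume one block at its data rate,
	 * halve it, clamp it to [1, hz], and keep the smallest value.
	 */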
1466 	for (i = 0; i < sc->num_ss; i++) {
1467 		s = &sc->streams[i];
1468 		if (s->running == 0)
1469 			continue;
1470 		pollticks = ((uint64_t)hz * s->blksz) /
1471 		    (hdac_mdata_rate(s->format) / 8);
1472 		pollticks >>= 1;
1473 		if (pollticks > hz)
1474 			pollticks = hz;
1475 		if (pollticks < 1)
1476 			pollticks = 1;
1477 		if (min > pollticks)
1478 			min = pollticks;
1479 	}
1480 	sc->poll_ival = min;
1481 	if (min == 1000000)
1482 		callout_stop(&sc->poll_callout);
1483 	else
1484 		callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc);
1485 }
1486 
1487 static int
1488 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS)
1489 {
1490 	struct hdac_softc *sc;
1491 	device_t dev;
1492 	uint32_t ctl;
1493 	int err, val;
1494 
1495 	dev = oidp->oid_arg1;
1496 	sc = device_get_softc(dev);
1497 	if (sc == NULL)
1498 		return (EINVAL);
1499 	hdac_lock(sc);
1500 	val = sc->polling;
1501 	hdac_unlock(sc);
1502 	err = sysctl_handle_int(oidp, &val, 0, req);
1503 
1504 	if (err != 0 || req->newptr == NULL)
1505 		return (err);
1506 	if (val < 0 || val > 1)
1507 		return (EINVAL);
1508 
1509 	hdac_lock(sc);
1510 	if (val != sc->polling) {
1511 		if (val == 0) {
1512 			callout_stop(&sc->poll_callout);
1513 			hdac_unlock(sc);
1514 			callout_drain(&sc->poll_callout);
1515 			hdac_lock(sc);
1516 			sc->polling = 0;
1517 			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1518 			ctl |= HDAC_INTCTL_GIE;
1519 			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
1520 		} else {
1521 			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1522 			ctl &= ~HDAC_INTCTL_GIE;
1523 			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
1524 			sc->polling = 1;
1525 			hdac_poll_reinit(sc);
1526 		}
1527 	}
1528 	hdac_unlock(sc);
1529 
1530 	return (err);
1531 }
1532 
1533 static void
1534 hdac_attach2(void *arg)
1535 {
1536 	struct hdac_softc *sc;
1537 	device_t child;
1538 	uint32_t vendorid, revisionid;
1539 	int i;
1540 	uint16_t statests;
1541 
1542 	sc = (struct hdac_softc *)arg;
1543 
1544 	hdac_lock(sc);
1545 
1546 	/* Remove ourselves from the config hooks */
1547 	if (sc->intrhook.ich_func != NULL) {
1548 		config_intrhook_disestablish(&sc->intrhook);
1549 		sc->intrhook.ich_func = NULL;
1550 	}
1551 
1552 	HDA_BOOTHVERBOSE(
1553 		device_printf(sc->dev, "Starting CORB Engine...\n");
1554 	);
1555 	hdac_corb_start(sc);
1556 	HDA_BOOTHVERBOSE(
1557 		device_printf(sc->dev, "Starting RIRB Engine...\n");
1558 	);
1559 	hdac_rirb_start(sc);
1560 
1561 	/*
1562 	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
1563 	 * (status change) interrupts.  The documentation says that we
1564 	 * should not make any assumptions about the state of this register
1565 	 * and set it explicitly.
1566 	 * NB: this needs to be done before the interrupt is enabled as
1567 	 * the handler does not expect this interrupt source.
1568 	 */
1569 	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
1570 
1571 	/*
1572 	 * Read and clear post-reset SDI wake status.
1573 	 * Each set bit corresponds to a codec that came out of reset.
1574 	 */
1575 	statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS);
1576 	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests);
1577 
1578 	HDA_BOOTHVERBOSE(
1579 		device_printf(sc->dev,
1580 		    "Enabling controller interrupt...\n");
1581 	);
1582 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
1583 	    HDAC_GCTL_UNSOL);
1584 	if (sc->polling == 0) {
1585 		HDAC_WRITE_4(&sc->mem, HDAC_INTCTL,
1586 		    HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
1587 	}
1588 	DELAY(1000);
1589 
1590 	HDA_BOOTHVERBOSE(
1591 		device_printf(sc->dev, "Scanning HDA codecs ...\n");
1592 	);
1593 	hdac_unlock(sc);
1594 	for (i = 0; i < HDAC_CODEC_MAX; i++) {
1595 		if (HDAC_STATESTS_SDIWAKE(statests, i)) {
1596 			HDA_BOOTHVERBOSE(
1597 				device_printf(sc->dev,
1598 				    "Found CODEC at address %d\n", i);
1599 			);
1600 			hdac_lock(sc);
1601 			vendorid = hdac_send_command(sc, i,
1602 			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID));
1603 			revisionid = hdac_send_command(sc, i,
1604 			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID));
1605 			hdac_unlock(sc);
1606 			if (vendorid == HDA_INVALID &&
1607 			    revisionid == HDA_INVALID) {
1608 				device_printf(sc->dev,
1609 				    "CODEC at address %d not responding!\n", i);
1610 				continue;
1611 			}
1612 			sc->codecs[i].vendor_id =
1613 			    HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid);
1614 			sc->codecs[i].device_id =
1615 			    HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid);
1616 			sc->codecs[i].revision_id =
1617 			    HDA_PARAM_REVISION_ID_REVISION_ID(revisionid);
1618 			sc->codecs[i].stepping_id =
1619 			    HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid);
1620 			child = device_add_child(sc->dev, "hdacc", -1);
1621 			if (child == NULL) {
1622 				device_printf(sc->dev,
1623 				    "Failed to add CODEC device\n");
1624 				continue;
1625 			}
1626 			device_set_ivars(child, (void *)(intptr_t)i);
1627 			sc->codecs[i].dev = child;
1628 		}
1629 	}
1630 	bus_generic_attach(sc->dev);
1631 
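	/*
	 * Controller-level debug/tuning knobs, usable for example as
	 * (assuming unit 0):
	 *	sysctl dev.hdac.0.polling=1	(switch to callout-driven polling)
	 *	sysctl dev.hdac.0.pindump=1	(dump codec pin states)
	 */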
1632 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
1633 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
1634 	    "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
1635 	    sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data");
1636 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
1637 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
1638 	    "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
1639 	    sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode");
1640 }
1641 
1642 /****************************************************************************
1643  * int hdac_suspend(device_t)
1644  *
1645  * Suspend and power down HDA bus and codecs.
1646  ****************************************************************************/
1647 static int
1648 hdac_suspend(device_t dev)
1649 {
1650 	struct hdac_softc *sc = device_get_softc(dev);
1651 
1652 	HDA_BOOTHVERBOSE(
1653 		device_printf(dev, "Suspend...\n");
1654 	);
1655 	bus_generic_suspend(dev);
1656 
1657 	hdac_lock(sc);
1658 	HDA_BOOTHVERBOSE(
1659 		device_printf(dev, "Reset controller...\n");
1660 	);
1661 	callout_stop(&sc->poll_callout);
1662 	hdac_reset(sc, false);
1663 	hdac_unlock(sc);
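	/*
	 * Drain the poll callout and the unsolicited-response task with the
	 * softc lock released, since both take the lock when they run.
	 */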
1664 	callout_drain(&sc->poll_callout);
1665 	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
1666 	HDA_BOOTHVERBOSE(
1667 		device_printf(dev, "Suspend done\n");
1668 	);
1669 	return (0);
1670 }
1671 
1672 /****************************************************************************
1673  * int hdac_resume(device_t)
1674  *
1675  * Power up and restore the HDA bus and codec state.
1676  ****************************************************************************/
1677 static int
1678 hdac_resume(device_t dev)
1679 {
1680 	struct hdac_softc *sc = device_get_softc(dev);
1681 	int error;
1682 
1683 	HDA_BOOTHVERBOSE(
1684 		device_printf(dev, "Resume...\n");
1685 	);
1686 	hdac_lock(sc);
1687 
1688 	/* Quiesce everything */
1689 	HDA_BOOTHVERBOSE(
1690 		device_printf(dev, "Reset controller...\n");
1691 	);
1692 	hdac_reset(sc, true);
1693 
1694 	/* Initialize the CORB and RIRB */
1695 	hdac_corb_init(sc);
1696 	hdac_rirb_init(sc);
1697 
1698 	HDA_BOOTHVERBOSE(
1699 		device_printf(dev, "Starting CORB Engine...\n");
1700 	);
1701 	hdac_corb_start(sc);
1702 	HDA_BOOTHVERBOSE(
1703 		device_printf(dev, "Starting RIRB Engine...\n");
1704 	);
1705 	hdac_rirb_start(sc);
1706 
1707 	/*
1708 	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
1709 	 * (status change) events.  The documentation says that we should
1710 	 * not make any assumptions about the state of this register and
1711 	 * should set it explicitly.
1712 	 * Also, clear HDAC_STATESTS.
1713 	 * NB: this needs to be done before the interrupt is enabled as
1714 	 * the handler does not expect this interrupt source.
1715 	 */
1716 	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
1717 	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK);
1718 
1719 	HDA_BOOTHVERBOSE(
1720 		device_printf(dev, "Enabling controller interrupt...\n");
1721 	);
1722 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
1723 	    HDAC_GCTL_UNSOL);
1724 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
1725 	DELAY(1000);
1726 	hdac_poll_reinit(sc);
1727 	hdac_unlock(sc);
1728 
1729 	error = bus_generic_resume(dev);
1730 	HDA_BOOTHVERBOSE(
1731 		device_printf(dev, "Resume done\n");
1732 	);
1733 	return (error);
1734 }
1735 
1736 /****************************************************************************
1737  * int hdac_detach(device_t)
1738  *
1739  * Detach and free up resources utilized by the hdac device.
1740  ****************************************************************************/
1741 static int
1742 hdac_detach(device_t dev)
1743 {
1744 	struct hdac_softc *sc = device_get_softc(dev);
1745 	device_t *devlist;
1746 	int cad, i, devcount, error;
1747 
1748 	if ((error = device_get_children(dev, &devlist, &devcount)) != 0)
1749 		return (error);
1750 	for (i = 0; i < devcount; i++) {
1751 		cad = (intptr_t)device_get_ivars(devlist[i]);
1752 		if ((error = device_delete_child(dev, devlist[i])) != 0) {
1753 			free(devlist, M_TEMP);
1754 			return (error);
1755 		}
1756 		sc->codecs[cad].dev = NULL;
1757 	}
1758 	free(devlist, M_TEMP);
1759 
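	/*
	 * With all codec children detached, quiesce the controller and
	 * release the interrupt, stream BDLs, DMA areas, register mapping
	 * and lock.
	 */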
1760 	hdac_lock(sc);
1761 	hdac_reset(sc, false);
1762 	hdac_unlock(sc);
1763 	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
1764 	hdac_irq_free(sc);
1765 
1766 	for (i = 0; i < sc->num_ss; i++)
1767 		hdac_dma_free(sc, &sc->streams[i].bdl);
1768 	free(sc->streams, M_HDAC);
1769 	hdac_dma_free(sc, &sc->pos_dma);
1770 	hdac_dma_free(sc, &sc->rirb_dma);
1771 	hdac_dma_free(sc, &sc->corb_dma);
1772 	if (sc->chan_dmat != NULL) {
1773 		bus_dma_tag_destroy(sc->chan_dmat);
1774 		sc->chan_dmat = NULL;
1775 	}
1776 	hdac_mem_free(sc);
1777 	snd_mtxfree(sc->lock);
1778 	return (0);
1779 }
1780 
1781 static bus_dma_tag_t
1782 hdac_get_dma_tag(device_t dev, device_t child)
1783 {
1784 	struct hdac_softc *sc = device_get_softc(dev);
1785 
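	/*
	 * Codec children allocate their stream buffers from the
	 * controller's channel DMA tag so its address and boundary
	 * constraints apply to them as well.
	 */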
1786 	return (sc->chan_dmat);
1787 }
1788 
1789 static int
1790 hdac_print_child(device_t dev, device_t child)
1791 {
1792 	int retval;
1793 
1794 	retval = bus_print_child_header(dev, child);
1795 	retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child));
1796 	retval += bus_print_child_footer(dev, child);
1797 
1798 	return (retval);
1799 }
1800 
1801 static int
1802 hdac_child_location(device_t dev, device_t child, struct sbuf *sb)
1803 {
1804 
1805 	sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child));
1806 	return (0);
1807 }
1808 
1809 static int
1810 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb)
1811 {
1812 	struct hdac_softc *sc = device_get_softc(dev);
1813 	nid_t cad = (uintptr_t)device_get_ivars(child);
1814 
1815 	sbuf_printf(sb,
1816 	    "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x",
1817 	    sc->codecs[cad].vendor_id, sc->codecs[cad].device_id,
1818 	    sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id);
1819 	return (0);
1820 }
1821 
1822 static int
1823 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
1824 {
1825 	struct hdac_softc *sc = device_get_softc(dev);
1826 	nid_t cad = (uintptr_t)device_get_ivars(child);
1827 
1828 	switch (which) {
1829 	case HDA_IVAR_CODEC_ID:
1830 		*result = cad;
1831 		break;
1832 	case HDA_IVAR_VENDOR_ID:
1833 		*result = sc->codecs[cad].vendor_id;
1834 		break;
1835 	case HDA_IVAR_DEVICE_ID:
1836 		*result = sc->codecs[cad].device_id;
1837 		break;
1838 	case HDA_IVAR_REVISION_ID:
1839 		*result = sc->codecs[cad].revision_id;
1840 		break;
1841 	case HDA_IVAR_STEPPING_ID:
1842 		*result = sc->codecs[cad].stepping_id;
1843 		break;
1844 	case HDA_IVAR_SUBVENDOR_ID:
1845 		*result = pci_get_subvendor(dev);
1846 		break;
1847 	case HDA_IVAR_SUBDEVICE_ID:
1848 		*result = pci_get_subdevice(dev);
1849 		break;
1850 	case HDA_IVAR_DMA_NOCACHE:
1851 		*result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0;
1852 		break;
1853 	case HDA_IVAR_STRIPES_MASK:
1854 		*result = (1 << (1 << sc->num_sdo)) - 1;
1855 		break;
1856 	default:
1857 		return (ENOENT);
1858 	}
1859 	return (0);
1860 }
1861 
1862 static struct mtx *
1863 hdac_get_mtx(device_t dev, device_t child)
1864 {
1865 	struct hdac_softc *sc = device_get_softc(dev);
1866 
1867 	return (sc->lock);
1868 }
1869 
1870 static uint32_t
1871 hdac_codec_command(device_t dev, device_t child, uint32_t verb)
1872 {
1873 
1874 	return (hdac_send_command(device_get_softc(dev),
1875 	    (intptr_t)device_get_ivars(child), verb));
1876 }
1877 
1878 static int
1879 hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
1880 {
1881 	int i, ss;
1882 
1883 	ss = -1;
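	/*
	 * sc->streams[] mirrors the controller layout: input stream
	 * descriptors first, then output, then bidirectional, so the
	 * index found here doubles as the descriptor number.
	 */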
1884 	/* Search the dedicated ISS/OSS descriptors first. */
1885 	if (dir == 0) {
1886 		for (i = 0; i < sc->num_iss; i++) {
1887 			if (sc->streams[i].stream == stream) {
1888 				ss = i;
1889 				break;
1890 			}
1891 		}
1892 	} else {
1893 		for (i = 0; i < sc->num_oss; i++) {
1894 			if (sc->streams[i + sc->num_iss].stream == stream) {
1895 				ss = i + sc->num_iss;
1896 				break;
1897 			}
1898 		}
1899 	}
1900 	/* Fall back to BSS. */
1901 	if (ss == -1) {
1902 		for (i = 0; i < sc->num_bss; i++) {
1903 			if (sc->streams[i + sc->num_iss + sc->num_oss].stream
1904 			    == stream) {
1905 				ss = i + sc->num_iss + sc->num_oss;
1906 				break;
1907 			}
1908 		}
1909 	}
1910 	return (ss);
1911 }
1912 
1913 static int
1914 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
1915     uint32_t **dmapos)
1916 {
1917 	struct hdac_softc *sc = device_get_softc(dev);
1918 	nid_t cad = (uintptr_t)device_get_ivars(child);
1919 	int stream, ss, bw, maxbw, prevbw;
1920 
1921 	/* Look for empty stream. */
1922 	ss = hdac_find_stream(sc, dir, 0);
1923 
1924 	/* Bail out if no free stream was found. */
1925 	if (ss < 0)
1926 		return (0);
1927 
1928 	/* Check bus bandwidth. */
1929 	bw = hdac_bdata_rate(format, dir);
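	/*
	 * The limits below are the link payload capacity in bits per
	 * second: 48000 frames/s times 960 usable output bits per frame
	 * per SDO line, or 464 usable input bits per frame on the codec's
	 * SDI line.
	 */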
1930 	if (dir == 1) {
1931 		bw *= 1 << (sc->num_sdo - stripe);
1932 		prevbw = sc->sdo_bw_used;
1933 		maxbw = 48000 * 960 * (1 << sc->num_sdo);
1934 	} else {
1935 		prevbw = sc->codecs[cad].sdi_bw_used;
1936 		maxbw = 48000 * 464;
1937 	}
1938 	HDA_BOOTHVERBOSE(
1939 		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
1940 		    (bw + prevbw) / 1000, maxbw / 1000,
1941 		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
1942 	);
1943 	if (bw + prevbw > maxbw)
1944 		return (0);
1945 	if (dir == 1)
1946 		sc->sdo_bw_used += bw;
1947 	else
1948 		sc->codecs[cad].sdi_bw_used += bw;
1949 
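	/*
	 * Stream tags run from 1 to 15 and are assigned per direction:
	 * input and output streams count up from 1 within their own
	 * blocks, while bidirectional streams count down from 15.
	 */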
1950 	/* Allocate stream number */
1951 	if (ss >= sc->num_iss + sc->num_oss)
1952 		stream = 15 - (ss - sc->num_iss - sc->num_oss);
1953 	else if (ss >= sc->num_iss)
1954 		stream = ss - sc->num_iss + 1;
1955 	else
1956 		stream = ss + 1;
1957 
1958 	sc->streams[ss].dev = child;
1959 	sc->streams[ss].dir = dir;
1960 	sc->streams[ss].stream = stream;
1961 	sc->streams[ss].bw = bw;
1962 	sc->streams[ss].format = format;
1963 	sc->streams[ss].stripe = stripe;
1964 	if (dmapos != NULL) {
1965 		if (sc->pos_dma.dma_vaddr != NULL)
1966 			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
1967 		else
1968 			*dmapos = NULL;
1969 	}
1970 	return (stream);
1971 }
1972 
1973 static void
1974 hdac_stream_free(device_t dev, device_t child, int dir, int stream)
1975 {
1976 	struct hdac_softc *sc = device_get_softc(dev);
1977 	nid_t cad = (uintptr_t)device_get_ivars(child);
1978 	int ss;
1979 
1980 	ss = hdac_find_stream(sc, dir, stream);
1981 	KASSERT(ss >= 0,
1982 	    ("Free for not allocated stream (%d/%d)\n", dir, stream));
1983 	if (dir == 1)
1984 		sc->sdo_bw_used -= sc->streams[ss].bw;
1985 	else
1986 		sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
1987 	sc->streams[ss].stream = 0;
1988 	sc->streams[ss].dev = NULL;
1989 }
1990 
1991 static int
1992 hdac_stream_start(device_t dev, device_t child, int dir, int stream,
1993     bus_addr_t buf, int blksz, int blkcnt)
1994 {
1995 	struct hdac_softc *sc = device_get_softc(dev);
1996 	struct hdac_bdle *bdle;
1997 	uint64_t addr;
1998 	int i, ss, off;
1999 	uint32_t ctl;
2000 
2001 	ss = hdac_find_stream(sc, dir, stream);
2002 	KASSERT(ss >= 0,
2003 	    ("Start for not allocated stream (%d/%d)\n", dir, stream));
2004 
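	/*
	 * Build the buffer descriptor list: one entry per block, each
	 * covering blksz bytes of the buffer, with IOC set so the
	 * controller interrupts at every block boundary.
	 */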
2005 	addr = (uint64_t)buf;
2006 	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
2007 	for (i = 0; i < blkcnt; i++, bdle++) {
2008 		bdle->addrl = htole32((uint32_t)addr);
2009 		bdle->addrh = htole32((uint32_t)(addr >> 32));
2010 		bdle->len = htole32(blksz);
2011 		bdle->ioc = htole32(1);
2012 		addr += blksz;
2013 	}
2014 
2015 	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
2016 	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);
2017 
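	/* Each stream descriptor occupies a 0x20-byte register block. */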
2018 	off = ss << 5;
2019 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
2020 	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
2021 	addr = sc->streams[ss].bdl.dma_paddr;
2022 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
2023 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));
2024 
2025 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
2026 	if (dir)
2027 		ctl |= HDAC_SDCTL2_DIR;
2028 	else
2029 		ctl &= ~HDAC_SDCTL2_DIR;
2030 	ctl &= ~HDAC_SDCTL2_STRM_MASK;
2031 	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
2032 	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
2033 	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
2034 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);
2035 
2036 	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);
2037 
2038 	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
2039 	ctl |= 1 << ss;
2040 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
2041 
2042 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
2043 	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
2044 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2045 	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
2046 	    HDAC_SDCTL_RUN;
2047 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2048 
2049 	sc->streams[ss].blksz = blksz;
2050 	sc->streams[ss].running = 1;
2051 	hdac_poll_reinit(sc);
2052 	return (0);
2053 }
2054 
2055 static void
2056 hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
2057 {
2058 	struct hdac_softc *sc = device_get_softc(dev);
2059 	int ss, off;
2060 	uint32_t ctl;
2061 
2062 	ss = hdac_find_stream(sc, dir, stream);
2063 	KASSERT(ss >= 0,
2064 	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));
2065 
2066 	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
2067 	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);
2068 
2069 	off = ss << 5;
2070 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2071 	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
2072 	    HDAC_SDCTL_RUN);
2073 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2074 
2075 	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
2076 	ctl &= ~(1 << ss);
2077 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
2078 
2079 	sc->streams[ss].running = 0;
2080 	hdac_poll_reinit(sc);
2081 }
2082 
2083 static void
2084 hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
2085 {
2086 	struct hdac_softc *sc = device_get_softc(dev);
2087 	int timeout = 1000;
2088 	int to = timeout;
2089 	int ss, off;
2090 	uint32_t ctl;
2091 
2092 	ss = hdac_find_stream(sc, dir, stream);
2093 	KASSERT(ss >= 0,
2094 	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));
2095 
2096 	off = ss << 5;
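	/*
	 * Stream reset is a two-phase handshake: assert SRST and wait for
	 * the controller to report it set, then deassert it and wait for
	 * the bit to clear before the stream may be used again.
	 */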
2097 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2098 	ctl |= HDAC_SDCTL_SRST;
2099 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2100 	do {
2101 		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2102 		if (ctl & HDAC_SDCTL_SRST)
2103 			break;
2104 		DELAY(10);
2105 	} while (--to);
2106 	if (!(ctl & HDAC_SDCTL_SRST))
2107 		device_printf(dev, "Timeout asserting stream reset\n");
2108 	ctl &= ~HDAC_SDCTL_SRST;
2109 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2110 	to = timeout;
2111 	do {
2112 		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2113 		if (!(ctl & HDAC_SDCTL_SRST))
2114 			break;
2115 		DELAY(10);
2116 	} while (--to);
2117 	if (ctl & HDAC_SDCTL_SRST)
2118 		device_printf(dev, "Reset timeout!\n");
2119 }
2120 
2121 static uint32_t
2122 hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
2123 {
2124 	struct hdac_softc *sc = device_get_softc(dev);
2125 	int ss, off;
2126 
2127 	ss = hdac_find_stream(sc, dir, stream);
2128 	KASSERT(ss >= 0,
2129 	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));
2130 
2131 	off = ss << 5;
2132 	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
2133 }
2134 
2135 static int
2136 hdac_unsol_alloc(device_t dev, device_t child, int tag)
2137 {
2138 	struct hdac_softc *sc = device_get_softc(dev);
2139 
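	/*
	 * Track how many unsolicited-response handlers codecs have
	 * registered; polling parameters are recomputed whenever the
	 * count changes.
	 */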
2140 	sc->unsol_registered++;
2141 	hdac_poll_reinit(sc);
2142 	return (tag);
2143 }
2144 
2145 static void
2146 hdac_unsol_free(device_t dev, device_t child, int tag)
2147 {
2148 	struct hdac_softc *sc = device_get_softc(dev);
2149 
2150 	sc->unsol_registered--;
2151 	hdac_poll_reinit(sc);
2152 }
2153 
2154 static device_method_t hdac_methods[] = {
2155 	/* device interface */
2156 	DEVMETHOD(device_probe,		hdac_probe),
2157 	DEVMETHOD(device_attach,	hdac_attach),
2158 	DEVMETHOD(device_detach,	hdac_detach),
2159 	DEVMETHOD(device_suspend,	hdac_suspend),
2160 	DEVMETHOD(device_resume,	hdac_resume),
2161 	/* Bus interface */
2162 	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
2163 	DEVMETHOD(bus_print_child,	hdac_print_child),
2164 	DEVMETHOD(bus_child_location,	hdac_child_location),
2165 	DEVMETHOD(bus_child_pnpinfo,	hdac_child_pnpinfo_method),
2166 	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
2167 	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
2168 	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
2169 	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
2170 	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
2171 	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
2172 	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
2173 	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
2174 	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
2175 	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
2176 	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
2177 	DEVMETHOD_END
2178 };
2179 
2180 static driver_t hdac_driver = {
2181 	"hdac",
2182 	hdac_methods,
2183 	sizeof(struct hdac_softc),
2184 };
2185 
2186 DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);
2187