// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-sst-dsp.c - SKL SST library generic function
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <sound/pcm.h>

#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "skl.h"

/* various timeout values */
#define SKL_DSP_PU_TO		50
#define SKL_DSP_PD_TO		50
#define SKL_DSP_RESET_TO	50

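/* Update the driver's DSP state under the context mutex */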
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
{
	mutex_lock(&ctx->mutex);
	ctx->sst_state = state;
	mutex_unlock(&ctx->mutex);
}

/*
 * Initialize core power state and usage count. To be called after a
 * successful first boot, at which point core 0 is running and all the
 * other cores are in reset.
 */
void skl_dsp_init_core_state(struct sst_dsp *ctx)
{
	struct skl_dev *skl = ctx->thread_context;
	int i;

	skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
	skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;

	for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) {
		skl->cores.state[i] = SKL_DSP_RESET;
		skl->cores.usage_count[i] = 0;
	}
}

/* Get the mask for all enabled cores */
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx)
{
	struct skl_dev *skl = ctx->thread_context;
	unsigned int core_mask, en_cores_mask;
	u32 val;

	core_mask = SKL_DSP_CORES_MASK(skl->cores.count);

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

	/* Cores having CPA bit set */
	en_cores_mask = (val & SKL_ADSPCS_CPA_MASK(core_mask)) >>
			SKL_ADSPCS_CPA_SHIFT;

	/* And cores having CRST bit cleared */
	en_cores_mask &= (~val & SKL_ADSPCS_CRST_MASK(core_mask)) >>
			SKL_ADSPCS_CRST_SHIFT;

	/* And cores having CSTALL bit cleared */
	en_cores_mask &= (~val & SKL_ADSPCS_CSTALL_MASK(core_mask)) >>
			SKL_ADSPCS_CSTALL_SHIFT;
	en_cores_mask &= core_mask;

	dev_dbg(ctx->dev, "DSP enabled cores mask = %x\n", en_cores_mask);

	return en_cores_mask;
}

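/* Put the cores in core_mask into reset and verify CRST gets set */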
static int
skl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_ADSPCS_CRST_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_DSP_RESET_TO,
			"Set reset");
	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
				SKL_ADSPCS_CRST_MASK(core_mask)) !=
				SKL_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(ctx->dev, "Set reset state failed: core_mask %x\n",
							core_mask);
		ret = -EIO;
	}

	return ret;
}

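/* Release the cores in core_mask from reset and verify CRST is cleared */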
int skl_dsp_core_unset_reset_state(
		struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	dev_dbg(ctx->dev, "In %s\n", __func__);

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
				SKL_ADSPCS_CRST_MASK(core_mask), 0);

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CRST_MASK(core_mask),
			0,
			SKL_DSP_RESET_TO,
			"Unset reset");

	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
				SKL_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(ctx->dev, "Unset reset state failed: core_mask %x\n",
				core_mask);
		ret = -EIO;
	}

	return ret;
}

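/* Check that every core in core_mask is powered, unstalled and out of reset */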
static bool
is_skl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

	is_enable = ((val & SKL_ADSPCS_CPA_MASK(core_mask)) &&
			(val & SKL_ADSPCS_SPA_MASK(core_mask)) &&
			!(val & SKL_ADSPCS_CRST_MASK(core_mask)) &&
			!(val & SKL_ADSPCS_CSTALL_MASK(core_mask)));

	dev_dbg(ctx->dev, "DSP core(s) enabled? %d : core_mask %x\n",
						is_enable, core_mask);

	return is_enable;
}

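/* Stall the cores in core_mask, then put them into reset */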
static int skl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* stall core */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CSTALL_MASK(core_mask),
			SKL_ADSPCS_CSTALL_MASK(core_mask));

	/* set reset state */
	return skl_dsp_core_set_reset_state(ctx, core_mask);
}

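/* Take the cores in core_mask out of reset and unstall them */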
int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* unset reset state */
	ret = skl_dsp_core_unset_reset_state(ctx, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(ctx->dev, "unstall/run core: core_mask = %x\n", core_mask);
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CSTALL_MASK(core_mask), 0);

	if (!is_skl_dsp_core_enable(ctx, core_mask)) {
		skl_dsp_reset_core(ctx, core_mask);
		dev_err(ctx->dev, "DSP start core failed: core_mask %x\n",
							core_mask);
		ret = -EIO;
	}

	return ret;
}

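/* Power up the cores in core_mask and wait for CPA to be set */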
int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_SPA_MASK(core_mask),
			SKL_ADSPCS_SPA_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CPA_MASK(core_mask),
			SKL_ADSPCS_CPA_MASK(core_mask),
			SKL_DSP_PU_TO,
			"Power up");

	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
			SKL_ADSPCS_CPA_MASK(core_mask)) !=
			SKL_ADSPCS_CPA_MASK(core_mask)) {
		dev_err(ctx->dev, "DSP core power up failed: core_mask %x\n",
				core_mask);
		ret = -EIO;
	}

	return ret;
}

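/* Power down the cores in core_mask and wait for CPA to clear */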
int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
				SKL_ADSPCS_SPA_MASK(core_mask), 0);

	/* poll with timeout to check if operation successful */
	return sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CPA_MASK(core_mask),
			0,
			SKL_DSP_PD_TO,
			"Power down");
}

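/* Power up and start the cores in core_mask */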
int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* power up */
	ret = skl_dsp_core_power_up(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core power up failed: core_mask %x\n",
							core_mask);
		return ret;
	}

	return skl_dsp_start_core(ctx, core_mask);
}

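/* Reset, then power down, the cores in core_mask */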
int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	ret = skl_dsp_reset_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core reset failed: core_mask %x\n",
							core_mask);
		return ret;
	}

	/* power down core */
	ret = skl_dsp_core_power_down(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core power down fail mask %x: %d\n",
							core_mask, ret);
		return ret;
	}

	if (is_skl_dsp_core_enable(ctx, core_mask)) {
		dev_err(ctx->dev, "dsp core disable fail mask %x: %d\n",
							core_mask, ret);
		ret = -EIO;
	}

	return ret;
}

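/*
 * Put core 0 into a known running state: reset and restart it if it is
 * already enabled, otherwise power it down and back up before starting it.
 */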
int skl_dsp_boot(struct sst_dsp *ctx)
{
	int ret;

	if (is_skl_dsp_core_enable(ctx, SKL_DSP_CORE0_MASK)) {
		ret = skl_dsp_reset_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 reset fail: %d\n", ret);
			return ret;
		}

		ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 start fail: %d\n", ret);
			return ret;
		}
	} else {
		ret = skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 disable fail: %d\n", ret);
			return ret;
		}
		ret = skl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
	}

	return ret;
}

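/*
 * Top-half interrupt handler: latch the interrupt status, mask the IPC
 * and/or code loader DMA sources that fired and defer the real work to
 * the threaded handler.
 */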
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
{
	struct sst_dsp *ctx = dev_id;
	u32 val;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&ctx->spinlock);

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
	ctx->intr_status = val;

	if (val == 0xffffffff) {
		spin_unlock(&ctx->spinlock);
		return IRQ_NONE;
	}

	if (val & SKL_ADSPIS_IPC) {
		skl_ipc_int_disable(ctx);
		result = IRQ_WAKE_THREAD;
	}

	if (val & SKL_ADSPIS_CL_DMA) {
		skl_cldma_int_disable(ctx);
		result = IRQ_WAKE_THREAD;
	}

	spin_unlock(&ctx->spinlock);

	return result;
}

/*
 * skl_dsp_get_core()/skl_dsp_put_core() are called from the DAPM context,
 * under the dapm mutex. Hence no separate lock is used here.
 */
int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *skl = ctx->thread_context;
	int ret = 0;

	if (core_id >= skl->cores.count) {
		dev_err(ctx->dev, "invalid core id: %d\n", core_id);
		return -EINVAL;
	}

	skl->cores.usage_count[core_id]++;

	if (skl->cores.state[core_id] == SKL_DSP_RESET) {
		ret = ctx->fw_ops.set_state_D0(ctx, core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to get core%d\n", core_id);
			goto out;
		}
	}

out:
	dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
			core_id, skl->cores.state[core_id],
			skl->cores.usage_count[core_id]);

	return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_get_core);

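/* Drop a reference on core_id; put the core into D3 once the count hits zero */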
int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *skl = ctx->thread_context;
	int ret = 0;

	if (core_id >= skl->cores.count) {
		dev_err(ctx->dev, "invalid core id: %d\n", core_id);
		return -EINVAL;
	}

	if ((--skl->cores.usage_count[core_id] == 0) &&
		(skl->cores.state[core_id] != SKL_DSP_RESET)) {
		ret = ctx->fw_ops.set_state_D3(ctx, core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to put core %d: %d\n",
					core_id, ret);
			skl->cores.usage_count[core_id]++;
		}
	}

	dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
			core_id, skl->cores.state[core_id],
			skl->cores.usage_count[core_id]);

	return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_put_core);

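/* Wake the DSP by taking a reference on core 0 */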
int skl_dsp_wake(struct sst_dsp *ctx)
{
	return skl_dsp_get_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);

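/* Let the DSP sleep by dropping the core 0 reference */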
int skl_dsp_sleep(struct sst_dsp *ctx)
{
	return skl_dsp_put_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);

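/* Allocate an sst_dsp context, set it up and run the device init callback, if any */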
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
		struct sst_dsp_device *sst_dev, int irq)
{
	int ret;
	struct sst_dsp *sst;

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->sst_dev = sst_dev;
	sst->irq = irq;
	sst->ops = sst_dev->ops;
	sst->thread_context = sst_dev->thread_context;

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		ret = sst->ops->init(sst);
		if (ret < 0)
			return NULL;
	}

	return sst;
}

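/* Request the shared, threaded IRQ used by the audio DSP */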
int skl_dsp_acquire_irq(struct sst_dsp *sst)
{
	struct sst_dsp_device *sst_dev = sst->sst_dev;
	int ret;

	/* Register the ISR */
	ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (ret)
		dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
			       sst->irq);

	return ret;
}

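/* Disable DSP interrupts, release the IRQ and power down core 0 */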
void skl_dsp_free(struct sst_dsp *dsp)
{
	skl_ipc_int_disable(dsp);

	free_irq(dsp->irq, dsp);
	skl_ipc_op_int_disable(dsp);
	skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);

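/* Report whether the driver considers the DSP firmware to be running */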
bool is_skl_dsp_running(struct sst_dsp *ctx)
{
	return (ctx->sst_state == SKL_DSP_RUNNING);
}
EXPORT_SYMBOL_GPL(is_skl_dsp_running);