1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
3
4 #include <linux/clk.h>
5 #include <linux/interconnect.h>
6 #include <linux/pm_domain.h>
7 #include <linux/pm_opp.h>
8 #include <soc/qcom/cmd-db.h>
9 #include <drm/drm_gem.h>
10
11 #include "a6xx_gpu.h"
12 #include "a6xx_gmu.xml.h"
13 #include "msm_gem.h"
14 #include "msm_gpu_trace.h"
15 #include "msm_mmu.h"
16
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
18 {
19 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
20 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
21 struct msm_gpu *gpu = &adreno_gpu->base;
22
23 /* FIXME: add a banner here */
24 gmu->hung = true;
25
26 /* Turn off the hangcheck timer while we are resetting */
27 del_timer(&gpu->hangcheck_timer);
28
29 /* Queue the GPU handler because we need to treat this as a recovery */
30 kthread_queue_work(gpu->worker, &gpu->recover_work);
31 }
32
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
34 {
35 struct a6xx_gmu *gmu = data;
36 u32 status;
37
38 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
39 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
40
41 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
42 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
43
44 a6xx_gmu_fault(gmu);
45 }
46
47 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
48 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
49
50 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
51 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
52 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
53
54 return IRQ_HANDLED;
55 }
56
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
58 {
59 struct a6xx_gmu *gmu = data;
60 u32 status;
61
62 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
63 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
64
65 if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
66 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
67
68 a6xx_gmu_fault(gmu);
69 }
70
71 return IRQ_HANDLED;
72 }
73
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
75 {
76 u32 val;
77
78 /* This can be called from gpu state code so make sure GMU is valid */
79 if (!gmu->initialized)
80 return false;
81
82 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
83
84 return !(val &
85 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
86 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
87 }
88
89 /* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
91 {
92 u32 val;
93
94 /* This can be called from gpu state code so make sure GMU is valid */
95 if (!gmu->initialized)
96 return false;
97
98 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
99
100 return !(val &
101 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
102 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
103 }
104
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
106 {
107 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
108 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
109 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
110 u32 perf_index;
111 unsigned long gpu_freq;
112 int ret = 0;
113
114 gpu_freq = dev_pm_opp_get_freq(opp);
115
116 if (gpu_freq == gmu->freq)
117 return;
118
119 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
120 if (gpu_freq == gmu->gpu_freqs[perf_index])
121 break;
122
123 gmu->current_perf_index = perf_index;
124 gmu->freq = gmu->gpu_freqs[perf_index];
125
126 trace_msm_gmu_freq_change(gmu->freq, perf_index);
127
128 /*
129 * This can get called from devfreq while the hardware is idle. Don't
130 * bring up the power if it isn't already active
131 */
132 if (pm_runtime_get_if_in_use(gmu->dev) == 0)
133 return;
134
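	/*
	 * Newer (non-legacy) firmware takes DCVS requests over HFI; the
	 * legacy path below programs the DCVS registers directly and kicks
	 * the GMU with an OOB interrupt.
	 */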
135 if (!gmu->legacy) {
136 a6xx_hfi_set_freq(gmu, perf_index);
137 dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
138 pm_runtime_put(gmu->dev);
139 return;
140 }
141
142 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
143
144 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
145 ((3 & 0xf) << 28) | perf_index);
146
147 /*
148 * Send an invalid index as a vote for the bus bandwidth and let the
149 * firmware decide on the right vote
150 */
151 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
152
153 /* Set and clear the OOB for DCVS to trigger the GMU */
154 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
155 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
156
157 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
158 if (ret)
159 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
160
161 dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
162 pm_runtime_put(gmu->dev);
163 }
164
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
166 {
167 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
168 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
169 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
170
171 return gmu->freq;
172 }
173
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
175 {
176 u32 val;
177 int local = gmu->idle_level;
178
179 /* SPTP and IFPC both report as IFPC */
180 if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
181 local = GMU_IDLE_STATE_IFPC;
182
183 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
184
185 if (val == local) {
186 if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
187 !a6xx_gmu_gx_is_on(gmu))
188 return true;
189 }
190
191 return false;
192 }
193
194 /* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
196 {
197 return spin_until(a6xx_gmu_check_idle_level(gmu));
198 }
199
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
201 {
202 int ret;
203 u32 val;
204 u32 mask, reset_val;
205
206 val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
207 if (val <= 0x20010004) {
208 mask = 0xffffffff;
209 reset_val = 0xbabeface;
210 } else {
211 mask = 0x1ff;
212 reset_val = 0x100;
213 }
214
215 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
216
217 /* Set the log wptr index
218 * note: downstream saves the value in poweroff and restores it here
219 */
220 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
221
222 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
223
224 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
225 (val & mask) == reset_val, 100, 10000);
226
227 if (ret)
228 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
229
230 return ret;
231 }
232
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
234 {
235 u32 val;
236 int ret;
237
238 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
239
240 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
241 val & 1, 100, 10000);
242 if (ret)
243 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
244
245 return ret;
246 }
247
248 struct a6xx_gmu_oob_bits {
249 int set, ack, set_new, ack_new, clear, clear_new;
250 const char *name;
251 };
252
253 /* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
255 */
256 static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
257 [GMU_OOB_GPU_SET] = {
258 .name = "GPU_SET",
259 .set = 16,
260 .ack = 24,
261 .set_new = 30,
262 .ack_new = 31,
263 .clear = 24,
264 .clear_new = 31,
265 },
266
267 [GMU_OOB_PERFCOUNTER_SET] = {
268 .name = "PERFCOUNTER",
269 .set = 17,
270 .ack = 25,
271 .set_new = 28,
272 .ack_new = 30,
273 .clear = 25,
274 .clear_new = 29,
275 },
276
277 [GMU_OOB_BOOT_SLUMBER] = {
278 .name = "BOOT_SLUMBER",
279 .set = 22,
280 .ack = 30,
281 .clear = 30,
282 },
283
284 [GMU_OOB_DCVS_SET] = {
285 .name = "GPU_DCVS",
286 .set = 23,
287 .ack = 31,
288 .clear = 31,
289 },
290 };
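
/*
 * Worked example (legacy firmware): a GPU_SET request writes bit 16 of
 * HOST2GMU_INTR_SET and then polls for ack bit 24 in GMU2HOST_INTR_INFO.
 * Newer firmware uses the *_new bits instead; a zero set_new/ack_new pair
 * marks a request that the non-legacy firmware does not support.
 */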
291
292 /* Trigger a OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
294 {
295 int ret;
296 u32 val;
297 int request, ack;
298
299 if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
300 return -EINVAL;
301
302 if (gmu->legacy) {
303 request = a6xx_gmu_oob_bits[state].set;
304 ack = a6xx_gmu_oob_bits[state].ack;
305 } else {
306 request = a6xx_gmu_oob_bits[state].set_new;
307 ack = a6xx_gmu_oob_bits[state].ack_new;
308 if (!request || !ack) {
309 DRM_DEV_ERROR(gmu->dev,
310 "Invalid non-legacy GMU request %s\n",
311 a6xx_gmu_oob_bits[state].name);
312 return -EINVAL;
313 }
314 }
315
	/* Trigger the requested OOB operation */
317 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
318
319 /* Wait for the acknowledge interrupt */
320 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
321 val & (1 << ack), 100, 10000);
322
323 if (ret)
324 DRM_DEV_ERROR(gmu->dev,
325 "Timeout waiting for GMU OOB set %s: 0x%x\n",
326 a6xx_gmu_oob_bits[state].name,
327 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
328
329 /* Clear the acknowledge interrupt */
330 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
331
332 return ret;
333 }
334
335 /* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
337 {
338 int bit;
339
340 if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
341 return;
342
343 if (gmu->legacy)
344 bit = a6xx_gmu_oob_bits[state].clear;
345 else
346 bit = a6xx_gmu_oob_bits[state].clear_new;
347
348 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
349 }
350
/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
353 {
354 int ret;
355 u32 val;
356
357 if (!gmu->legacy)
358 return 0;
359
360 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
361
362 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
363 (val & 0x38) == 0x28, 1, 100);
364
365 if (ret) {
366 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
367 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
368 }
369
	return ret;
371 }
372
/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
375 {
376 u32 val;
377 int ret;
378
379 if (!gmu->legacy)
380 return;
381
382 /* Make sure retention is on */
383 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
384
385 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
386
387 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
388 (val & 0x04), 100, 10000);
389
390 if (ret)
391 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
392 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
393 }
394
395 /* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
397 {
398 u32 vote;
399
400 /* Let the GMU know we are getting ready for boot */
401 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
402
403 /* Choose the "default" power level as the highest available */
404 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
405
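	/*
	 * The low byte of the vote is the GX (gfx.lvl) table index and the
	 * next byte is the MX dependency index - see how the vote words are
	 * packed in a6xx_gmu_rpmh_arc_votes_init()
	 */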
406 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
407 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
408
409 /* Let the GMU know the boot sequence has started */
410 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
411 }
412
413 /* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
415 {
416 int ret;
417
418 /* Disable the power counter so the GMU isn't busy */
419 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
420
421 /* Disable SPTP_PC if the CPU is responsible for it */
422 if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
423 a6xx_sptprac_disable(gmu);
424
425 if (!gmu->legacy) {
426 ret = a6xx_hfi_send_prep_slumber(gmu);
427 goto out;
428 }
429
430 /* Tell the GMU to get ready to slumber */
431 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
432
433 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
434 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
435
436 if (!ret) {
437 /* Check to see if the GMU really did slumber */
438 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
439 != 0x0f) {
440 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
441 ret = -ETIMEDOUT;
442 }
443 }
444
445 out:
446 /* Put fence into allow mode */
447 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
448 return ret;
449 }
450
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
452 {
453 int ret;
454 u32 val;
455
456 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
457 /* Wait for the register to finish posting */
458 wmb();
459
460 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
461 val & (1 << 1), 100, 10000);
462 if (ret) {
463 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
464 return ret;
465 }
466
467 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
468 !val, 100, 10000);
469
470 if (ret) {
471 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
472 return ret;
473 }
474
475 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
476
477 /* Set up CX GMU counter 0 to count busy ticks */
478 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
479 gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
480
481 /* Enable the power counter */
482 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
483 return 0;
484 }
485
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
487 {
488 int ret;
489 u32 val;
490
491 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
492
493 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
494 val, val & (1 << 16), 100, 10000);
495 if (ret)
496 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
497
498 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
499 }
500
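/*
 * The PDC registers are u32 indexed, so convert the register offset into a
 * byte offset before writing (hence the << 2 below)
 */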
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
502 {
503 return msm_writel(value, ptr + (offset << 2));
504 }
505
506 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
507 const char *name);
508
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
510 {
511 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
512 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
513 struct platform_device *pdev = to_platform_device(gmu->dev);
514 void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
515 void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
516 uint32_t pdc_address_offset;
517
518 if (!pdcptr || !seqptr)
519 goto err;
520
521 if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
522 pdc_address_offset = 0x30090;
523 else if (adreno_is_a650(adreno_gpu))
524 pdc_address_offset = 0x300a0;
525 else
526 pdc_address_offset = 0x30080;
527
528 /* Disable SDE clock gating */
529 gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
530
531 /* Setup RSC PDC handshake for sleep and wakeup */
532 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
533 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
534 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
535 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
536 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
537 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
538 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
539 gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
540 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
541 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
542 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
543
544 /* Load RSC sequencer uCode for sleep and wakeup */
545 if (adreno_is_a650(adreno_gpu)) {
546 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
547 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
548 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
549 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
550 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
551 } else {
552 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
553 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
554 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
555 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
556 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
557 }
558
559 /* Load PDC sequencer uCode for power up and power down sequence */
560 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
561 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
562 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
563 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
564 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
565
566 /* Set TCS commands used by PDC sequence for low power modes */
567 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
568 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
569 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
570 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
571 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
572 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
573 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
574 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
575 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
576
577 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
578 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
579 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
580
581 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
582 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
583 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
584 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
585 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
586 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
587
588 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
589 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
590 if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
591 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
592 else
593 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
594 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
595 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
596 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
597
598 /* Setup GPU PDC */
599 pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
600 pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
601
602 /* ensure no writes happen before the uCode is fully written */
603 wmb();
604
605 err:
606 if (!IS_ERR_OR_NULL(pdcptr))
607 iounmap(pdcptr);
608 if (!IS_ERR_OR_NULL(seqptr))
609 iounmap(seqptr);
610 }
611
612 /*
613 * The lowest 16 bits of this value are the number of XO clock cycles for main
614 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
615 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
616 */
617
618 #define GMU_PWR_COL_HYST 0x000a1680
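
/*
 * Worked example, assuming the usual 19.2 MHz XO clock: 0x1680 = 5760 cycles,
 * 5760 / 19.2 MHz = 300 us for the main hysteresis; 0xa = 10 cycles, roughly
 * 0.5 us for the short post-main hysteresis.
 */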
619
620 /* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
622 {
623 /* Disable GMU WB/RB buffer */
624 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
625 gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
626 gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
627
628 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
629
630 switch (gmu->idle_level) {
631 case GMU_IDLE_STATE_IFPC:
632 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
633 GMU_PWR_COL_HYST);
634 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
635 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
636 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
637 fallthrough;
638 case GMU_IDLE_STATE_SPTP:
639 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
640 GMU_PWR_COL_HYST);
641 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
642 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
643 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
644 }
645
646 /* Enable RPMh GPU client */
647 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
648 A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
649 A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
650 A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
651 A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
652 A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
653 A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
654 }
655
656 struct block_header {
657 u32 addr;
658 u32 size;
659 u32 type;
660 u32 value;
661 u32 data[];
662 };
663
664 /* this should be a general kernel helper */
static int in_range(u32 addr, u32 start, u32 size)
666 {
667 return addr >= start && addr < start + size;
668 }
669
static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
671 {
672 if (!in_range(blk->addr, bo->iova, bo->size))
673 return false;
674
675 memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
676 return true;
677 }
678
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
680 {
681 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
682 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
683 const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
684 const struct block_header *blk;
685 u32 reg_offset;
686
687 u32 itcm_base = 0x00000000;
688 u32 dtcm_base = 0x00040000;
689
690 if (adreno_is_a650(adreno_gpu))
691 dtcm_base = 0x10004000;
692
693 if (gmu->legacy) {
694 /* Sanity check the size of the firmware that was loaded */
695 if (fw_image->size > 0x8000) {
696 DRM_DEV_ERROR(gmu->dev,
697 "GMU firmware is bigger than the available region\n");
698 return -EINVAL;
699 }
700
701 gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
702 (u32*) fw_image->data, fw_image->size);
703 return 0;
704 }
705
706
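	/*
	 * The firmware image is a sequence of block_headers, each followed by
	 * blk->size bytes of payload; stepping blk->size >> 2 u32s past
	 * blk->data lands on the next header. ITCM/DTCM blocks are written
	 * through the register interface, anything else is copied into the
	 * preallocated icache/dcache/dummy buffers.
	 */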
707 for (blk = (const struct block_header *) fw_image->data;
708 (const u8*) blk < fw_image->data + fw_image->size;
709 blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
710 if (blk->size == 0)
711 continue;
712
713 if (in_range(blk->addr, itcm_base, SZ_16K)) {
714 reg_offset = (blk->addr - itcm_base) >> 2;
715 gmu_write_bulk(gmu,
716 REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
717 blk->data, blk->size);
718 } else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
719 reg_offset = (blk->addr - dtcm_base) >> 2;
720 gmu_write_bulk(gmu,
721 REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
722 blk->data, blk->size);
723 } else if (!fw_block_mem(&gmu->icache, blk) &&
724 !fw_block_mem(&gmu->dcache, blk) &&
725 !fw_block_mem(&gmu->dummy, blk)) {
726 DRM_DEV_ERROR(gmu->dev,
727 "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
728 blk->addr, blk->size, blk->data[0]);
729 }
730 }
731
732 return 0;
733 }
734
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
736 {
737 static bool rpmh_init;
738 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
739 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
740 int ret;
741 u32 chipid;
742
743 if (adreno_is_a650(adreno_gpu))
744 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
745
746 if (state == GMU_WARM_BOOT) {
747 ret = a6xx_rpmh_start(gmu);
748 if (ret)
749 return ret;
750 } else {
751 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
752 "GMU firmware is not loaded\n"))
753 return -ENOENT;
754
755 /* Turn on register retention */
756 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
757
758 /* We only need to load the RPMh microcode once */
759 if (!rpmh_init) {
760 a6xx_gmu_rpmh_init(gmu);
761 rpmh_init = true;
762 } else {
763 ret = a6xx_rpmh_start(gmu);
764 if (ret)
765 return ret;
766 }
767
768 ret = a6xx_gmu_fw_load(gmu);
769 if (ret)
770 return ret;
771 }
772
773 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
774 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
775
776 /* Write the iova of the HFI table */
777 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
778 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
779
780 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
781 (1 << 31) | (0xa << 18) | (0xa0));
782
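	/* Pack the chip id for the GMU: core[31:24], major[23:16], minor[15:12], patchid[11:8] */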
783 chipid = adreno_gpu->rev.core << 24;
784 chipid |= adreno_gpu->rev.major << 16;
785 chipid |= adreno_gpu->rev.minor << 12;
786 chipid |= adreno_gpu->rev.patchid << 8;
787
788 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
789
790 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
791 gmu->log.iova | (gmu->log.size / SZ_4K - 1));
792
793 /* Set up the lowest idle level on the GMU */
794 a6xx_gmu_power_config(gmu);
795
796 ret = a6xx_gmu_start(gmu);
797 if (ret)
798 return ret;
799
800 if (gmu->legacy) {
801 ret = a6xx_gmu_gfx_rail_on(gmu);
802 if (ret)
803 return ret;
804 }
805
806 /* Enable SPTP_PC if the CPU is responsible for it */
807 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
808 ret = a6xx_sptprac_enable(gmu);
809 if (ret)
810 return ret;
811 }
812
813 ret = a6xx_gmu_hfi_start(gmu);
814 if (ret)
815 return ret;
816
817 /* FIXME: Do we need this wmb() here? */
818 wmb();
819
820 return 0;
821 }
822
823 #define A6XX_HFI_IRQ_MASK \
824 (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
825
826 #define A6XX_GMU_IRQ_MASK \
827 (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
828 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
829 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
830
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
832 {
833 disable_irq(gmu->gmu_irq);
834 disable_irq(gmu->hfi_irq);
835
836 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
837 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
838 }
839
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
841 {
842 u32 val;
843
844 /* Make sure there are no outstanding RPMh votes */
845 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
846 (val & 1), 100, 10000);
847 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
848 (val & 1), 100, 10000);
849 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
850 (val & 1), 100, 10000);
851 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
852 (val & 1), 100, 1000);
853 }
854
855 /* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
857 {
858 /* Flush all the queues */
859 a6xx_hfi_stop(gmu);
860
861 /* Stop the interrupts */
862 a6xx_gmu_irq_disable(gmu);
863
864 /* Force off SPTP in case the GMU is managing it */
865 a6xx_sptprac_disable(gmu);
866
867 /* Make sure there are no outstanding RPMh votes */
868 a6xx_gmu_rpmh_off(gmu);
869 }
870
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
872 {
873 struct dev_pm_opp *gpu_opp;
874 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
875
876 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
877 if (IS_ERR_OR_NULL(gpu_opp))
878 return;
879
880 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
881 a6xx_gmu_set_freq(gpu, gpu_opp);
882 dev_pm_opp_put(gpu_opp);
883 }
884
static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
886 {
887 struct dev_pm_opp *gpu_opp;
888 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
889
890 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
891 if (IS_ERR_OR_NULL(gpu_opp))
892 return;
893
894 dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
895 dev_pm_opp_put(gpu_opp);
896 }
897
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
899 {
900 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
901 struct msm_gpu *gpu = &adreno_gpu->base;
902 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
903 int status, ret;
904
905 if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
906 return 0;
907
908 gmu->hung = false;
909
910 /* Turn on the resources */
911 pm_runtime_get_sync(gmu->dev);
912
913 /*
914 * "enable" the GX power domain which won't actually do anything but it
915 * will make sure that the refcounting is correct in case we need to
916 * bring down the GX after a GMU failure
917 */
918 if (!IS_ERR_OR_NULL(gmu->gxpd))
919 pm_runtime_get_sync(gmu->gxpd);
920
921 /* Use a known rate to bring up the GMU */
922 clk_set_rate(gmu->core_clk, 200000000);
923 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
924 if (ret) {
925 pm_runtime_put(gmu->gxpd);
926 pm_runtime_put(gmu->dev);
927 return ret;
928 }
929
930 /* Set the bus quota to a reasonable value for boot */
931 a6xx_gmu_set_initial_bw(gpu, gmu);
932
933 /* Enable the GMU interrupt */
934 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
935 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
936 enable_irq(gmu->gmu_irq);
937
	/*
	 * Check to see if we are doing a cold or warm boot; GENERAL_7 is
	 * written to 1 by a6xx_gmu_fw_start(), so a retained value indicates
	 * the GMU state survived and a warm boot is possible
	 */
939 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
940 GMU_WARM_BOOT : GMU_COLD_BOOT;
941
942 /*
943 * Warm boot path does not work on newer GPUs
944 * Presumably this is because icache/dcache regions must be restored
945 */
946 if (!gmu->legacy)
947 status = GMU_COLD_BOOT;
948
949 ret = a6xx_gmu_fw_start(gmu, status);
950 if (ret)
951 goto out;
952
953 ret = a6xx_hfi_start(gmu, status);
954 if (ret)
955 goto out;
956
957 /*
958 * Turn on the GMU firmware fault interrupt after we know the boot
959 * sequence is successful
960 */
961 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
962 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
963 enable_irq(gmu->hfi_irq);
964
965 /* Set the GPU to the current freq */
966 a6xx_gmu_set_initial_freq(gpu, gmu);
967
968 out:
969 /* On failure, shut down the GMU to leave it in a good state */
970 if (ret) {
971 disable_irq(gmu->gmu_irq);
972 a6xx_rpmh_stop(gmu);
973 pm_runtime_put(gmu->gxpd);
974 pm_runtime_put(gmu->dev);
975 }
976
977 return ret;
978 }
979
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
981 {
982 u32 reg;
983
984 if (!gmu->initialized)
985 return true;
986
987 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
988
989 if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
990 return false;
991
992 return true;
993 }
994
995 #define GBIF_CLIENT_HALT_MASK BIT(0)
996 #define GBIF_ARB_HALT_MASK BIT(1)
997
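/*
 * Drain outstanding bus traffic before power transitions: pre-GBIF parts use
 * the VBIF XIN halt, GBIF parts halt client requests and then AXI requests,
 * and the GBIF halt has to be cleared explicitly afterwards.
 */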
static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
999 {
1000 struct msm_gpu *gpu = &adreno_gpu->base;
1001
1002 if (!a6xx_has_gbif(adreno_gpu)) {
1003 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
1004 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
1005 0xf) == 0xf);
1006 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
1007
1008 return;
1009 }
1010
1011 /* Halt new client requests on GBIF */
1012 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
1013 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
1014 (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
1015
1016 /* Halt all AXI requests on GBIF */
1017 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
1018 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
1019 (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
1020
1021 /* The GBIF halt needs to be explicitly cleared */
1022 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
1023 }
1024
1025 /* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
1027 {
1028 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1029 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1030 u32 val;
1031
	/*
	 * If the GPU was never started the GMU may still be in slumber; check
	 * the RPMh power state and skip putting it back into slumber if so
	 */
1036 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
1037
1038 if (val != 0xf) {
1039 int ret = a6xx_gmu_wait_for_idle(gmu);
1040
1041 /* If the GMU isn't responding assume it is hung */
1042 if (ret) {
1043 a6xx_gmu_force_off(gmu);
1044 return;
1045 }
1046
1047 a6xx_bus_clear_pending_transactions(adreno_gpu);
1048
1049 /* tell the GMU we want to slumber */
1050 a6xx_gmu_notify_slumber(gmu);
1051
1052 ret = gmu_poll_timeout(gmu,
1053 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
1054 !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
1055 100, 10000);
1056
1057 /*
1058 * Let the user know we failed to slumber but don't worry too
1059 * much because we are powering down anyway
1060 */
1061
1062 if (ret)
1063 DRM_DEV_ERROR(gmu->dev,
1064 "Unable to slumber GMU: status = 0%x/0%x\n",
1065 gmu_read(gmu,
1066 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
1067 gmu_read(gmu,
1068 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
1069 }
1070
1071 /* Turn off HFI */
1072 a6xx_hfi_stop(gmu);
1073
1074 /* Stop the interrupts and mask the hardware */
1075 a6xx_gmu_irq_disable(gmu);
1076
1077 /* Tell RPMh to power off the GPU */
1078 a6xx_rpmh_stop(gmu);
1079 }
1080
1081
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
1083 {
1084 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1085 struct msm_gpu *gpu = &a6xx_gpu->base.base;
1086
1087 if (!pm_runtime_active(gmu->dev))
1088 return 0;
1089
1090 /*
1091 * Force the GMU off if we detected a hang, otherwise try to shut it
1092 * down gracefully
1093 */
1094 if (gmu->hung)
1095 a6xx_gmu_force_off(gmu);
1096 else
1097 a6xx_gmu_shutdown(gmu);
1098
1099 /* Remove the bus vote */
1100 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
1101
1102 /*
1103 * Make sure the GX domain is off before turning off the GMU (CX)
1104 * domain. Usually the GMU does this but only if the shutdown sequence
1105 * was successful
1106 */
1107 if (!IS_ERR_OR_NULL(gmu->gxpd))
1108 pm_runtime_put_sync(gmu->gxpd);
1109
1110 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
1111
1112 pm_runtime_put_sync(gmu->dev);
1113
1114 return 0;
1115 }
1116
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
1118 {
1119 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
1120 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
1121 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
1122 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
1123 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
1124 msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
1125
1126 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
1127 msm_gem_address_space_put(gmu->aspace);
1128 }
1129
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
1131 size_t size, u64 iova)
1132 {
1133 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1134 struct drm_device *dev = a6xx_gpu->base.base.dev;
1135 uint32_t flags = MSM_BO_WC;
1136 u64 range_start, range_end;
1137 int ret;
1138
1139 size = PAGE_ALIGN(size);
1140 if (!iova) {
1141 /* no fixed address - use GMU's uncached range */
1142 range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
1143 range_end = 0x80000000;
1144 } else {
1145 /* range for fixed address */
1146 range_start = iova;
1147 range_end = iova + size;
1148 /* use IOMMU_PRIV for icache/dcache */
1149 flags |= MSM_BO_MAP_PRIV;
1150 }
1151
1152 bo->obj = msm_gem_new(dev, size, flags);
1153 if (IS_ERR(bo->obj))
1154 return PTR_ERR(bo->obj);
1155
1156 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
1157 range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
1158 if (ret) {
1159 drm_gem_object_put(bo->obj);
1160 return ret;
1161 }
1162
1163 bo->virt = msm_gem_get_vaddr(bo->obj);
1164 bo->size = size;
1165
1166 return 0;
1167 }
1168
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
1170 {
1171 struct iommu_domain *domain;
1172 struct msm_mmu *mmu;
1173
1174 domain = iommu_domain_alloc(&platform_bus_type);
1175 if (!domain)
1176 return -ENODEV;
1177
1178 mmu = msm_iommu_new(gmu->dev, domain);
1179 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
1180 if (IS_ERR(gmu->aspace)) {
1181 iommu_domain_free(domain);
1182 return PTR_ERR(gmu->aspace);
1183 }
1184
1185 return 0;
1186 }
1187
1188 /* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
1190 unsigned long freq)
1191 {
1192 struct dev_pm_opp *opp;
1193 unsigned int val;
1194
1195 if (!freq)
1196 return 0;
1197
1198 opp = dev_pm_opp_find_freq_exact(dev, freq, true);
1199 if (IS_ERR(opp))
1200 return 0;
1201
1202 val = dev_pm_opp_get_level(opp);
1203
1204 dev_pm_opp_put(opp);
1205
1206 return val;
1207 }
1208
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
1210 unsigned long *freqs, int freqs_count, const char *id)
1211 {
1212 int i, j;
1213 const u16 *pri, *sec;
1214 size_t pri_count, sec_count;
1215
1216 pri = cmd_db_read_aux_data(id, &pri_count);
1217 if (IS_ERR(pri))
1218 return PTR_ERR(pri);
1219 /*
1220 * The data comes back as an array of unsigned shorts so adjust the
1221 * count accordingly
1222 */
1223 pri_count >>= 1;
1224 if (!pri_count)
1225 return -EINVAL;
1226
1227 sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
1228 if (IS_ERR(sec))
1229 return PTR_ERR(sec);
1230
1231 sec_count >>= 1;
1232 if (!sec_count)
1233 return -EINVAL;
1234
1235 /* Construct a vote for each frequency */
1236 for (i = 0; i < freqs_count; i++) {
1237 u8 pindex = 0, sindex = 0;
1238 unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
1239
1240 /* Get the primary index that matches the arc level */
1241 for (j = 0; j < pri_count; j++) {
1242 if (pri[j] >= level) {
1243 pindex = j;
1244 break;
1245 }
1246 }
1247
1248 if (j == pri_count) {
1249 DRM_DEV_ERROR(dev,
1250 "Level %u not found in the RPMh list\n",
1251 level);
1252 DRM_DEV_ERROR(dev, "Available levels:\n");
1253 for (j = 0; j < pri_count; j++)
1254 DRM_DEV_ERROR(dev, " %u\n", pri[j]);
1255
1256 return -EINVAL;
1257 }
1258
1259 /*
	 * Look for a level in the secondary list that matches. If
1261 * nothing fits, use the maximum non zero vote
1262 */
1263
1264 for (j = 0; j < sec_count; j++) {
1265 if (sec[j] >= level) {
1266 sindex = j;
1267 break;
1268 } else if (sec[j]) {
1269 sindex = j;
1270 }
1271 }
1272
		/*
		 * Construct the vote: primary arc level value in bits [31:16],
		 * secondary (mx.lvl) table index in bits [15:8], primary table
		 * index in bits [7:0]
		 */
1274 votes[i] = ((pri[pindex] & 0xffff) << 16) |
1275 (sindex << 8) | pindex;
1276 }
1277
1278 return 0;
1279 }
1280
1281 /*
1282 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1283 * to construct the list of votes on the CPU and send it over. Query the RPMh
1284 * voltage levels and build the votes
1285 */
1286
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
1288 {
1289 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1290 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1291 struct msm_gpu *gpu = &adreno_gpu->base;
1292 int ret;
1293
1294 /* Build the GX votes */
1295 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
1296 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
1297
1298 /* Build the CX votes */
1299 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
1300 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
1301
1302 return ret;
1303 }
1304
static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
1306 u32 size)
1307 {
1308 int count = dev_pm_opp_get_opp_count(dev);
1309 struct dev_pm_opp *opp;
1310 int i, index = 0;
1311 unsigned long freq = 1;
1312
1313 /*
1314 * The OPP table doesn't contain the "off" frequency level so we need to
1315 * add 1 to the table size to account for it
1316 */
1317
1318 if (WARN(count + 1 > size,
1319 "The GMU frequency table is being truncated\n"))
1320 count = size - 1;
1321
1322 /* Set the "off" frequency */
1323 freqs[index++] = 0;
1324
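	/*
	 * dev_pm_opp_find_freq_ceil() rounds 'freq' up to the next available
	 * OPP; bumping it by one afterwards makes the next iteration move on
	 * to the following entry.
	 */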
1325 for (i = 0; i < count; i++) {
1326 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
1327 if (IS_ERR(opp))
1328 break;
1329
1330 dev_pm_opp_put(opp);
1331 freqs[index++] = freq++;
1332 }
1333
1334 return index;
1335 }
1336
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1338 {
1339 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1340 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1341 struct msm_gpu *gpu = &adreno_gpu->base;
1342
1343 int ret = 0;
1344
1345 /*
1346 * The GMU handles its own frequency switching so build a list of
1347 * available frequencies to send during initialization
1348 */
1349 ret = dev_pm_opp_of_add_table(gmu->dev);
1350 if (ret) {
1351 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
1352 return ret;
1353 }
1354
1355 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1356 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1357
1358 /*
1359 * The GMU also handles GPU frequency switching so build a list
1360 * from the GPU OPP table
1361 */
1362 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1363 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1364
1365 gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
1366
1367 /* Build the list of RPMh votes that we'll send to the GMU */
1368 return a6xx_gmu_rpmh_votes_init(gmu);
1369 }
1370
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1372 {
1373 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
1374
1375 if (ret < 1)
1376 return ret;
1377
1378 gmu->nr_clocks = ret;
1379
1380 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1381 gmu->nr_clocks, "gmu");
1382
1383 return 0;
1384 }
1385
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
1387 const char *name)
1388 {
1389 void __iomem *ret;
1390 struct resource *res = platform_get_resource_byname(pdev,
1391 IORESOURCE_MEM, name);
1392
1393 if (!res) {
1394 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
1395 return ERR_PTR(-EINVAL);
1396 }
1397
1398 ret = ioremap(res->start, resource_size(res));
1399 if (!ret) {
1400 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
1401 return ERR_PTR(-EINVAL);
1402 }
1403
1404 return ret;
1405 }
1406
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1408 const char *name, irq_handler_t handler)
1409 {
1410 int irq, ret;
1411
1412 irq = platform_get_irq_byname(pdev, name);
1413
1414 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
1415 if (ret) {
1416 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
1417 name, ret);
1418 return ret;
1419 }
1420
1421 disable_irq(irq);
1422
1423 return irq;
1424 }
1425
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
1427 {
1428 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1429 struct platform_device *pdev = to_platform_device(gmu->dev);
1430
1431 if (!gmu->initialized)
1432 return;
1433
1434 pm_runtime_force_suspend(gmu->dev);
1435
1436 if (!IS_ERR_OR_NULL(gmu->gxpd)) {
1437 pm_runtime_disable(gmu->gxpd);
1438 dev_pm_domain_detach(gmu->gxpd, false);
1439 }
1440
1441 iounmap(gmu->mmio);
1442 if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
1443 iounmap(gmu->rscc);
1444 gmu->mmio = NULL;
1445 gmu->rscc = NULL;
1446
1447 a6xx_gmu_memory_free(gmu);
1448
1449 free_irq(gmu->gmu_irq, gmu);
1450 free_irq(gmu->hfi_irq, gmu);
1451
1452 /* Drop reference taken in of_find_device_by_node */
1453 put_device(gmu->dev);
1454
1455 gmu->initialized = false;
1456 }
1457
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1459 {
1460 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1461 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1462 struct platform_device *pdev = of_find_device_by_node(node);
1463 int ret;
1464
1465 if (!pdev)
1466 return -ENODEV;
1467
1468 gmu->dev = &pdev->dev;
1469
1470 of_dma_configure(gmu->dev, node, true);
1471
	/* For now, don't do anything fancy until we get our feet under us */
1473 gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1474
1475 pm_runtime_enable(gmu->dev);
1476
1477 /* Get the list of clocks */
1478 ret = a6xx_gmu_clocks_probe(gmu);
1479 if (ret)
1480 goto err_put_device;
1481
1482 ret = a6xx_gmu_memory_probe(gmu);
1483 if (ret)
1484 goto err_put_device;
1485
1486 /* Allocate memory for the GMU dummy page */
1487 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
1488 if (ret)
1489 goto err_memory;
1490
1491 if (adreno_is_a650(adreno_gpu)) {
1492 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1493 SZ_16M - SZ_16K, 0x04000);
1494 if (ret)
1495 goto err_memory;
1496 } else if (adreno_is_a640(adreno_gpu)) {
1497 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1498 SZ_256K - SZ_16K, 0x04000);
1499 if (ret)
1500 goto err_memory;
1501
1502 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
1503 SZ_256K - SZ_16K, 0x44000);
1504 if (ret)
1505 goto err_memory;
1506 } else {
1507 /* HFI v1, has sptprac */
1508 gmu->legacy = true;
1509
1510 /* Allocate memory for the GMU debug region */
1511 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
1512 if (ret)
1513 goto err_memory;
1514 }
1515
	/* Allocate memory for the HFI queues */
1517 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
1518 if (ret)
1519 goto err_memory;
1520
1521 /* Allocate memory for the GMU log region */
1522 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
1523 if (ret)
1524 goto err_memory;
1525
1526 /* Map the GMU registers */
1527 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1528 if (IS_ERR(gmu->mmio)) {
1529 ret = PTR_ERR(gmu->mmio);
1530 goto err_memory;
1531 }
1532
1533 if (adreno_is_a650(adreno_gpu)) {
1534 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
1535 if (IS_ERR(gmu->rscc))
1536 goto err_mmio;
1537 } else {
1538 gmu->rscc = gmu->mmio + 0x23000;
1539 }
1540
1541 /* Get the HFI and GMU interrupts */
1542 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
1543 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
1544
1545 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
1546 goto err_mmio;
1547
1548 /*
1549 * Get a link to the GX power domain to reset the GPU in case of GMU
1550 * crash
1551 */
1552 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1553
1554 /* Get the power levels for the GMU and GPU */
1555 a6xx_gmu_pwrlevels_probe(gmu);
1556
1557 /* Set up the HFI queues */
1558 a6xx_hfi_init(gmu);
1559
1560 gmu->initialized = true;
1561
1562 return 0;
1563
1564 err_mmio:
1565 iounmap(gmu->mmio);
1566 if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
1567 iounmap(gmu->rscc);
1568 free_irq(gmu->gmu_irq, gmu);
1569 free_irq(gmu->hfi_irq, gmu);
1570
1571 ret = -ENODEV;
1572
1573 err_memory:
1574 a6xx_gmu_memory_free(gmu);
1575 err_put_device:
1576 /* Drop reference taken in of_find_device_by_node */
1577 put_device(gmu->dev);
1578
1579 return ret;
1580 }
1581