/*	$NetBSD: amdgpu_gfx_v10_0.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $	*/

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_gfx_v10_0.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $");

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"

#include <linux/nbsd-namespace.h>
/**
 * Navi10 has two graphics rings sharing each graphics pipe:
 * 1. Primary ring
 * 2. Async ring
 */
#define GFX10_NUM_GFX_RINGS	2
#define GFX10_MEC_HPD_SIZE	2048

#define F32_CE_PROGRAM_RAM_SIZE		65536
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

#define mmCGTT_GS_NGG_CLK_CTRL		0x5087
#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX	1

MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");

MODULE_FIRMWARE("amdgpu/navi14_ce_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_pfp_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_me_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec2_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_ce.bin");
MODULE_FIRMWARE("amdgpu/navi14_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi14_me.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi14_rlc.bin");

MODULE_FIRMWARE("amdgpu/navi12_ce.bin");
MODULE_FIRMWARE("amdgpu/navi12_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi12_me.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi12_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xfeff8fff, 0xfeff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x000007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0x20000000, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07900000, 0x04900000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};

static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070105),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
};

static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xffffffff, 0x842a4c02),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};

static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
{
	/* Pending on emulation bring up */
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))


static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);

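/*
 * Hand the KIQ its resources: the SET_RESOURCES packet publishes the
 * compute queue mask to the CP; the GWS, OAC and GDS fields are left
 * zeroed here.
 */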
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

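/*
 * Map a ring onto a hardware queue via the KIQ: MAP_QUEUES points the
 * CP at the ring's MQD and write-pointer polling address, keyed by the
 * ring's doorbell offset.
 */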
static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

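/*
 * Unmap (or preempt) a queue via the KIQ.  For PREEMPT_QUEUES_NO_UNMAP
 * the trailing dwords carry a fence address/sequence that the CP writes
 * once preemption has completed.
 */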
static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

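/*
 * Query a queue's status via the KIQ; completion is reported back
 * through the supplied 64-bit address/sequence pair.
 */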
static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

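/*
 * Invalidate GPU TLB entries for a PASID through the KIQ, optionally on
 * all VM hubs and with the requested flush type.
 */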
static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx10_kiq_set_resources,
	.kiq_map_queues = gfx10_kiq_map_queues,
	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
	.kiq_query_status = gfx10_kiq_query_status,
	.kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}

static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_0_nv10,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
		break;
	case CHIP_NAVI14:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_nv14,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
		break;
	case CHIP_NAVI12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_2,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_2_nv12,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
		break;
	default:
		break;
	}
}

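/* Expose the eight CP scratch registers (SCRATCH_REG0..7) used by ring/IB tests. */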
static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

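/*
 * Emit a WRITE_DATA packet that programs a single register from the
 * ring; 'wc' requests write confirmation before the CP proceeds.
 */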
static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

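/*
 * Emit a WAIT_REG_MEM packet: stall the selected engine until the dword
 * at addr1:addr0 (memory space) or the register at addr0 equals 'ref'
 * under 'mask', polling at the given interval.
 */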
static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

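/*
 * Basic ring liveness test: push 0xDEADBEEF into a scratch register
 * through the CP and poll until the value lands or the timeout expires.
 */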
static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

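/*
 * Indirect-buffer test: submit a tiny IB that writes 0xDEADBEEF to a
 * scratch register, then wait on its fence and verify the write.
 */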
static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.cp_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
		if ((adev->gfx.me_fw_version >= 0x00000046) &&
		    (adev->gfx.me_feature_version >= 27) &&
		    (adev->gfx.pfp_fw_version >= 0x00000068) &&
		    (adev->gfx.pfp_feature_version >= 27) &&
		    (adev->gfx.mec_fw_version >= 0x0000005b) &&
		    (adev->gfx.mec_feature_version >= 27))
			adev->gfx.cp_fw_write_wait = true;
		break;
	default:
		break;
	}

	if (!adev->gfx.cp_fw_write_wait)
		DRM_WARN_ONCE("CP firmware version too old, please update!");
}


static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (const u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (const u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (const u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
	bool ret = false;

	switch (adev->pdev->revision) {
	case 0xc2:
	case 0xc3:
		ret = true;
		break;
	default:
		ret = false;
		break;
	}

	return ret;
}

static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		break;
	default:
		break;
	}
}

static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	char wks[10];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	memset(wks, 0, sizeof(wks));
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		if (!(adev->pdev->device == 0x7340 &&
		      adev->pdev->revision != 0x00))
			snprintf(wks, sizeof(wks), "_wks");
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
		err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		if (version_major == 2 && version_minor == 1)
			adev->gfx.rlc.is_rlc_v2_1 = true;

		adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
		adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
		adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
		adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
		adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
		adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
		adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
		adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
		adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
		adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
		adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
		adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
		if (!adev->gfx.rlc.register_list_format) {
			err = -ENOMEM;
			goto out;
		}

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		for (i = 0; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
			adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

		adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		for (i = 0; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
			adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

		if (adev->gfx.rlc.is_rlc_v2_1)
			gfx_v10_0_init_rlc_ext_microcode(adev);
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		if (info->fw) {
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) -
			      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
		}
	}

	gfx_v10_0_check_fw_write_wait(adev);
out:
	if (err) {
		dev_err(adev->dev,
			"gfx10: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}

	gfx_v10_0_check_gfxoff_flag(adev);

	return err;
}

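/*
 * Size of the clear-state buffer in dwords: preamble begin/end, context
 * control, every SECT_CONTEXT register extent, the
 * PA_SC_TILE_STEERING_OVERRIDE write and the final CLEAR_STATE packet.
 */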
static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

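/* Fill the clear-state buffer; must stay in sync with gfx_v10_0_get_csb_size(). */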
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)__UNVOLATILE(&adev->gfx.rlc.cs_ptr));

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)__UNVOLATILE(&adev->gfx.rlc.cp_table_ptr));
}

static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx10_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v10_0_me_init(struct amdgpu_device *adev)
{
	int r;

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

	r = gfx_v10_0_init_microcode(adev);
	if (r)
		DRM_ERROR("Failed to load gfx firmware!\n");

	return r;
}

static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data = NULL;
	unsigned fw_size;
	u32 *fw = NULL;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		gfx_v10_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
					    le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

		r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.mec_fw_obj,
					      &adev->gfx.mec.mec_fw_gpu_addr,
					      (void **)&fw);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
			gfx_v10_0_mec_fini(adev);
			return r;
		}

		memcpy(fw, fw_data, fw_size);

		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	}

	return 0;
}

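/* Read one dword of a wave's state via the SQ_IND_INDEX/SQ_IND_DATA window. */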
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

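/*
 * Read 'num' consecutive wave registers starting at 'regno', using the
 * auto-increment mode of the SQ indirect register interface.
 */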
static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx10 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 2 wave data */
	dst[(*no_fields)++] = 2;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
}

static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	nv_grbm_select(adev, me, pipe, q, vm);
}


static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v10_0_select_se_sh,
	.read_wave_data = &gfx_v10_0_read_wave_data,
	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
};

static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v10_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
}

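/* Set up one gfx ring: doorbell assignment, name and EOP interrupt wiring. */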
static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	snprintf(ring->name, sizeof(ring->name), "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;
	return 0;
}

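/*
 * Set up one compute ring on the given MEC/pipe/queue, including its
 * doorbell, EOP buffer slice and EOP interrupt source.
 */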
static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
			     + (ring_id * GFX10_MEC_HPD_SIZE);
	snprintf(ring->name, sizeof(ring->name), "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		   + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		   + ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

static int gfx_v10_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 2;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
			      &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v10_0_scratch_init(adev);

	r = gfx_v10_0_me_init(adev);
	if (r)
		return r;

	r = gfx_v10_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v10_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v10_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
								     j))
					continue;

				r = gfx_v10_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	adev->gfx.ce_ram_size = F32_CE_PROGRAM_RAM_SIZE;

	gfx_v10_0_gpu_early_init(adev);

	return 0;
}

static void gfx_v10_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
}

static void gfx_v10_0_ce_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
			      &adev->gfx.ce.ce_fw_gpu_addr,
			      (void **)&adev->gfx.ce.ce_fw_ptr);
}

static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);
}

static int gfx_v10_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v10_0_pfp_fini(adev);
	gfx_v10_0_ce_fini(adev);
	gfx_v10_0_me_fini(adev);
	gfx_v10_0_rlc_fini(adev);
	gfx_v10_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);

	gfx_v10_0_free_microcode(adev);

	return 0;
}


static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

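/*
 * Steer subsequent register accesses to a particular shader engine (SE),
 * shader array (SA/SH) and instance through GRBM_GFX_INDEX.  Passing
 * 0xffffffff for a parameter selects broadcast writes to all units of
 * that level instead of indexing a single one.
 */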
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

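/*
 * Compute the bitmap of active render backends (RBs) for the currently
 * selected SE/SH: OR the fused and user-disabled backend masks together,
 * invert the result, and clamp it to the number of backends per shader
 * array.
 */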
static u32 gfx_v10_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v10_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

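/*
 * Build the PA_SC_TILE_STEERING_OVERRIDE value.  Each field carries a
 * log2-encoded count (via order_base_2): the total number of scan
 * converters, the RBs per scan converter, and the packers per scan
 * converter, each masked to the width of its register field.
 */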
static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *adev)
{
	uint32_t num_sc;
	uint32_t enabled_rb_per_sh;
	uint32_t active_rb_bitmap;
	uint32_t num_rb_per_sc;
	uint32_t num_packer_per_sc;
	uint32_t pa_sc_tile_steering_override;

	/* init num_sc */
	num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
		 adev->gfx.config.num_sc_per_sh;
	/* init num_rb_per_sc */
	active_rb_bitmap = gfx_v10_0_get_rb_active_bitmap(adev);
	enabled_rb_per_sh = hweight32(active_rb_bitmap);
	num_rb_per_sc = enabled_rb_per_sh / adev->gfx.config.num_sc_per_sh;
	/* init num_packer_per_sc */
	num_packer_per_sc = adev->gfx.config.num_packer_per_sc;

	pa_sc_tile_steering_override = 0;
	pa_sc_tile_steering_override |=
		(order_base_2(num_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_rb_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_packer_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK;

	return pa_sc_tile_steering_override;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)

static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
	}
}


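/*
 * WGP harvesting: for every work-group processor (WGP) absent from the
 * active bitmap of the selected SE/SH, set the matching disable bits in
 * GCRD_SA_TARGETS_DISABLE and UTCL1_UTCL0_INVREQ_DISABLE.  Each WGP owns
 * two TCP and two SQC bits, hence the "3 << (2 * k)" pairs below.
 */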
static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
{
	int i, j, k;
	int max_wgp_per_sh = adev->gfx.config.max_cu_per_sh >> 1;
	u32 tmp, wgp_active_bitmap = 0;
	u32 gcrd_targets_disable_tcp = 0;
	u32 utcl_invreq_disable = 0;
	/*
	 * GCRD_TARGETS_DISABLE field contains
	 * for Navi10/Navi12: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
	 * for Navi14: GL1C=[21:18], SQC=[17:12], TCP=[11:0]
	 */
	u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		max_wgp_per_sh + /* SQC */
		4); /* GL1C */
	/*
	 * UTCL1_UTCL0_INVREQ_DISABLE field contains
	 * for Navi10/Navi12: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
	 * for Navi14: SQG=[28], RMI=[27:24], SQC=[23:12], TCP=[11:0]
	 */
	u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		2 * max_wgp_per_sh + /* SQC */
		4 + /* RMI */
		1); /* SQG */

	if (adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12) {
		mutex_lock(&adev->grbm_idx_mutex);
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
				wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
				/*
				 * Set corresponding TCP bits for the inactive WGPs in
				 * GCRD_SA_TARGETS_DISABLE
				 */
				gcrd_targets_disable_tcp = 0;
				/* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
				utcl_invreq_disable = 0;

				for (k = 0; k < max_wgp_per_sh; k++) {
					if (!(wgp_active_bitmap & (1 << k))) {
						gcrd_targets_disable_tcp |= 3 << (2 * k);
						utcl_invreq_disable |= (3 << (2 * k)) |
							(3 << (2 * (max_wgp_per_sh + k)));
					}
				}

				tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
				/* only override TCP & SQC bits */
				tmp &= 0xffffffff << (4 * max_wgp_per_sh);
				tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
				WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);

				tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
				/* only override TCP bits */
				tmp &= 0xffffffff << (2 * max_wgp_per_sh);
				tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
				WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
			}
		}

		gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}
}

static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
{
	/* TCCs are global (not instanced). */
	uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
			       RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);

	adev->gfx.config.tcc_disabled_mask =
		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
}

static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v10_0_tiling_mode_table_init(adev);

	gfx_v10_0_setup_rb(adev);
	gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v10_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override =
		gfx_v10_0_init_pa_sc_tile_steering_override(adev);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
		}
	}
	nv_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v10_0_init_compute_vmid(adev);
	gfx_v10_0_init_gds_vmid(adev);

}

static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}

static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	/* csib */
	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;
}

void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
}

static void gfx_v10_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);

	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 * RLC will wait for handshake acks with SMU
		 * GFXOFF will be enabled
		 * RLC_PG_CNTL[23] = 1
		 * RLC will not issue any message to SMU
		 * hence no handshake between SMU & RLC
		 * GFXOFF will be disabled
		 */
		rlc_pg_cntl |= 0x800000;
	} else
		rlc_pg_cntl &= ~0x800000;
	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
}

static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
{
	/* TODO: enable the RLC & SMU handshake once the SMU
	 * and GFXOFF features work as expected */
	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
		gfx_v10_0_rlc_smu_handshake_cntl(adev, false);

	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
	udelay(50);
}

static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* enable Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
}

static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA,
			     le32_to_cpup(fw_data++));

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {

		r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
		if (r)
			return r;

		gfx_v10_0_init_csb(adev);

		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
			gfx_v10_0_rlc_enable_srm(adev);
	} else {
		adev->gfx.rlc.funcs->stop(adev);

		/* disable CG */
		WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);

		/* disable PG */
		WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			/* legacy rlc firmware loading */
			r = gfx_v10_0_rlc_load_microcode(adev);
			if (r)
				return r;
		} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
			/* rlc backdoor autoload firmware */
			r = gfx_v10_0_rlc_backdoor_autoload_enable(adev);
			if (r)
				return r;
		}

		gfx_v10_0_init_csb(adev);

		adev->gfx.rlc.funcs->start(adev);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
			r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
			if (r)
				return r;
		}
	}
	return 0;
}

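/*
 * Per-firmware-ID offset/size table for RLC backdoor autoload, filled in
 * from the table of contents (TOC) embedded in the PSP SOS firmware by
 * gfx_v10_0_parse_rlc_toc() below.
 */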
static struct {
	FIRMWARE_ID id;
	unsigned int offset;
	unsigned int size;
} rlc_autoload_info[FIRMWARE_ID_MAX];

static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
{
	int ret;
	RLC_TABLE_OF_CONTENT *rlc_toc;

	ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_GTT,
					&adev->gfx.rlc.rlc_toc_bo,
					&adev->gfx.rlc.rlc_toc_gpu_addr,
					(void **)&adev->gfx.rlc.rlc_toc_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to create rlc toc bo\n", ret);
		return ret;
	}

	/* Copy toc from psp sos fw to rlc toc buffer */
	memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);

	rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
	while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
	       (rlc_toc->id < FIRMWARE_ID_MAX)) {
		if ((rlc_toc->id >= FIRMWARE_ID_CP_CE) &&
		    (rlc_toc->id <= FIRMWARE_ID_CP_MES)) {
			/* Offset needs 4KB alignment */
			rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
		}

		rlc_autoload_info[rlc_toc->id].id = rlc_toc->id;
		rlc_autoload_info[rlc_toc->id].offset = rlc_toc->offset * 4;
		rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;

		rlc_toc++;
	}

	return 0;
}

static uint32_t gfx_v10_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	FIRMWARE_ID id;
	int ret;

	ret = gfx_v10_0_parse_rlc_toc(adev);
	if (ret) {
		dev_err(adev->dev, "failed to parse rlc toc\n");
		return 0;
	}

	for (id = FIRMWARE_ID_RLC_G_UCODE; id < FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[FIRMWARE_ID_MAX-1].offset +
			     rlc_autoload_info[FIRMWARE_ID_MAX-1].size;

	return total_size;
}

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v10_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_toc_bo,
			      &adev->gfx.rlc.rlc_toc_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_toc_buf);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

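/*
 * Copy one firmware image into its TOC-assigned slot in the autoload
 * buffer.  A fw_size of zero means "use the whole slot"; an oversized
 * image is truncated to the slot, and any remainder of the slot is
 * zero-filled.
 */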
static void gfx_v10_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= FIRMWARE_ID_INVALID || id >= FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
}

static void gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
{
	void *data;
	uint32_t size;

	data = adev->gfx.rlc.rlc_toc_buf;
	size = rlc_autoload_info[FIRMWARE_ID_RLC_TOC].size;

	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_RLC_TOC,
						   data, size);
}

static void gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;

	/* pfp ucode */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_PFP,
						   fw_data, fw_size);

	/* ce ucode */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;
	fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_CE,
						   fw_data, fw_size);

	/* me ucode */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_ME,
						   fw_data, fw_size);

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
		le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size);

	/* mec1 ucode */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.mec_fw->data;
	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
		cp_hdr->jt_size * 4;
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_MEC,
						   fw_data, fw_size);
	/* mec2 ucode is not necessary if mec2 ucode is same as mec1 */
}

/* Temporarily put sdma part here */
static void gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v1_0 *sdma_hdr;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		sdma_hdr = (const struct sdma_firmware_header_v1_0 *)
			adev->sdma.instance[i].fw->data;
		fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(sdma_hdr->header.ucode_size_bytes);

		if (i == 0) {
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA0_UCODE, fw_data, fw_size);
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA0_JT,
				(const uint32_t *)fw_data +
				sdma_hdr->jt_offset,
				sdma_hdr->jt_size * 4);
		} else if (i == 1) {
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA1_UCODE, fw_data, fw_size);
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA1_JT,
				(const uint32_t *)fw_data +
				sdma_hdr->jt_offset,
				sdma_hdr->jt_size * 4);
		}
	}
}

static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size, tmp;
	uint64_t gpu_addr;

	gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(adev);
	gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
	gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);

	rlc_g_offset = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_LO, lower_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_SIZE, rlc_g_size);

	tmp = RREG32_SOC15(GC, 0, mmRLC_HYP_RESET_VECTOR);
	if (!(tmp & (RLC_HYP_RESET_VECTOR__COLD_BOOT_EXIT_MASK |
		     RLC_HYP_RESET_VECTOR__VDDGFX_EXIT_MASK))) {
		DRM_ERROR("Neither COLD_BOOT_EXIT nor VDDGFX_EXIT is set\n");
		return -EINVAL;
	}

	tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (tmp & RLC_CNTL__RLC_ENABLE_F32_MASK) {
		DRM_ERROR("RLC ROM should halt itself\n");
		return -EINVAL;
	}

	return 0;
}

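/*
 * The four gfx_v10_0_rlc_backdoor_autoload_config_*_cache() helpers below
 * share one pattern: trigger an invalidation of the engine's L1
 * instruction cache, poll (up to 50ms) for INVALIDATE_CACHE_COMPLETE,
 * then point the engine's IC_BASE registers at the firmware image inside
 * the autoload buffer.
 */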
static int gfx_v10_0_rlc_backdoor_autoload_config_me_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;
	uint64_t addr;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Program me ucode address into instruction cache address register */
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_ME].offset;
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;
	uint64_t addr;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Program ce ucode address into instruction cache address register */
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_CE].offset;
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;
	uint64_t addr;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Program pfp ucode address into instruction cache address register */
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_PFP].offset;
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;
	uint64_t addr;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Program mec1 ucode address into instruction cache address register */
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_MEC].offset;
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
{
	uint32_t cp_status;
	uint32_t bootload_status;
	int i, r;

	for (i = 0; i < adev->usec_timeout; i++) {
		cp_status = RREG32_SOC15(GC, 0, mmCP_STAT);
		bootload_status = RREG32_SOC15(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS);
		if ((cp_status == 0) &&
		    (REG_GET_FIELD(bootload_status,
			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
			break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
		return -ETIMEDOUT;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_config_me_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(adev);
		if (r)
			return r;
	}

	return 0;
}

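/*
 * Halt or un-halt the gfx CP micro-engines (ME, PFP and CE) through
 * CP_ME_CNTL, then wait for CP_STAT to read back idle.  When halting,
 * the gfx rings are also marked not ready for the scheduler.
 */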
static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
	if (!enable) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].sched.ready = false;
	}
	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");

	return 0;
}

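/*
 * Front-door PFP microcode load: copy the image from the firmware file
 * into a freshly created GTT buffer object, invalidate the PFP L1
 * instruction cache, and point CP_PFP_IC_BASE at the buffer.  The CE and
 * ME loaders below follow the same sequence with their own registers.
 */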
static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
		gfx_v10_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio.funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL, tmp);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
		     adev->gfx.pfp.pfp_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
		     upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;

	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
		le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, ce_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.ce.ce_fw_obj,
				      &adev->gfx.ce.ce_fw_gpu_addr,
				      (void **)&adev->gfx.ce.ce_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create ce fw bo\n", r);
		gfx_v10_0_ce_fini(adev);
		return r;
	}

	memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj);

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio.funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
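	/*
	 * Note that, unlike the PFP loader above, the CP_CE_IC_BASE_CNTL
	 * value computed here is never written back (the ME loader below
	 * shares this quirk); this matches the upstream code this file
	 * derives from.
	 */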
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
		     adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
		     upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
		gfx_v10_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio.funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
		     adev->gfx.me.me_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
		     upper_32_bits(adev->gfx.me.me_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	gfx_v10_0_cp_gfx_enable(adev, false);

	r = gfx_v10_0_cp_gfx_load_pfp_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
		return r;
	}

	r = gfx_v10_0_cp_gfx_load_ce_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load ce fw\n", r);
		return r;
	}

	r = gfx_v10_0_cp_gfx_load_me_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
		return r;
	}

	return 0;
}

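/*
 * Bring up the gfx CP: program the context count and device ID, un-halt
 * the engines, emit the clear-state preamble and the
 * PA_SC_TILE_STEERING_OVERRIDE context register on ring 0, then issue a
 * CLEAR_STATE on ring 1 so state 0 is copied to the next available state.
 */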
static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;
	int ctx_reg_offset;

	/* init the CP */
	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT,
		     adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);

	gfx_v10_0_cp_gfx_enable(adev, true);

	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG,
							  ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index -
						  PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	amdgpu_ring_write(ring, ctx_reg_offset);
	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_commit(ring);

	/* submit cs packet to copy state 0 to next available state */
	ring = &adev->gfx.gfx_ring[1];
	r = amdgpu_ring_alloc(ring, 2);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_commit(ring);

	return 0;
}

static void gfx_v10_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
					 CP_PIPE_ID pipe)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL);
	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, tmp);
}

static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
					  struct amdgpu_ring *ring)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;
	u32 i;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

	/* Init gfx ring 0 for pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);

	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
	mutex_unlock(&adev->srbm_mutex);

	/* Init gfx ring 1 for pipe 1 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
	ring = &adev->gfx.gfx_ring[1];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
	/* Set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
	WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);

	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
	mutex_unlock(&adev->srbm_mutex);

	/* Switch to pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	/* start the ring */
	gfx_v10_0_cp_gfx_start(adev);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}

	return 0;
}

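/*
 * Gate the compute micro-engines: clearing CP_MEC_CNTL releases MEC1 and
 * MEC2, while setting the halt masks stops them and marks every compute
 * ring (and the KIQ ring) not ready for the scheduler.
 */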
static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
			     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
			      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].sched.ready = false;
		adev->gfx.kiq.ring.sched.ready = false;
	}
	udelay(50);
}

static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;
	u32 usec_timeout = 50000; /* Wait for 50 ms */

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v10_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio.funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr &
		     0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	/* MEC1 */
	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 0);

	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);

	/*
	 * TODO: Loading MEC2 firmware is only necessary if MEC2 should run
	 * different microcode than MEC1.
	 */

	return 0;
}

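/*
 * Register the KIQ with the RLC scheduler.  RLC_CP_SCHEDULERS takes the
 * queue's me/pipe/queue triple in its low byte; the second write, with
 * bit 7 added, appears to arm the entry.
 */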
static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}

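/*
 * Fill in a v10_gfx_mqd (memory queue descriptor) for a gfx ring: HQD
 * base, rptr/wptr write-back addresses, doorbell control and a ring
 * buffer control word mirroring CP_RB0_CNTL.  This is the state the CP
 * reads when the queue is mapped.
 */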
gfx_v10_0_gfx_mqd_init(struct amdgpu_ring * ring)2978 static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
2979 {
2980 struct amdgpu_device *adev = ring->adev;
2981 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
2982 uint64_t hqd_gpu_addr, wb_gpu_addr;
2983 uint32_t tmp;
2984 uint32_t rb_bufsz;
2985
2986 /* set up gfx hqd wptr */
2987 mqd->cp_gfx_hqd_wptr = 0;
2988 mqd->cp_gfx_hqd_wptr_hi = 0;
2989
2990 /* set the pointer to the MQD */
2991 mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
2992 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2993
2994 /* set up mqd control */
2995 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL);
2996 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
2997 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
2998 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
2999 mqd->cp_gfx_mqd_control = tmp;
3000
3001 /* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */
3002 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID);
3003 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3004 mqd->cp_gfx_hqd_vmid = 0;
3005
3006 /* set up default queue priority level
3007 * 0x0 = low priority, 0x1 = high priority */
3008 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
3009 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3010 mqd->cp_gfx_hqd_queue_priority = tmp;
3011
3012 /* set up time quantum */
3013 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);
3014 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3015 mqd->cp_gfx_hqd_quantum = tmp;
3016
3017 /* set up gfx hqd base. this is similar as CP_RB_BASE */
3018 hqd_gpu_addr = ring->gpu_addr >> 8;
3019 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3020 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3021
3022 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */
3023 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3024 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3025 mqd->cp_gfx_hqd_rptr_addr_hi =
3026 upper_32_bits(wb_gpu_addr) & 0xffff;
3027
3028 /* set up rb_wptr_poll addr */
3029 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3030 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3031 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3032
3033 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
3034 rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
3035 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL);
3036 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3037 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
3038 #ifdef __BIG_ENDIAN
3039 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
3040 #endif
3041 mqd->cp_gfx_hqd_cntl = tmp;
3042
3043 /* set up cp_doorbell_control */
3044 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3045 if (ring->use_doorbell) {
3046 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3047 DOORBELL_OFFSET, ring->doorbell_index);
3048 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3049 DOORBELL_EN, 1);
3050 } else
3051 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3052 DOORBELL_EN, 0);
3053 mqd->cp_rb_doorbell_control = tmp;
3054
3055 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3056 ring->wptr = 0;
3057 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR);
3058
3059 /* activate the queue */
3060 mqd->cp_gfx_hqd_active = 1;
3061
3062 return 0;
3063 }
3064
3065 #ifdef BRING_UP_DEBUG
3066 static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
3067 {
3068 struct amdgpu_device *adev = ring->adev;
3069 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3070
3071 /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
3072 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
3073 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
3074
3075 /* set GFX_MQD_BASE */
3076 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
3077 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3078
3079 /* set GFX_MQD_CONTROL */
3080 WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
3081
3082 /* set GFX_HQD_VMID to 0 */
3083 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
3084
3085 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
3086 mqd->cp_gfx_hqd_queue_priority);
3087 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
3088
3089 /* set GFX_HQD_BASE, similar as CP_RB_BASE */
3090 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
3091 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
3092
3093 /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
3094 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
3095 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
3096
3097 /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
3098 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
3099
3100 /* set RB_WPTR_POLL_ADDR */
3101 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
3102 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
3103
3104 /* set RB_DOORBELL_CONTROL */
3105 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
3106
3107 /* activate the queue */
3108 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
3109
3110 return 0;
3111 }
3112 #endif
3113
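/*
 * Initialize a gfx queue.  On first init the MQD is built from scratch
 * (under srbm_mutex with the ring's me/pipe/queue selected) and backed
 * up; on GPU reset it is restored from the backup and the ring buffer
 * is cleared instead.
 */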
3114 static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
3115 {
3116 struct amdgpu_device *adev = ring->adev;
3117 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3118 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3119
3120 if (!adev->in_gpu_reset && !adev->in_suspend) {
3121 memset((void *)mqd, 0, sizeof(*mqd));
3122 mutex_lock(&adev->srbm_mutex);
3123 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3124 gfx_v10_0_gfx_mqd_init(ring);
3125 #ifdef BRING_UP_DEBUG
3126 gfx_v10_0_gfx_queue_init_register(ring);
3127 #endif
3128 nv_grbm_select(adev, 0, 0, 0, 0);
3129 mutex_unlock(&adev->srbm_mutex);
3130 if (adev->gfx.me.mqd_backup[mqd_idx])
3131 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3132 } else if (adev->in_gpu_reset) {
3133 /* reset mqd with the backup copy */
3134 if (adev->gfx.me.mqd_backup[mqd_idx])
3135 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3136 /* reset the ring */
3137 ring->wptr = 0;
3138 adev->wb.wb[ring->wptr_offs] = 0;
3139 amdgpu_ring_clear_ring(ring);
3140 #ifdef BRING_UP_DEBUG
3141 mutex_lock(&adev->srbm_mutex);
3142 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3143 gfx_v10_0_gfx_queue_init_register(ring);
3144 nv_grbm_select(adev, 0, 0, 0, 0);
3145 mutex_unlock(&adev->srbm_mutex);
3146 #endif
3147 } else {
3148 amdgpu_ring_clear_ring(ring);
3149 }
3150
3151 return 0;
3152 }
3153
3154 #ifndef BRING_UP_DEBUG
3155 static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
3156 {
3157 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3158 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3159 int r, i;
3160
3161 if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
3162 return -EINVAL;
3163
3164 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
3165 adev->gfx.num_gfx_rings);
3166 if (r) {
3167 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3168 return r;
3169 }
3170
3171 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3172 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
3173
3174 r = amdgpu_ring_test_ring(kiq_ring);
3175 if (r) {
3176 DRM_ERROR("kfq enable failed\n");
3177 kiq_ring->sched.ready = false;
3178 }
3179 return r;
3180 }
3181 #endif
3182
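/*
 * Resume the async gfx rings: initialize each ring's MQD, map the
 * kernel gfx queues through the KIQ, start the CP gfx engine and mark
 * the ring schedulers ready.
 */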
3183 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3184 {
3185 int r, i;
3186 struct amdgpu_ring *ring;
3187
3188 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3189 ring = &adev->gfx.gfx_ring[i];
3190
3191 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3192 if (unlikely(r != 0))
3193 goto done;
3194
3195 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3196 if (!r) {
3197 r = gfx_v10_0_gfx_init_queue(ring);
3198 amdgpu_bo_kunmap(ring->mqd_obj);
3199 ring->mqd_ptr = NULL;
3200 }
3201 amdgpu_bo_unreserve(ring->mqd_obj);
3202 if (r)
3203 goto done;
3204 }
3205 #ifndef BRING_UP_DEBUG
3206 r = gfx_v10_0_kiq_enable_kgq(adev);
3207 if (r)
3208 goto done;
3209 #endif
3210 r = gfx_v10_0_cp_gfx_start(adev);
3211 if (r)
3212 goto done;
3213
3214 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3215 ring = &adev->gfx.gfx_ring[i];
3216 ring->sched.ready = true;
3217 }
3218 done:
3219 return r;
3220 }
3221
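/*
 * Fill in the memory queue descriptor (MQD) for a compute ring: EOP
 * buffer, pipe queue base/size, rptr report and wptr poll addresses,
 * and doorbell setup.  Only the KIQ marks its queue active here;
 * regular queues are activated by the map_queues packet.
 */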
3222 static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
3223 {
3224 struct amdgpu_device *adev = ring->adev;
3225 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3226 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3227 uint32_t tmp;
3228
3229 mqd->header = 0xC0310800;
3230 mqd->compute_pipelinestat_enable = 0x00000001;
3231 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3232 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3233 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3234 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3235 mqd->compute_misc_reserved = 0x00000003;
3236
3237 eop_base_addr = ring->eop_gpu_addr >> 8;
3238 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3239 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3240
3241 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3242 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3243 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3244 (order_base_2(GFX10_MEC_HPD_SIZE / 4) - 1));
3245
3246 mqd->cp_hqd_eop_control = tmp;
3247
3248 /* enable doorbell? */
3249 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3250
3251 if (ring->use_doorbell) {
3252 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3253 DOORBELL_OFFSET, ring->doorbell_index);
3254 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3255 DOORBELL_EN, 1);
3256 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3257 DOORBELL_SOURCE, 0);
3258 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3259 DOORBELL_HIT, 0);
3260 } else {
3261 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3262 DOORBELL_EN, 0);
3263 }
3264
3265 mqd->cp_hqd_pq_doorbell_control = tmp;
3266
3267 /* disable the queue if it's active */
3268 ring->wptr = 0;
3269 mqd->cp_hqd_dequeue_request = 0;
3270 mqd->cp_hqd_pq_rptr = 0;
3271 mqd->cp_hqd_pq_wptr_lo = 0;
3272 mqd->cp_hqd_pq_wptr_hi = 0;
3273
3274 /* set the pointer to the MQD */
3275 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3276 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3277
3278 /* set MQD vmid to 0 */
3279 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3280 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3281 mqd->cp_mqd_control = tmp;
3282
3283 /* set the pointer to the HQD, similar to CP_RB0_BASE/_HI */
3284 hqd_gpu_addr = ring->gpu_addr >> 8;
3285 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3286 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3287
3288 /* set up the HQD, this is similar to CP_RB0_CNTL */
3289 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3290 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3291 (order_base_2(ring->ring_size / 4) - 1));
3292 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3293 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3294 #ifdef __BIG_ENDIAN
3295 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3296 #endif
3297 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3298 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3299 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3300 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3301 mqd->cp_hqd_pq_control = tmp;
3302
3303 /* set the wb address whether it's enabled or not */
3304 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3305 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3306 mqd->cp_hqd_pq_rptr_report_addr_hi =
3307 upper_32_bits(wb_gpu_addr) & 0xffff;
3308
3309 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3310 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3311 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3312 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3313
3314 tmp = 0;
3315 /* enable the doorbell if requested */
3316 if (ring->use_doorbell) {
3317 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3318 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3319 DOORBELL_OFFSET, ring->doorbell_index);
3320
3321 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3322 DOORBELL_EN, 1);
3323 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3324 DOORBELL_SOURCE, 0);
3325 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3326 DOORBELL_HIT, 0);
3327 }
3328
3329 mqd->cp_hqd_pq_doorbell_control = tmp;
3330
3331 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3332 ring->wptr = 0;
3333 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3334
3335 /* set the vmid for the queue */
3336 mqd->cp_hqd_vmid = 0;
3337
3338 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3339 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3340 mqd->cp_hqd_persistent_state = tmp;
3341
3342 /* set MIN_IB_AVAIL_SIZE */
3343 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3344 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3345 mqd->cp_hqd_ib_control = tmp;
3346
3347 /* the map_queues packet doesn't need to activate the queue,
3348 * so only the KIQ needs to set this field.
3349 */
3350 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3351 mqd->cp_hqd_active = 1;
3352
3353 return 0;
3354 }
3355
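/*
 * Program the KIQ's MQD contents directly into the HQD registers.
 * Callers must hold srbm_mutex with the KIQ's me/pipe/queue selected
 * via nv_grbm_select().
 */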
3356 static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
3357 {
3358 struct amdgpu_device *adev = ring->adev;
3359 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3360 int j;
3361
3362 /* disable wptr polling */
3363 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3364
3365 /* write the EOP addr */
3366 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3367 mqd->cp_hqd_eop_base_addr_lo);
3368 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3369 mqd->cp_hqd_eop_base_addr_hi);
3370
3371 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3372 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
3373 mqd->cp_hqd_eop_control);
3374
3375 /* enable doorbell? */
3376 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3377 mqd->cp_hqd_pq_doorbell_control);
3378
3379 /* disable the queue if it's active */
3380 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3381 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3382 for (j = 0; j < adev->usec_timeout; j++) {
3383 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3384 break;
3385 udelay(1);
3386 }
3387 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3388 mqd->cp_hqd_dequeue_request);
3389 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3390 mqd->cp_hqd_pq_rptr);
3391 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3392 mqd->cp_hqd_pq_wptr_lo);
3393 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3394 mqd->cp_hqd_pq_wptr_hi);
3395 }
3396
3397 /* set the pointer to the MQD */
3398 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3399 mqd->cp_mqd_base_addr_lo);
3400 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3401 mqd->cp_mqd_base_addr_hi);
3402
3403 /* set MQD vmid to 0 */
3404 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3405 mqd->cp_mqd_control);
3406
3407 /* set the pointer to the HQD, similar to CP_RB0_BASE/_HI */
3408 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3409 mqd->cp_hqd_pq_base_lo);
3410 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3411 mqd->cp_hqd_pq_base_hi);
3412
3413 /* set up the HQD, this is similar to CP_RB0_CNTL */
3414 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3415 mqd->cp_hqd_pq_control);
3416
3417 /* set the wb address whether it's enabled or not */
3418 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3419 mqd->cp_hqd_pq_rptr_report_addr_lo);
3420 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3421 mqd->cp_hqd_pq_rptr_report_addr_hi);
3422
3423 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3424 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3425 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3426 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3427 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3428
3429 /* enable the doorbell if requested */
3430 if (ring->use_doorbell) {
3431 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3432 (adev->doorbell_index.kiq * 2) << 2);
3433 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3434 (adev->doorbell_index.userqueue_end * 2) << 2);
3435 }
3436
3437 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3438 mqd->cp_hqd_pq_doorbell_control);
3439
3440 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3441 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3442 mqd->cp_hqd_pq_wptr_lo);
3443 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3444 mqd->cp_hqd_pq_wptr_hi);
3445
3446 /* set the vmid for the queue */
3447 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3448
3449 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3450 mqd->cp_hqd_persistent_state);
3451
3452 /* activate the queue */
3453 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3454 mqd->cp_hqd_active);
3455
3456 if (ring->use_doorbell)
3457 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3458
3459 return 0;
3460 }
3461
3462 static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
3463 {
3464 struct amdgpu_device *adev = ring->adev;
3465 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3466 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3467
3468 gfx_v10_0_kiq_setting(ring);
3469
3470 if (adev->in_gpu_reset) { /* for GPU_RESET case */
3471 /* reset MQD to a clean status */
3472 if (adev->gfx.mec.mqd_backup[mqd_idx])
3473 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3474
3475 /* reset ring buffer */
3476 ring->wptr = 0;
3477 amdgpu_ring_clear_ring(ring);
3478
3479 mutex_lock(&adev->srbm_mutex);
3480 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3481 gfx_v10_0_kiq_init_register(ring);
3482 nv_grbm_select(adev, 0, 0, 0, 0);
3483 mutex_unlock(&adev->srbm_mutex);
3484 } else {
3485 memset((void *)mqd, 0, sizeof(*mqd));
3486 mutex_lock(&adev->srbm_mutex);
3487 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3488 gfx_v10_0_compute_mqd_init(ring);
3489 gfx_v10_0_kiq_init_register(ring);
3490 nv_grbm_select(adev, 0, 0, 0, 0);
3491 mutex_unlock(&adev->srbm_mutex);
3492
3493 if (adev->gfx.mec.mqd_backup[mqd_idx])
3494 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3495 }
3496
3497 return 0;
3498 }
3499
3500 static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
3501 {
3502 struct amdgpu_device *adev = ring->adev;
3503 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3504 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3505
3506 if (!adev->in_gpu_reset && !adev->in_suspend) {
3507 memset((void *)mqd, 0, sizeof(*mqd));
3508 mutex_lock(&adev->srbm_mutex);
3509 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3510 gfx_v10_0_compute_mqd_init(ring);
3511 nv_grbm_select(adev, 0, 0, 0, 0);
3512 mutex_unlock(&adev->srbm_mutex);
3513
3514 if (adev->gfx.mec.mqd_backup[mqd_idx])
3515 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3516 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3517 /* reset MQD to a clean status */
3518 if (adev->gfx.mec.mqd_backup[mqd_idx])
3519 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3520
3521 /* reset ring buffer */
3522 ring->wptr = 0;
3523 amdgpu_ring_clear_ring(ring);
3524 } else {
3525 amdgpu_ring_clear_ring(ring);
3526 }
3527
3528 return 0;
3529 }
3530
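/*
 * Resume the KIQ ring: map its MQD buffer object, (re)initialize the
 * queue and mark its scheduler ready.
 */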
3531 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
3532 {
3533 struct amdgpu_ring *ring;
3534 int r;
3535
3536 ring = &adev->gfx.kiq.ring;
3537
3538 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3539 if (unlikely(r != 0))
3540 return r;
3541
3542 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3543 if (unlikely(r != 0))
3544 return r;
3545
3546 gfx_v10_0_kiq_init_queue(ring);
3547 amdgpu_bo_kunmap(ring->mqd_obj);
3548 ring->mqd_ptr = NULL;
3549 amdgpu_bo_unreserve(ring->mqd_obj);
3550 ring->sched.ready = true;
3551 return 0;
3552 }
3553
3554 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
3555 {
3556 struct amdgpu_ring *ring = NULL;
3557 int r = 0, i;
3558
3559 gfx_v10_0_cp_compute_enable(adev, true);
3560
3561 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3562 ring = &adev->gfx.compute_ring[i];
3563
3564 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3565 if (unlikely(r != 0))
3566 goto done;
3567 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3568 if (!r) {
3569 r = gfx_v10_0_kcq_init_queue(ring);
3570 amdgpu_bo_kunmap(ring->mqd_obj);
3571 ring->mqd_ptr = NULL;
3572 }
3573 amdgpu_bo_unreserve(ring->mqd_obj);
3574 if (r)
3575 goto done;
3576 }
3577
3578 r = amdgpu_gfx_enable_kcq(adev);
3579 done:
3580 return r;
3581 }
3582
3583 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
3584 {
3585 int r, i;
3586 struct amdgpu_ring *ring;
3587
3588 if (!(adev->flags & AMD_IS_APU))
3589 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3590
3591 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3592 /* legacy firmware loading */
3593 r = gfx_v10_0_cp_gfx_load_microcode(adev);
3594 if (r)
3595 return r;
3596
3597 r = gfx_v10_0_cp_compute_load_microcode(adev);
3598 if (r)
3599 return r;
3600 }
3601
3602 r = gfx_v10_0_kiq_resume(adev);
3603 if (r)
3604 return r;
3605
3606 r = gfx_v10_0_kcq_resume(adev);
3607 if (r)
3608 return r;
3609
3610 if (!amdgpu_async_gfx_ring) {
3611 r = gfx_v10_0_cp_gfx_resume(adev);
3612 if (r)
3613 return r;
3614 } else {
3615 r = gfx_v10_0_cp_async_gfx_ring_resume(adev);
3616 if (r)
3617 return r;
3618 }
3619
3620 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3621 ring = &adev->gfx.gfx_ring[i];
3622 r = amdgpu_ring_test_helper(ring);
3623 if (r)
3624 return r;
3625 }
3626
3627 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3628 ring = &adev->gfx.compute_ring[i];
3629 r = amdgpu_ring_test_helper(ring);
3630 if (r)
3631 return r;
3632 }
3633
3634 return 0;
3635 }
3636
3637 static void gfx_v10_0_cp_enable(struct amdgpu_device *adev, bool enable)
3638 {
3639 gfx_v10_0_cp_gfx_enable(adev, enable);
3640 gfx_v10_0_cp_compute_enable(adev, enable);
3641 }
3642
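/*
 * Detect whether the GRBM CAM already remaps the UMD register aliases
 * by writing a test pattern through mmVGT_ESGS_RING_SIZE_UMD and
 * checking whether it shows up in mmVGT_ESGS_RING_SIZE.  The original
 * register value is restored either way.
 */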
3643 static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
3644 {
3645 uint32_t data, pattern = 0xDEADBEEF;
3646
3647 /* check if mmVGT_ESGS_RING_SIZE_UMD
3648 * has been remapped to mmVGT_ESGS_RING_SIZE */
3649 data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
3650
3651 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
3652
3653 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
3654
3655 if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
3656 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
3657 return true;
3658 } else {
3659 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
3660 return false;
3661 }
3662 }
3663
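/*
 * Program the GRBM CAM so writes to the *_UMD register aliases are
 * remapped onto the corresponding privileged registers.  The CAM
 * index auto-increments, so the entries are written back to back.
 */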
3664 static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
3665 {
3666 uint32_t data;
3667
3668 /* initialize cam_index to 0;
3669 * the index auto-increments after each data write */
3670 WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
3671
3672 /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
3673 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
3674 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3675 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
3676 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3677 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3678 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3679
3680 /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
3681 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
3682 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3683 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
3684 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3685 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3686 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3687
3688 /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
3689 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
3690 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3691 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
3692 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3693 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3694 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3695
3696 /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
3697 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
3698 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3699 (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
3700 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3701 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3702 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3703
3704 /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
3705 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
3706 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3707 (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
3708 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3709 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3710 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3711
3712 /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
3713 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
3714 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3715 (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
3716 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3717 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3718 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3719
3720 /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
3721 data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
3722 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3723 (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
3724 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3725 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3726 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3727 }
3728
3729 static int gfx_v10_0_hw_init(void *handle)
3730 {
3731 int r;
3732 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3733
3734 if (!amdgpu_emu_mode)
3735 gfx_v10_0_init_golden_registers(adev);
3736
3737 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3738 /*
3739 * For GFX 10, RLC firmware loading relies on the SMU firmware being
3740 * loaded first, so for direct loading the SMC microcode has to be
3741 * loaded here before the RLC.
3742 */
3743 r = smu_load_microcode(&adev->smu);
3744 if (r)
3745 return r;
3746
3747 r = smu_check_fw_status(&adev->smu);
3748 if (r) {
3749 pr_err("SMC firmware status is not correct\n");
3750 return r;
3751 }
3752 }
3753
3754 /* if the GRBM CAM is not yet remapped, set up the remapping */
3755 if (!gfx_v10_0_check_grbm_cam_remapping(adev))
3756 gfx_v10_0_setup_grbm_cam_remapping(adev);
3757
3758 gfx_v10_0_constants_init(adev);
3759
3760 r = gfx_v10_0_rlc_resume(adev);
3761 if (r)
3762 return r;
3763
3764 /*
3765 * golden register init and RLC resume may override some registers,
3766 * so reconfigure them here
3767 */
3768 gfx_v10_0_tcp_harvest(adev);
3769
3770 r = gfx_v10_0_cp_resume(adev);
3771 if (r)
3772 return r;
3773
3774 return r;
3775 }
3776
3777 #ifndef BRING_UP_DEBUG
3778 static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
3779 {
3780 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3781 struct amdgpu_ring *kiq_ring = &kiq->ring;
3782 int i;
3783
3784 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3785 return -EINVAL;
3786
3787 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
3788 adev->gfx.num_gfx_rings))
3789 return -ENOMEM;
3790
3791 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3792 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
3793 PREEMPT_QUEUES, 0, 0);
3794
3795 return amdgpu_ring_test_ring(kiq_ring);
3796 }
3797 #endif
3798
3799 static int gfx_v10_0_hw_fini(void *handle)
3800 {
3801 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3802 int r;
3803
3804 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3805 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3806 #ifndef BRING_UP_DEBUG
3807 if (amdgpu_async_gfx_ring) {
3808 r = gfx_v10_0_kiq_disable_kgq(adev);
3809 if (r)
3810 DRM_ERROR("KGQ disable failed\n");
3811 }
3812 #endif
3813 if (amdgpu_gfx_disable_kcq(adev))
3814 DRM_ERROR("KCQ disable failed\n");
3815 if (amdgpu_sriov_vf(adev)) {
3816 gfx_v10_0_cp_gfx_enable(adev, false);
3817 return 0;
3818 }
3819 gfx_v10_0_cp_enable(adev, false);
3820 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3821
3822 return 0;
3823 }
3824
3825 static int gfx_v10_0_suspend(void *handle)
3826 {
3827 return gfx_v10_0_hw_fini(handle);
3828 }
3829
3830 static int gfx_v10_0_resume(void *handle)
3831 {
3832 return gfx_v10_0_hw_init(handle);
3833 }
3834
3835 static bool gfx_v10_0_is_idle(void *handle)
3836 {
3837 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3838
3839 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3840 GRBM_STATUS, GUI_ACTIVE))
3841 return false;
3842 else
3843 return true;
3844 }
3845
3846 static int gfx_v10_0_wait_for_idle(void *handle)
3847 {
3848 unsigned i;
3849 u32 tmp;
3850 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3851
3852 for (i = 0; i < adev->usec_timeout; i++) {
3853 /* read GRBM_STATUS */
3854 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
3855 GRBM_STATUS__GUI_ACTIVE_MASK;
3856
3857 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
3858 return 0;
3859 udelay(1);
3860 }
3861 return -ETIMEDOUT;
3862 }
3863
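/*
 * Check GRBM_STATUS/GRBM_STATUS2 for busy blocks and, if any are hung,
 * pulse the corresponding GRBM_SOFT_RESET bits with the RLC and CP
 * engines stopped.
 */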
3864 static int gfx_v10_0_soft_reset(void *handle)
3865 {
3866 u32 grbm_soft_reset = 0;
3867 u32 tmp;
3868 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3869
3870 /* GRBM_STATUS */
3871 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3872 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3873 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3874 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
3875 GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
3876 GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
3877 | GRBM_STATUS__BCI_BUSY_MASK)) {
3878 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3879 GRBM_SOFT_RESET, SOFT_RESET_CP,
3880 1);
3881 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3882 GRBM_SOFT_RESET, SOFT_RESET_GFX,
3883 1);
3884 }
3885
3886 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3887 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3888 GRBM_SOFT_RESET, SOFT_RESET_CP,
3889 1);
3890 }
3891
3892 /* GRBM_STATUS2 */
3893 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3894 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3895 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3896 GRBM_SOFT_RESET, SOFT_RESET_RLC,
3897 1);
3898
3899 if (grbm_soft_reset) {
3900 /* stop the rlc */
3901 gfx_v10_0_rlc_stop(adev);
3902
3903 /* Disable GFX parsing/prefetching */
3904 gfx_v10_0_cp_gfx_enable(adev, false);
3905
3906 /* Disable MEC parsing/prefetching */
3907 gfx_v10_0_cp_compute_enable(adev, false);
3908
3909 if (grbm_soft_reset) {
3910 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3911 tmp |= grbm_soft_reset;
3912 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3913 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3914 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3915
3916 udelay(50);
3917
3918 tmp &= ~grbm_soft_reset;
3919 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3920 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3921 }
3922
3923 /* Wait a little for things to settle down */
3924 udelay(50);
3925 }
3926 return 0;
3927 }
3928
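/*
 * Sample the 64-bit RLC GPU clock counter.  GFXOFF is disabled around
 * the read so the RLC registers stay accessible.
 */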
3929 static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3930 {
3931 uint64_t clock;
3932
3933 amdgpu_gfx_off_ctrl(adev, false);
3934 mutex_lock(&adev->gfx.gpu_clock_mutex);
3935 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3936 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3937 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3938 mutex_unlock(&adev->gfx.gpu_clock_mutex);
3939 amdgpu_gfx_off_ctrl(adev, true);
3940 return clock;
3941 }
3942
3943 static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3944 uint32_t vmid,
3945 uint32_t gds_base, uint32_t gds_size,
3946 uint32_t gws_base, uint32_t gws_size,
3947 uint32_t oa_base, uint32_t oa_size)
3948 {
3949 struct amdgpu_device *adev = ring->adev;
3950
3951 /* GDS Base */
3952 gfx_v10_0_write_data_to_reg(ring, 0, false,
3953 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3954 gds_base);
3955
3956 /* GDS Size */
3957 gfx_v10_0_write_data_to_reg(ring, 0, false,
3958 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3959 gds_size);
3960
3961 /* GWS */
3962 gfx_v10_0_write_data_to_reg(ring, 0, false,
3963 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3964 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3965
3966 /* OA */
3967 gfx_v10_0_write_data_to_reg(ring, 0, false,
3968 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3969 (1 << (oa_size + oa_base)) - (1 << oa_base));
3970 }
3971
3972 static int gfx_v10_0_early_init(void *handle)
3973 {
3974 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3975
3976 adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
3977 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3978
3979 gfx_v10_0_set_kiq_pm4_funcs(adev);
3980 gfx_v10_0_set_ring_funcs(adev);
3981 gfx_v10_0_set_irq_funcs(adev);
3982 gfx_v10_0_set_gds_init(adev);
3983 gfx_v10_0_set_rlc_funcs(adev);
3984
3985 return 0;
3986 }
3987
3988 static int gfx_v10_0_late_init(void *handle)
3989 {
3990 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3991 int r;
3992
3993 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3994 if (r)
3995 return r;
3996
3997 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3998 if (r)
3999 return r;
4000
4001 return 0;
4002 }
4003
4004 static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
4005 {
4006 uint32_t rlc_cntl;
4007
4008 /* report whether the RLC F32 core is enabled */
4009 rlc_cntl = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4010 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
4011 }
4012
4013 static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
4014 {
4015 uint32_t data;
4016 unsigned i;
4017
4018 data = RLC_SAFE_MODE__CMD_MASK;
4019 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4020 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4021
4022 /* wait for RLC_SAFE_MODE */
4023 for (i = 0; i < adev->usec_timeout; i++) {
4024 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4025 break;
4026 udelay(1);
4027 }
4028 }
4029
4030 static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
4031 {
4032 uint32_t data;
4033
4034 data = RLC_SAFE_MODE__CMD_MASK;
4035 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4036 }
4037
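/*
 * Enable or disable medium-grain clock gating (MGCG) and the RLC/CP
 * memory light-sleep (MGLS) features according to adev->cg_flags.
 */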
4038 static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4039 bool enable)
4040 {
4041 uint32_t data, def;
4042
4043 /* It is disabled by HW by default */
4044 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4045 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4046 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4047 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4048 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4049 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4050
4051 /* only for Vega10 & Raven1 */
4052 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4053
4054 if (def != data)
4055 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4056
4057 /* MGLS is a global flag to control all MGLS in GFX */
4058 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4059 /* 2 - RLC memory Light sleep */
4060 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4061 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4062 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4063 if (def != data)
4064 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4065 }
4066 /* 3 - CP memory Light sleep */
4067 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4068 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4069 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4070 if (def != data)
4071 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4072 }
4073 }
4074 } else {
4075 /* 1 - MGCG_OVERRIDE */
4076 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4077 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4078 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4079 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4080 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4081 if (def != data)
4082 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4083
4084 /* 2 - disable MGLS in RLC */
4085 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4086 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4087 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4088 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4089 }
4090
4091 /* 3 - disable MGLS in CP */
4092 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4093 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4094 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4095 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4096 }
4097 }
4098 }
4099
4100 static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
4101 bool enable)
4102 {
4103 uint32_t data, def;
4104
4105 /* Enable 3D CGCG/CGLS */
4106 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4107 /* write cmd to clear cgcg/cgls ov */
4108 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4109 /* unset CGCG override */
4110 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4111 /* update CGCG and CGLS override bits */
4112 if (def != data)
4113 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4114 /* enable 3Dcgcg FSM(0x0000363f) */
4115 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4116 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4117 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4118 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4119 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4120 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4121 if (def != data)
4122 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4123
4124 /* set IDLE_POLL_COUNT(0x00900100) */
4125 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4126 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4127 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4128 if (def != data)
4129 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4130 } else {
4131 /* Disable CGCG/CGLS */
4132 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4133 /* disable cgcg, cgls should be disabled */
4134 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4135 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4136 /* disable cgcg and cgls in FSM */
4137 if (def != data)
4138 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4139 }
4140 }
4141
4142 static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4143 bool enable)
4144 {
4145 uint32_t def, data;
4146
4147 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4148 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4149 /* unset CGCG override */
4150 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4151 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4152 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4153 else
4154 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4155 /* update CGCG and CGLS override bits */
4156 if (def != data)
4157 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4158
4159 /* enable cgcg FSM(0x0000363F) */
4160 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4161 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4162 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4163 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4164 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4165 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4166 if (def != data)
4167 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4168
4169 /* set IDLE_POLL_COUNT(0x00900100) */
4170 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4171 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4172 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4173 if (def != data)
4174 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4175 } else {
4176 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4177 /* reset CGCG/CGLS bits */
4178 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4179 /* disable cgcg and cgls in FSM */
4180 if (def != data)
4181 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4182 }
4183 }
4184
4185 static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4186 bool enable)
4187 {
4188 amdgpu_gfx_rlc_enter_safe_mode(adev);
4189
4190 if (enable) {
4191 /* CGCG/CGLS should be enabled after MGCG/MGLS
4192 * === MGCG + MGLS ===
4193 */
4194 gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4195 /* === CGCG /CGLS for GFX 3D Only === */
4196 gfx_v10_0_update_3d_clock_gating(adev, enable);
4197 /* === CGCG + CGLS === */
4198 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4199 } else {
4200 /* CGCG/CGLS should be disabled before MGCG/MGLS
4201 * === CGCG + CGLS ===
4202 */
4203 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4204 /* === CGCG /CGLS for GFX 3D Only === */
4205 gfx_v10_0_update_3d_clock_gating(adev, enable);
4206 /* === MGCG + MGLS === */
4207 gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4208 }
4209
4210 if (adev->cg_flags &
4211 (AMD_CG_SUPPORT_GFX_MGCG |
4212 AMD_CG_SUPPORT_GFX_CGLS |
4213 AMD_CG_SUPPORT_GFX_CGCG |
4215 AMD_CG_SUPPORT_GFX_3D_CGCG |
4216 AMD_CG_SUPPORT_GFX_3D_CGLS))
4217 gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
4218
4219 amdgpu_gfx_rlc_exit_safe_mode(adev);
4220
4221 return 0;
4222 }
4223
4224 static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
4225 .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
4226 .set_safe_mode = gfx_v10_0_set_safe_mode,
4227 .unset_safe_mode = gfx_v10_0_unset_safe_mode,
4228 .init = gfx_v10_0_rlc_init,
4229 .get_csb_size = gfx_v10_0_get_csb_size,
4230 .get_csb_buffer = gfx_v10_0_get_csb_buffer,
4231 .resume = gfx_v10_0_rlc_resume,
4232 .stop = gfx_v10_0_rlc_stop,
4233 .reset = gfx_v10_0_rlc_reset,
4234 .start = gfx_v10_0_rlc_start
4235 };
4236
4237 static int gfx_v10_0_set_powergating_state(void *handle,
4238 enum amd_powergating_state state)
4239 {
4240 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4241 bool enable = (state == AMD_PG_STATE_GATE);
4242 switch (adev->asic_type) {
4243 case CHIP_NAVI10:
4244 case CHIP_NAVI14:
4245 if (!enable) {
4246 amdgpu_gfx_off_ctrl(adev, false);
4247 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
4248 } else
4249 amdgpu_gfx_off_ctrl(adev, true);
4250 break;
4251 default:
4252 break;
4253 }
4254 return 0;
4255 }
4256
4257 static int gfx_v10_0_set_clockgating_state(void *handle,
4258 enum amd_clockgating_state state)
4259 {
4260 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4261
4262 switch (adev->asic_type) {
4263 case CHIP_NAVI10:
4264 case CHIP_NAVI14:
4265 case CHIP_NAVI12:
4266 gfx_v10_0_update_gfx_clock_gating(adev,
4267 state == AMD_CG_STATE_GATE);
4268 break;
4269 default:
4270 break;
4271 }
4272 return 0;
4273 }
4274
4275 static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
4276 {
4277 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4278 int data;
4279
4280 /* AMD_CG_SUPPORT_GFX_MGCG */
4281 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4282 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4283 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
4284
4285 /* AMD_CG_SUPPORT_GFX_CGCG */
4286 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4287 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4288 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
4289
4290 /* AMD_CG_SUPPORT_GFX_CGLS */
4291 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4292 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
4293
4294 /* AMD_CG_SUPPORT_GFX_RLC_LS */
4295 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4296 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
4297 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
4298
4299 /* AMD_CG_SUPPORT_GFX_CP_LS */
4300 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4301 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
4302 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
4303
4304 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
4305 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4306 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4307 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4308
4309 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
4310 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4311 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4312 }
4313
4314 static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4315 {
4316 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 is 32-bit rptr */
4317 }
4318
4319 static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4320 {
4321 struct amdgpu_device *adev = ring->adev;
4322 u64 wptr;
4323
4324 /* XXX check if swapping is necessary on BE */
4325 if (ring->use_doorbell) {
4326 wptr = atomic_load_relaxed(&adev->wb.wb[ring->wptr_offs]);
4327 } else {
4328 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4329 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4330 }
4331
4332 return wptr;
4333 }
4334
4335 static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4336 {
4337 struct amdgpu_device *adev = ring->adev;
4338
4339 if (ring->use_doorbell) {
4340 /* XXX check if swapping is necessary on BE */
4341 atomic_store_relaxed(&adev->wb.wb[ring->wptr_offs], ring->wptr);
4342 WDOORBELL64(ring->doorbell_index, ring->wptr);
4343 } else {
4344 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4345 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4346 }
4347 }
4348
4349 static u64 gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4350 {
4351 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware is 32-bit rptr */
4352 }
4353
4354 static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4355 {
4356 u64 wptr;
4357
4358 /* XXX check if swapping is necessary on BE */
4359 if (ring->use_doorbell)
4360 wptr = atomic_load_relaxed(&ring->adev->wb.wb[ring->wptr_offs]);
4361 else
4362 BUG();
4363 return wptr;
4364 }
4365
4366 static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4367 {
4368 struct amdgpu_device *adev = ring->adev;
4369
4370 /* XXX check if swapping is necessary on BE */
4371 if (ring->use_doorbell) {
4372 atomic_store_relaxed(&adev->wb.wb[ring->wptr_offs], ring->wptr);
4373 WDOORBELL64(ring->doorbell_index, ring->wptr);
4374 } else {
4375 BUG(); /* only DOORBELL method supported on gfx10 now */
4376 }
4377 }
4378
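/*
 * Emit an HDP flush: trigger the flush through the NBIO request
 * register and wait on the matching done bit with a WAIT_REG_MEM.
 */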
4379 static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4380 {
4381 struct amdgpu_device *adev = ring->adev;
4382 u32 ref_and_mask, reg_mem_engine;
4383 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
4384
4385 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4386 switch (ring->me) {
4387 case 1:
4388 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4389 break;
4390 case 2:
4391 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4392 break;
4393 default:
4394 return;
4395 }
4396 reg_mem_engine = 0;
4397 } else {
4398 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4399 reg_mem_engine = 1; /* pfp */
4400 }
4401
4402 gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4403 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
4404 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
4405 ref_and_mask, ref_and_mask, 0x20);
4406 }
4407
4408 static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4409 struct amdgpu_job *job,
4410 struct amdgpu_ib *ib,
4411 uint32_t flags)
4412 {
4413 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4414 u32 header, control = 0;
4415
4416 if (ib->flags & AMDGPU_IB_FLAG_CE)
4417 header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
4418 else
4419 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4420
4421 control |= ib->length_dw | (vmid << 24);
4422
4423 if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4424 control |= INDIRECT_BUFFER_PRE_ENB(1);
4425
4426 if (flags & AMDGPU_IB_PREEMPTED)
4427 control |= INDIRECT_BUFFER_PRE_RESUME(1);
4428
4429 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
4430 gfx_v10_0_ring_emit_de_meta(ring,
4431 flags & AMDGPU_IB_PREEMPTED ? true : false);
4432 }
4433
4434 amdgpu_ring_write(ring, header);
4435 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4436 amdgpu_ring_write(ring,
4437 #ifdef __BIG_ENDIAN
4438 (2 << 0) |
4439 #endif
4440 lower_32_bits(ib->gpu_addr));
4441 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4442 amdgpu_ring_write(ring, control);
4443 }
4444
4445 static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4446 struct amdgpu_job *job,
4447 struct amdgpu_ib *ib,
4448 uint32_t flags)
4449 {
4450 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4451 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4452
4453 /* Currently, there is a high possibility to get wave ID mismatch
4454 * between ME and GDS, leading to a hw deadlock, because ME generates
4455 * different wave IDs than the GDS expects. This situation happens
4456 * randomly when at least 5 compute pipes use GDS ordered append.
4457 * The wave IDs generated by ME are also wrong after suspend/resume.
4458 * Those are probably bugs somewhere else in the kernel driver.
4459 *
4460 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
4461 * GDS to 0 for this ring (me/pipe).
4462 */
4463 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
4464 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4465 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
4466 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
4467 }
4468
4469 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4470 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4471 amdgpu_ring_write(ring,
4472 #ifdef __BIG_ENDIAN
4473 (2 << 0) |
4474 #endif
4475 lower_32_bits(ib->gpu_addr));
4476 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4477 amdgpu_ring_write(ring, control);
4478 }
4479
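/*
 * Emit a RELEASE_MEM fence on the gfx ring: flush/invalidate the GL2
 * and metadata caches, write the 32- or 64-bit sequence number and
 * optionally raise an interrupt.
 */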
4480 static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4481 u64 seq, unsigned flags)
4482 {
4483 struct amdgpu_device *adev = ring->adev;
4484 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4485 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4486
4487 /* Interrupts don't work correctly on the GFX10.1 model yet; use the fallback instead */
4488 if (adev->pdev->device == 0x50)
4489 int_sel = false;
4490
4491 /* RELEASE_MEM - flush caches, send int */
4492 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4493 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
4494 PACKET3_RELEASE_MEM_GCR_GL2_WB |
4495 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
4496 PACKET3_RELEASE_MEM_GCR_GLM_WB |
4497 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
4498 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4499 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
4500 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
4501 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
4502
4503 /*
4504 * the address should be Qword-aligned for a 64-bit write, and Dword-
4505 * aligned if only the low 32 bits of data are sent (data high discarded)
4506 */
4507 if (write64bit)
4508 BUG_ON(addr & 0x7);
4509 else
4510 BUG_ON(addr & 0x3);
4511 amdgpu_ring_write(ring, lower_32_bits(addr));
4512 amdgpu_ring_write(ring, upper_32_bits(addr));
4513 amdgpu_ring_write(ring, lower_32_bits(seq));
4514 amdgpu_ring_write(ring, upper_32_bits(seq));
4515 amdgpu_ring_write(ring, 0);
4516 }
4517
4518 static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4519 {
4520 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4521 uint32_t seq = ring->fence_drv.sync_seq;
4522 uint64_t addr = ring->fence_drv.gpu_addr;
4523
4524 gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
4525 upper_32_bits(addr), seq, 0xffffffff, 4);
4526 }
4527
4528 static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4529 unsigned vmid, uint64_t pd_addr)
4530 {
4531 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4532
4533 /* compute doesn't have PFP */
4534 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4535 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4536 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4537 amdgpu_ring_write(ring, 0x0);
4538 }
4539 }
4540
4541 static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4542 u64 seq, unsigned int flags)
4543 {
4544 struct amdgpu_device *adev = ring->adev;
4545
4546 /* we only allocate 32bit for each seq wb address */
4547 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4548
4549 /* write fence seq to the "addr" */
4550 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4551 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4552 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4553 amdgpu_ring_write(ring, lower_32_bits(addr));
4554 amdgpu_ring_write(ring, upper_32_bits(addr));
4555 amdgpu_ring_write(ring, lower_32_bits(seq));
4556
4557 if (flags & AMDGPU_FENCE_FLAG_INT) {
4558 /* set register to trigger INT */
4559 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4560 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4561 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4562 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4563 amdgpu_ring_write(ring, 0);
4564 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4565 }
4566 }
4567
4568 static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
4569 {
4570 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4571 amdgpu_ring_write(ring, 0);
4572 }
4573
4574 static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4575 {
4576 uint32_t dw2 = 0;
4577
4578 if (amdgpu_mcbp)
4579 gfx_v10_0_ring_emit_ce_meta(ring,
4580 flags & AMDGPU_IB_PREEMPTED ? true : false);
4581
4582 gfx_v10_0_ring_emit_tmz(ring, true);
4583
4584 dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
4585 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4586 /* set load_global_config & load_global_uconfig */
4587 dw2 |= 0x8001;
4588 /* set load_cs_sh_regs */
4589 dw2 |= 0x01000000;
4590 /* set load_per_context_state & load_gfx_sh_regs for GFX */
4591 dw2 |= 0x10002;
4592
4593 /* set load_ce_ram if a preamble is presented */
4594 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4595 dw2 |= 0x10000000;
4596 } else {
4597 /* still load_ce_ram if this is the first time a preamble is presented,
4598 * even though no context switch happens.
4599 */
4600 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4601 dw2 |= 0x10000000;
4602 }
4603
4604 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4605 amdgpu_ring_write(ring, dw2);
4606 amdgpu_ring_write(ring, 0);
4607 }
4608
4609 static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4610 {
4611 unsigned ret;
4612
4613 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4614 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4615 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4616 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
4617 ret = ring->wptr & ring->buf_mask;
4618 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
4619
4620 return ret;
4621 }
4622
4623 static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4624 {
4625 unsigned cur;
4626 BUG_ON(offset > ring->buf_mask);
4627 BUG_ON(ring->ring[offset] != 0x55aa55aa);
4628
4629 cur = (ring->wptr - 1) & ring->buf_mask;
4630 if (likely(cur > offset))
4631 ring->ring[offset] = cur - offset;
4632 else
4633 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
4634 }
4635
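/*
 * Preempt the IBs currently executing on this gfx ring: clear the
 * ring's COND_EXEC, ask the KIQ to preempt the queue with a trailing
 * fence, and poll that fence until the preemption completes.
 */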
4636 static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
4637 {
4638 int i, r = 0;
4639 struct amdgpu_device *adev = ring->adev;
4640 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4641 struct amdgpu_ring *kiq_ring = &kiq->ring;
4642
4643 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
4644 return -EINVAL;
4645
4646 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
4647 return -ENOMEM;
4648
4649 /* assert preemption condition */
4650 amdgpu_ring_set_preempt_cond_exec(ring, false);
4651
4652 /* assert IB preemption, emit the trailing fence */
4653 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
4654 ring->trail_fence_gpu_addr,
4655 ++ring->trail_seq);
4656 amdgpu_ring_commit(kiq_ring);
4657
4658 /* poll the trailing fence */
4659 for (i = 0; i < adev->usec_timeout; i++) {
4660 if (ring->trail_seq ==
4661 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
4662 break;
4663 udelay(1);
4664 }
4665
4666 if (i >= adev->usec_timeout) {
4667 r = -EINVAL;
4668 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
4669 }
4670
4671 /* deassert preemption condition */
4672 amdgpu_ring_set_preempt_cond_exec(ring, true);
4673 return r;
4674 }
4675
4676 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
4677 {
4678 struct amdgpu_device *adev = ring->adev;
4679 struct v10_ce_ib_state ce_payload = {0};
4680 uint64_t csa_addr;
4681 int cnt;
4682
4683 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
4684 csa_addr = amdgpu_csa_vaddr(ring->adev);
4685
4686 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4687 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4688 WRITE_DATA_DST_SEL(8) |
4689 WR_CONFIRM) |
4690 WRITE_DATA_CACHE_POLICY(0));
4691 amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4692 offsetof(struct v10_gfx_meta_data, ce_payload)));
4693 amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4694 offsetof(struct v10_gfx_meta_data, ce_payload)));
4695
4696 if (resume)
4697 amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4698 offsetof(struct v10_gfx_meta_data,
4699 ce_payload),
4700 sizeof(ce_payload) >> 2);
4701 else
4702 amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
4703 sizeof(ce_payload) >> 2);
4704 }
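/*
 * A note on the count used above: a PACKET3 count field is (total DWs - 2),
 * and this WRITE_DATA packet is header + control + addr_lo + addr_hi plus
 * the payload, i.e. 4 + (sizeof(ce_payload) >> 2) DWs in total, hence
 * cnt = (sizeof(ce_payload) >> 2) + 4 - 2.  (This reading follows the
 * PM4 encoding as used elsewhere in this file.)
 */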
4705
4706 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
4707 {
4708 struct amdgpu_device *adev = ring->adev;
4709 struct v10_de_ib_state de_payload = {0};
4710 uint64_t csa_addr, gds_addr;
4711 int cnt;
4712
4713 csa_addr = amdgpu_csa_vaddr(ring->adev);
4714 gds_addr = ALIGN(csa_addr + AMDGPU_CSA_SIZE - adev->gds.gds_size,
4715 PAGE_SIZE);
4716 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4717 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4718
4719 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4720 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4721 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4722 WRITE_DATA_DST_SEL(8) |
4723 WR_CONFIRM) |
4724 WRITE_DATA_CACHE_POLICY(0));
4725 amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4726 offsetof(struct v10_gfx_meta_data, de_payload)));
4727 amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4728 offsetof(struct v10_gfx_meta_data, de_payload)));
4729
4730 if (resume)
4731 amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4732 offsetof(struct v10_gfx_meta_data,
4733 de_payload),
4734 sizeof(de_payload) >> 2);
4735 else
4736 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
4737 sizeof(de_payload) >> 2);
4738 }
4739
4740 static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4741 {
4742 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4743 	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0 = frame_start, 1 = frame_end */
4744 }
4745
4746 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4747 {
4748 struct amdgpu_device *adev = ring->adev;
4749 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4750
4751 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4752 	amdgpu_ring_write(ring, 0 |	/* src: register */
4753 (5 << 8) | /* dst: memory */
4754 (1 << 20)); /* write confirm */
4755 amdgpu_ring_write(ring, reg);
4756 amdgpu_ring_write(ring, 0);
4757 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4758 kiq->reg_val_offs * 4));
4759 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4760 kiq->reg_val_offs * 4));
4761 }
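/*
 * Reading of the COPY_DATA control word above, per the in-line comments:
 * src_sel 0 (register) in the low bits, dst_sel 5 (memory) at bit 8, and
 * write-confirm at bit 20.  The register value therefore lands in the
 * write-back slot at adev->wb.gpu_addr + kiq->reg_val_offs * 4, from
 * which the CPU side of the KIQ register-read path presumably picks it up.
 */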
4762
4763 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4764 uint32_t val)
4765 {
4766 uint32_t cmd = 0;
4767
4768 switch (ring->funcs->type) {
4769 case AMDGPU_RING_TYPE_GFX:
4770 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4771 break;
4772 case AMDGPU_RING_TYPE_KIQ:
4773 cmd = (1 << 16); /* no inc addr */
4774 break;
4775 default:
4776 cmd = WR_CONFIRM;
4777 break;
4778 }
4779 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4780 amdgpu_ring_write(ring, cmd);
4781 amdgpu_ring_write(ring, reg);
4782 amdgpu_ring_write(ring, 0);
4783 amdgpu_ring_write(ring, val);
4784 }
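/*
 * Example encoding, assuming a GFX ring writing value 0x1 to register
 * offset 0x1234 (engine_sel 1 is taken to select the PFP here):
 *
 *   DW0: PACKET3(PACKET3_WRITE_DATA, 3)
 *   DW1: WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM
 *   DW2: 0x1234	destination register offset
 *   DW3: 0		upper address bits, unused for register writes
 *   DW4: 0x1		value to write
 */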
4785
4786 static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4787 uint32_t val, uint32_t mask)
4788 {
4789 gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4790 }
4791
4792 static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4793 uint32_t reg0, uint32_t reg1,
4794 uint32_t ref, uint32_t mask)
4795 {
4796 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4797 struct amdgpu_device *adev = ring->adev;
4798 bool fw_version_ok = false;
4799
4800 fw_version_ok = adev->gfx.cp_fw_write_wait;
4801
4802 if (fw_version_ok)
4803 gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4804 ref, mask, 0x20);
4805 else
4806 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4807 ref, mask);
4808 }
4809
4810 static void
4811 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4812 uint32_t me, uint32_t pipe,
4813 enum amdgpu_interrupt_state state)
4814 {
4815 uint32_t cp_int_cntl, cp_int_cntl_reg;
4816
4817 if (!me) {
4818 switch (pipe) {
4819 case 0:
4820 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
4821 break;
4822 case 1:
4823 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
4824 break;
4825 default:
4826 DRM_DEBUG("invalid pipe %d\n", pipe);
4827 return;
4828 }
4829 } else {
4830 DRM_DEBUG("invalid me %d\n", me);
4831 return;
4832 }
4833
4834 switch (state) {
4835 case AMDGPU_IRQ_STATE_DISABLE:
4836 cp_int_cntl = RREG32(cp_int_cntl_reg);
4837 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4838 TIME_STAMP_INT_ENABLE, 0);
4839 WREG32(cp_int_cntl_reg, cp_int_cntl);
4840 break;
4841 case AMDGPU_IRQ_STATE_ENABLE:
4842 cp_int_cntl = RREG32(cp_int_cntl_reg);
4843 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4844 TIME_STAMP_INT_ENABLE, 1);
4845 WREG32(cp_int_cntl_reg, cp_int_cntl);
4846 break;
4847 default:
4848 break;
4849 }
4850 }
4851
4852 static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4853 int me, int pipe,
4854 enum amdgpu_interrupt_state state)
4855 {
4856 u32 mec_int_cntl, mec_int_cntl_reg;
4857
4858 /*
4859 * amdgpu controls only the first MEC. That's why this function only
4860 * handles the setting of interrupts for this specific MEC. All other
4861 * pipes' interrupts are set by amdkfd.
4862 */
4863
4864 if (me == 1) {
4865 switch (pipe) {
4866 case 0:
4867 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4868 break;
4869 case 1:
4870 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4871 break;
4872 case 2:
4873 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4874 break;
4875 case 3:
4876 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4877 break;
4878 default:
4879 DRM_DEBUG("invalid pipe %d\n", pipe);
4880 return;
4881 }
4882 } else {
4883 DRM_DEBUG("invalid me %d\n", me);
4884 return;
4885 }
4886
4887 switch (state) {
4888 case AMDGPU_IRQ_STATE_DISABLE:
4889 mec_int_cntl = RREG32(mec_int_cntl_reg);
4890 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4891 TIME_STAMP_INT_ENABLE, 0);
4892 WREG32(mec_int_cntl_reg, mec_int_cntl);
4893 break;
4894 case AMDGPU_IRQ_STATE_ENABLE:
4895 mec_int_cntl = RREG32(mec_int_cntl_reg);
4896 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4897 TIME_STAMP_INT_ENABLE, 1);
4898 WREG32(mec_int_cntl_reg, mec_int_cntl);
4899 break;
4900 default:
4901 break;
4902 }
4903 }
4904
4905 static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4906 struct amdgpu_irq_src *src,
4907 unsigned type,
4908 enum amdgpu_interrupt_state state)
4909 {
4910 switch (type) {
4911 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4912 gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
4913 break;
4914 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
4915 gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
4916 break;
4917 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4918 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4919 break;
4920 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4921 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4922 break;
4923 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4924 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4925 break;
4926 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4927 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4928 break;
4929 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4930 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4931 break;
4932 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4933 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4934 break;
4935 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4936 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4937 break;
4938 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4939 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4940 break;
4941 default:
4942 break;
4943 }
4944 return 0;
4945 }
4946
4947 static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
4948 struct amdgpu_irq_src *source,
4949 struct amdgpu_iv_entry *entry)
4950 {
4951 int i;
4952 u8 me_id, pipe_id, queue_id;
4953 struct amdgpu_ring *ring;
4954
4955 DRM_DEBUG("IH: CP EOP\n");
4956 me_id = (entry->ring_id & 0x0c) >> 2;
4957 pipe_id = (entry->ring_id & 0x03) >> 0;
4958 queue_id = (entry->ring_id & 0x70) >> 4;
4959
4960 switch (me_id) {
4961 case 0:
4962 if (pipe_id == 0)
4963 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4964 else
4965 amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
4966 break;
4967 case 1:
4968 case 2:
4969 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4970 ring = &adev->gfx.compute_ring[i];
4971 /* Per-queue interrupt is supported for MEC starting from VI.
4972 * The interrupt can only be enabled/disabled per pipe instead of per queue.
4973 */
4974 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4975 amdgpu_fence_process(ring);
4976 }
4977 break;
4978 }
4979 return 0;
4980 }
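/*
 * Worked example of the ring_id decode above: for entry->ring_id = 0x1a,
 *
 *   me_id    = (0x1a & 0x0c) >> 2 = 2
 *   pipe_id  = (0x1a & 0x03)      = 2
 *   queue_id = (0x1a & 0x70) >> 4 = 1
 *
 * so the EOP fence would be processed on MEC2, pipe 2, queue 1, provided
 * a matching compute ring exists.
 */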
4981
4982 static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4983 struct amdgpu_irq_src *source,
4984 unsigned type,
4985 enum amdgpu_interrupt_state state)
4986 {
4987 switch (state) {
4988 case AMDGPU_IRQ_STATE_DISABLE:
4989 case AMDGPU_IRQ_STATE_ENABLE:
4990 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4991 PRIV_REG_INT_ENABLE,
4992 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4993 break;
4994 default:
4995 break;
4996 }
4997
4998 return 0;
4999 }
5000
5001 static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5002 struct amdgpu_irq_src *source,
5003 unsigned type,
5004 enum amdgpu_interrupt_state state)
5005 {
5006 switch (state) {
5007 case AMDGPU_IRQ_STATE_DISABLE:
5008 case AMDGPU_IRQ_STATE_ENABLE:
5009 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5010 PRIV_INSTR_INT_ENABLE,
5011 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
5012 	default:
5013 break;
5014 }
5015
5016 return 0;
5017 }
5018
5019 static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
5020 struct amdgpu_iv_entry *entry)
5021 {
5022 u8 me_id, pipe_id, queue_id;
5023 struct amdgpu_ring *ring;
5024 int i;
5025
5026 me_id = (entry->ring_id & 0x0c) >> 2;
5027 pipe_id = (entry->ring_id & 0x03) >> 0;
5028 queue_id = (entry->ring_id & 0x70) >> 4;
5029
5030 switch (me_id) {
5031 case 0:
5032 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5033 ring = &adev->gfx.gfx_ring[i];
5034 /* we only enabled 1 gfx queue per pipe for now */
5035 if (ring->me == me_id && ring->pipe == pipe_id)
5036 drm_sched_fault(&ring->sched);
5037 }
5038 break;
5039 case 1:
5040 case 2:
5041 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5042 ring = &adev->gfx.compute_ring[i];
5043 if (ring->me == me_id && ring->pipe == pipe_id &&
5044 ring->queue == queue_id)
5045 drm_sched_fault(&ring->sched);
5046 }
5047 break;
5048 default:
5049 BUG();
5050 }
5051 }
5052
5053 static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
5054 struct amdgpu_irq_src *source,
5055 struct amdgpu_iv_entry *entry)
5056 {
5057 DRM_ERROR("Illegal register access in command stream\n");
5058 gfx_v10_0_handle_priv_fault(adev, entry);
5059 return 0;
5060 }
5061
5062 static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
5063 struct amdgpu_irq_src *source,
5064 struct amdgpu_iv_entry *entry)
5065 {
5066 DRM_ERROR("Illegal instruction in command stream\n");
5067 gfx_v10_0_handle_priv_fault(adev, entry);
5068 return 0;
5069 }
5070
5071 static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
5072 struct amdgpu_irq_src *src,
5073 unsigned int type,
5074 enum amdgpu_interrupt_state state)
5075 {
5076 uint32_t tmp, target;
5077 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5078
5079 if (ring->me == 1)
5080 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5081 else
5082 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
5083 target += ring->pipe;
5084
5085 switch (type) {
5086 case AMDGPU_CP_KIQ_IRQ_DRIVER0:
5087 if (state == AMDGPU_IRQ_STATE_DISABLE) {
5088 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5089 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5090 GENERIC2_INT_ENABLE, 0);
5091 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5092
5093 tmp = RREG32(target);
5094 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5095 GENERIC2_INT_ENABLE, 0);
5096 WREG32(target, tmp);
5097 } else {
5098 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5099 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5100 GENERIC2_INT_ENABLE, 1);
5101 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5102
5103 tmp = RREG32(target);
5104 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5105 GENERIC2_INT_ENABLE, 1);
5106 WREG32(target, tmp);
5107 }
5108 break;
5109 default:
5110 		BUG(); /* kiq only supports GENERIC2_INT for now */
5111 break;
5112 }
5113 return 0;
5114 }
5115
5116 static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
5117 struct amdgpu_irq_src *source,
5118 struct amdgpu_iv_entry *entry)
5119 {
5120 u8 me_id, pipe_id, queue_id;
5121 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5122
5123 me_id = (entry->ring_id & 0x0c) >> 2;
5124 pipe_id = (entry->ring_id & 0x03) >> 0;
5125 queue_id = (entry->ring_id & 0x70) >> 4;
5126 DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
5127 me_id, pipe_id, queue_id);
5128
5129 amdgpu_fence_process(ring);
5130 return 0;
5131 }
5132
5133 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
5134 .name = "gfx_v10_0",
5135 .early_init = gfx_v10_0_early_init,
5136 .late_init = gfx_v10_0_late_init,
5137 .sw_init = gfx_v10_0_sw_init,
5138 .sw_fini = gfx_v10_0_sw_fini,
5139 .hw_init = gfx_v10_0_hw_init,
5140 .hw_fini = gfx_v10_0_hw_fini,
5141 .suspend = gfx_v10_0_suspend,
5142 .resume = gfx_v10_0_resume,
5143 .is_idle = gfx_v10_0_is_idle,
5144 .wait_for_idle = gfx_v10_0_wait_for_idle,
5145 .soft_reset = gfx_v10_0_soft_reset,
5146 .set_clockgating_state = gfx_v10_0_set_clockgating_state,
5147 .set_powergating_state = gfx_v10_0_set_powergating_state,
5148 .get_clockgating_state = gfx_v10_0_get_clockgating_state,
5149 };
5150
5151 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5152 .type = AMDGPU_RING_TYPE_GFX,
5153 .align_mask = 0xff,
5154 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5155 .support_64bit_ptrs = true,
5156 .vmhub = AMDGPU_GFXHUB_0,
5157 .get_rptr = gfx_v10_0_ring_get_rptr_gfx,
5158 .get_wptr = gfx_v10_0_ring_get_wptr_gfx,
5159 .set_wptr = gfx_v10_0_ring_set_wptr_gfx,
5160 	.emit_frame_size = /* 242 DWs maximum in total, if 16 IBs */
5161 5 + /* COND_EXEC */
5162 7 + /* PIPELINE_SYNC */
5163 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5164 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5165 2 + /* VM_FLUSH */
5166 8 + /* FENCE for VM_FLUSH */
5167 20 + /* GDS switch */
5168 		4 + /* double SWITCH_BUFFER,
5169 		     * the first COND_EXEC jumps to the place
5170 		     * just prior to this double SWITCH_BUFFER
5171 		     */
5172 5 + /* COND_EXEC */
5173 7 + /* HDP_flush */
5174 4 + /* VGT_flush */
5175 14 + /* CE_META */
5176 31 + /* DE_META */
5177 3 + /* CNTX_CTRL */
5178 5 + /* HDP_INVL */
5179 8 + 8 + /* FENCE x2 */
5180 2, /* SWITCH_BUFFER */
5181 .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
5182 .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
5183 .emit_fence = gfx_v10_0_ring_emit_fence,
5184 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5185 .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5186 .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5187 .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5188 .test_ring = gfx_v10_0_ring_test_ring,
5189 .test_ib = gfx_v10_0_ring_test_ib,
5190 .insert_nop = amdgpu_ring_insert_nop,
5191 .pad_ib = amdgpu_ring_generic_pad_ib,
5192 .emit_switch_buffer = gfx_v10_0_ring_emit_sb,
5193 .emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
5194 .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
5195 .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
5196 .preempt_ib = gfx_v10_0_ring_preempt_ib,
5197 .emit_tmz = gfx_v10_0_ring_emit_tmz,
5198 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5199 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5200 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5201 };
5202
5203 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
5204 .type = AMDGPU_RING_TYPE_COMPUTE,
5205 .align_mask = 0xff,
5206 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5207 .support_64bit_ptrs = true,
5208 .vmhub = AMDGPU_GFXHUB_0,
5209 .get_rptr = gfx_v10_0_ring_get_rptr_compute,
5210 .get_wptr = gfx_v10_0_ring_get_wptr_compute,
5211 .set_wptr = gfx_v10_0_ring_set_wptr_compute,
5212 .emit_frame_size =
5213 20 + /* gfx_v10_0_ring_emit_gds_switch */
5214 7 + /* gfx_v10_0_ring_emit_hdp_flush */
5215 5 + /* hdp invalidate */
5216 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5217 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5218 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5219 2 + /* gfx_v10_0_ring_emit_vm_flush */
5220 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
5221 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
5222 .emit_ib = gfx_v10_0_ring_emit_ib_compute,
5223 .emit_fence = gfx_v10_0_ring_emit_fence,
5224 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5225 .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5226 .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5227 .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5228 .test_ring = gfx_v10_0_ring_test_ring,
5229 .test_ib = gfx_v10_0_ring_test_ib,
5230 .insert_nop = amdgpu_ring_insert_nop,
5231 .pad_ib = amdgpu_ring_generic_pad_ib,
5232 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5233 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5234 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5235 };
5236
5237 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
5238 .type = AMDGPU_RING_TYPE_KIQ,
5239 .align_mask = 0xff,
5240 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5241 .support_64bit_ptrs = true,
5242 .vmhub = AMDGPU_GFXHUB_0,
5243 .get_rptr = gfx_v10_0_ring_get_rptr_compute,
5244 .get_wptr = gfx_v10_0_ring_get_wptr_compute,
5245 .set_wptr = gfx_v10_0_ring_set_wptr_compute,
5246 .emit_frame_size =
5247 20 + /* gfx_v10_0_ring_emit_gds_switch */
5248 7 + /* gfx_v10_0_ring_emit_hdp_flush */
5249 		5 + /* hdp invalidate */
5250 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5251 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5252 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5253 2 + /* gfx_v10_0_ring_emit_vm_flush */
5254 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
5255 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
5256 .emit_ib = gfx_v10_0_ring_emit_ib_compute,
5257 .emit_fence = gfx_v10_0_ring_emit_fence_kiq,
5258 .test_ring = gfx_v10_0_ring_test_ring,
5259 .test_ib = gfx_v10_0_ring_test_ib,
5260 .insert_nop = amdgpu_ring_insert_nop,
5261 .pad_ib = amdgpu_ring_generic_pad_ib,
5262 .emit_rreg = gfx_v10_0_ring_emit_rreg,
5263 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5264 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5265 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5266 };
5267
5268 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
5269 {
5270 int i;
5271
5272 adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
5273
5274 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5275 adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
5276
5277 for (i = 0; i < adev->gfx.num_compute_rings; i++)
5278 adev->gfx.compute_ring[i].funcs = &gfx_v10_0_ring_funcs_compute;
5279 }
5280
5281 static const struct amdgpu_irq_src_funcs gfx_v10_0_eop_irq_funcs = {
5282 .set = gfx_v10_0_set_eop_interrupt_state,
5283 .process = gfx_v10_0_eop_irq,
5284 };
5285
5286 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
5287 .set = gfx_v10_0_set_priv_reg_fault_state,
5288 .process = gfx_v10_0_priv_reg_irq,
5289 };
5290
5291 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
5292 .set = gfx_v10_0_set_priv_inst_fault_state,
5293 .process = gfx_v10_0_priv_inst_irq,
5294 };
5295
5296 static const struct amdgpu_irq_src_funcs gfx_v10_0_kiq_irq_funcs = {
5297 .set = gfx_v10_0_kiq_set_interrupt_state,
5298 .process = gfx_v10_0_kiq_irq,
5299 };
5300
5301 static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
5302 {
5303 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5304 adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
5305
5306 adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
5307 adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
5308
5309 adev->gfx.priv_reg_irq.num_types = 1;
5310 adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
5311
5312 adev->gfx.priv_inst_irq.num_types = 1;
5313 adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
5314 }
5315
5316 static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
5317 {
5318 switch (adev->asic_type) {
5319 case CHIP_NAVI10:
5320 case CHIP_NAVI14:
5321 case CHIP_NAVI12:
5322 adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
5323 break;
5324 default:
5325 break;
5326 }
5327 }
5328
5329 static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
5330 {
5331 unsigned total_cu = adev->gfx.config.max_cu_per_sh *
5332 adev->gfx.config.max_sh_per_se *
5333 adev->gfx.config.max_shader_engines;
5334
5335 adev->gds.gds_size = 0x10000;
5336 adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
5337 adev->gds.gws_size = 64;
5338 adev->gds.oa_size = 16;
5339 }
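/*
 * Worked example, assuming a hypothetical fully enabled 40-CU part
 * (a Navi10-class configuration): total_cu = 40, so
 * gds_compute_max_wave_id = 40 * 32 - 1 = 1279, i.e. wave IDs 0..1279
 * may use GDS.
 */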
5340
5341 static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
5342 u32 bitmap)
5343 {
5344 u32 data;
5345
5346 if (!bitmap)
5347 return;
5348
5349 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5350 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5351
5352 WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
5353 }
5354
5355 static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
5356 {
5357 u32 data, wgp_bitmask;
5358 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
5359 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
5360
5361 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5362 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5363
5364 wgp_bitmask =
5365 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
5366
5367 return (~data) & wgp_bitmask;
5368 }
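/*
 * Worked example: with max_cu_per_sh = 10 there are 10 >> 1 = 5 WGPs per
 * SH, so wgp_bitmask = 0x1f.  If the harvest registers report inactive
 * WGP bits 0b00100 (WGP 2 fused off), this returns
 * (~0b00100) & 0x1f = 0b11011.
 */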
5369
5370 static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
5371 {
5372 u32 wgp_idx, wgp_active_bitmap;
5373 u32 cu_bitmap_per_wgp, cu_active_bitmap;
5374
5375 wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
5376 cu_active_bitmap = 0;
5377
5378 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
5379 		/* if a WGP is enabled, its two CUs are enabled as well */
5380 cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
5381 if (wgp_active_bitmap & (1 << wgp_idx))
5382 cu_active_bitmap |= cu_bitmap_per_wgp;
5383 }
5384
5385 return cu_active_bitmap;
5386 }
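/*
 * Worked example of the WGP -> CU expansion above: for
 * wgp_active_bitmap = 0b101 (WGPs 0 and 2 active),
 *
 *   wgp 0 -> 3 << 0 = 0b000011
 *   wgp 2 -> 3 << 4 = 0b110000
 *
 * so cu_active_bitmap = 0b110011, i.e. two CU bits per active WGP.
 */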
5387
5388 static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
5389 struct amdgpu_cu_info *cu_info)
5390 {
5391 int i, j, k, counter, active_cu_number = 0;
5392 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5393 unsigned disable_masks[4 * 2];
5394
5395 if (!adev || !cu_info)
5396 return -EINVAL;
5397
5398 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5399
5400 mutex_lock(&adev->grbm_idx_mutex);
5401 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5402 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5403 mask = 1;
5404 ao_bitmap = 0;
5405 counter = 0;
5406 gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
5407 if (i < 4 && j < 2)
5408 gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
5409 adev, disable_masks[i * 2 + j]);
5410 bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
5411 cu_info->bitmap[i][j] = bitmap;
5412
5413 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5414 if (bitmap & mask) {
5415 if (counter < adev->gfx.config.max_cu_per_sh)
5416 ao_bitmap |= mask;
5417 counter++;
5418 }
5419 mask <<= 1;
5420 }
5421 active_cu_number += counter;
5422 if (i < 2 && j < 2)
5423 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5424 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5425 }
5426 }
5427 gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5428 mutex_unlock(&adev->grbm_idx_mutex);
5429
5430 cu_info->number = active_cu_number;
5431 cu_info->ao_cu_mask = ao_cu_mask;
5432 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5433
5434 return 0;
5435 }
5436
5437 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
5438 {
5439 .type = AMD_IP_BLOCK_TYPE_GFX,
5440 .major = 10,
5441 .minor = 0,
5442 .rev = 0,
5443 .funcs = &gfx_v10_0_ip_funcs,
5444 };
5445