/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#define GFX9_NUM_GFX_RINGS	1
#define GFX9_MEC_HPD_SIZE	2048
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET	0x00000000L

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

MODULE_FIRMWARE("amdgpufw_vega10_ce");
MODULE_FIRMWARE("amdgpufw_vega10_pfp");
MODULE_FIRMWARE("amdgpufw_vega10_me");
MODULE_FIRMWARE("amdgpufw_vega10_mec");
MODULE_FIRMWARE("amdgpufw_vega10_mec2");
MODULE_FIRMWARE("amdgpufw_vega10_rlc");

MODULE_FIRMWARE("amdgpufw_vega12_ce");
MODULE_FIRMWARE("amdgpufw_vega12_pfp");
MODULE_FIRMWARE("amdgpufw_vega12_me");
MODULE_FIRMWARE("amdgpufw_vega12_mec");
MODULE_FIRMWARE("amdgpufw_vega12_mec2");
MODULE_FIRMWARE("amdgpufw_vega12_rlc");

MODULE_FIRMWARE("amdgpufw_vega20_ce");
MODULE_FIRMWARE("amdgpufw_vega20_pfp");
MODULE_FIRMWARE("amdgpufw_vega20_me");
MODULE_FIRMWARE("amdgpufw_vega20_mec");
MODULE_FIRMWARE("amdgpufw_vega20_mec2");
MODULE_FIRMWARE("amdgpufw_vega20_rlc");

MODULE_FIRMWARE("amdgpufw_raven_ce");
MODULE_FIRMWARE("amdgpufw_raven_pfp");
MODULE_FIRMWARE("amdgpufw_raven_me");
MODULE_FIRMWARE("amdgpufw_raven_mec");
MODULE_FIRMWARE("amdgpufw_raven_mec2");
MODULE_FIRMWARE("amdgpufw_raven_rlc");
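
/*
 * Golden register settings. Each SOC15_REG_GOLDEN_VALUE() entry is a
 * (register, and_mask, or_mask) triple: soc15_program_register_sequence()
 * clears the bits in and_mask and then ORs in or_mask (or writes or_mask
 * verbatim when and_mask is 0xffffffff), so only the masked fields of the
 * hardware defaults are overridden.
 */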
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
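
/*
 * Program the golden settings for the detected ASIC: first the generation
 * table (gc_9_0, gc_9_1 or gc_9_2_1), then the chip-specific fixups, and
 * finally the settings shared by all gfx9 parts.
 */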
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rv1,
						ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |	 /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
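
/*
 * Basic ring sanity test: write 0xCAFEDEAD to a scratch register, submit
 * a SET_UCONFIG_REG packet that stores 0xDEADBEEF there, and poll until
 * the value changes or adev->usec_timeout microseconds elapse.
 */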
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("ib test on ring %d failed\n", ring->idx);
		r = -EINVAL;
	}

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}
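
/*
 * Microcode handling. gfx9 carries separate images for the PFP, ME and CE
 * front-end processors, one or two MEC compute engines, and the RLC;
 * gfx_v9_0_free_microcode() drops whatever gfx_v9_0_init_microcode()
 * managed to load.
 */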
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpufw_%s_pfp", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpufw_%s_me", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpufw_%s_ce", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpufw_%s_rlc", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, M_DRM, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpufw_%s_mec", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpufw_%s_mec2", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}

	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}
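
/*
 * Clear-state buffer (CSB) construction. The size mirrors the layout
 * emitted by gfx_v9_0_get_csb_buffer() below: PREAMBLE begin (2 dwords),
 * CONTEXT_CONTROL (3), a SET_CONTEXT_REG header pair plus payload per
 * SECT_CONTEXT extent, PREAMBLE end (2) and CLEAR_STATE (2).
 */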
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
	WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);
}
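
/*
 * Load balancing per watt (LBPW) is parameterized by gfx_v9_0_init_lbpw()
 * above and gated here via RLC_LB_CNTL.LOAD_BALANCE_ENABLE.
 */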
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data = NULL;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset = 0, table_size = 0;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      (u64 *)&adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      (u64 *)&adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}
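
/*
 * RLC setup: create and fill the clear-state BO, and on Raven also the
 * CP jump-table BO (populated by rv_init_cp_jump_table()) before
 * initializing LBPW.
 */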
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      (u64 *)&adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
				r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      (u64 *)&adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_err(adev->dev,
				"(%d) failed to create cp table bo\n", r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		gfx_v9_0_init_lbpw(adev);
	}

	return 0;
}

static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			  AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}

static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
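
/*
 * MEC setup: reserve the per-ring HPD EOP buffer in GTT and stage the
 * MEC firmware image in its own BO for the CP to fetch.
 */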
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT) |
		     (SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
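
/*
 * Bulk variant of the indirect SQ read above: FORCE_READ plus AUTO_INCR
 * lets consecutive RREG32s of mmSQ_IND_DATA walk a register range (used
 * for SGPR/VGPR dumps) without reprogramming mmSQ_IND_INDEX each time.
 */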
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		     (SQ_IND_INDEX__FORCE_READ_MASK) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t thread,
				     uint32_t start, uint32_t size,
				     uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q)
{
	soc15_grbm_select(adev, me, pipe, q, 0);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};

static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	int err;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VEGA12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
		DRM_INFO("fix gfx.config for vega12\n");
		break;
	case CHIP_VEGA20:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		gb_addr_config &= ~0xf3e777ff;
		gb_addr_config |= 0x22014042;
		/* check vbios table if gpu info is not available */
		err = amdgpu_atomfirmware_get_gfx_info(adev);
		if (err)
			return err;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}
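
/*
 * NGG (next-generation geometry) buffer helpers. Sizes are specified per
 * shader engine; a size of 0 falls back to the given default, and the
 * allocation is scaled by max_shader_engines.
 */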
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    (u64 *)&ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}

static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      (u64 *)&adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
	       sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}

static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}
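
/*
 * Enable NGG: program the WD buffer sizes and base addresses, then clear
 * the GDS window reserved for NGG by temporarily growing GDS_VMID0_SIZE,
 * DMAing zeros over the reserved range, and writing the size back to 0.
 */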
static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data, base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_DST_SEL(1) |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
				adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);

	amdgpu_ring_commit(ring);

	return 0;
}

static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
			   + (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		 + ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}
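
/*
 * sw_init: register the KIQ/EOP/privileged-fault interrupt sources, load
 * microcode, create the RLC and MEC BOs, then bring up the gfx ring(s)
 * and the compute rings (allocated horizontally across pipes) plus the
 * KIQ ring and MQD backing store, and finally the GDS/GWS/OA
 * reservations, early GPU config and NGG buffers.
 */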
static int gfx_v9_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		adev->gfx.mec.num_mec = 2;
		break;
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		if (!i)
			sprintf(ring->name, "gfx");
		else
			sprintf(ring->name, "gfx_%d", i);
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v9_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
	if (r)
		return r;

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	r = gfx_v9_0_gpu_early_init(adev);
	if (r)
		return r;

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}
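
/* sw_fini tears down everything sw_init created, essentially in reverse order. */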
(r) 1626 return r; 1627 1628 return 0; 1629 } 1630 1631 1632 static int gfx_v9_0_sw_fini(void *handle) 1633 { 1634 int i; 1635 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1636 1637 amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL); 1638 amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL); 1639 amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL); 1640 1641 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1642 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1643 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1644 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1645 1646 amdgpu_gfx_compute_mqd_sw_fini(adev); 1647 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); 1648 amdgpu_gfx_kiq_fini(adev); 1649 1650 gfx_v9_0_mec_fini(adev); 1651 gfx_v9_0_ngg_fini(adev); 1652 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 1653 (u64 *)&adev->gfx.rlc.clear_state_gpu_addr, 1654 (void **)&adev->gfx.rlc.cs_ptr); 1655 if (adev->asic_type == CHIP_RAVEN) { 1656 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 1657 (u64 *)&adev->gfx.rlc.cp_table_gpu_addr, 1658 (void **)&adev->gfx.rlc.cp_table_ptr); 1659 } 1660 gfx_v9_0_free_microcode(adev); 1661 1662 return 0; 1663 } 1664 1665 1666 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) 1667 { 1668 /* TODO */ 1669 } 1670 1671 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) 1672 { 1673 u32 data; 1674 1675 if (instance == 0xffffffff) 1676 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); 1677 else 1678 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); 1679 1680 if (se_num == 0xffffffff) 1681 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); 1682 else 1683 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1684 1685 if (sh_num == 0xffffffff) 1686 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); 1687 else 1688 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); 1689 1690 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); 1691 } 1692 1693 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1694 { 1695 u32 data, mask; 1696 1697 data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE); 1698 data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE); 1699 1700 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 1701 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; 1702 1703 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / 1704 adev->gfx.config.max_sh_per_se); 1705 1706 return (~data) & mask; 1707 } 1708 1709 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) 1710 { 1711 int i, j; 1712 u32 data; 1713 u32 active_rbs = 0; 1714 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / 1715 adev->gfx.config.max_sh_per_se; 1716 1717 mutex_lock(&adev->grbm_idx_mutex); 1718 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1719 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1720 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); 1721 data = gfx_v9_0_get_rb_active_bitmap(adev); 1722 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1723 rb_bitmap_width_per_sh); 1724 } 1725 } 1726 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1727 mutex_unlock(&adev->grbm_idx_mutex); 1728 1729 adev->gfx.config.backend_enable_mask = active_rbs; 1730 adev->gfx.config.num_rbs = hweight32(active_rbs); 1731 } 1732 1733 #define DEFAULT_SH_MEM_BASES (0x6000) 1734 #define FIRST_COMPUTE_VMID 
(8) 1735 #define LAST_COMPUTE_VMID (16) 1736 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) 1737 { 1738 int i; 1739 uint32_t sh_mem_config; 1740 uint32_t sh_mem_bases; 1741 1742 /* 1743 * Configure apertures: 1744 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1745 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1746 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1747 */ 1748 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); 1749 1750 sh_mem_config = SH_MEM_ADDRESS_MODE_64 | 1751 SH_MEM_ALIGNMENT_MODE_UNALIGNED << 1752 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; 1753 1754 mutex_lock(&adev->srbm_mutex); 1755 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { 1756 soc15_grbm_select(adev, 0, 0, 0, i); 1757 /* CP and shaders */ 1758 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); 1759 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); 1760 } 1761 soc15_grbm_select(adev, 0, 0, 0, 0); 1762 mutex_unlock(&adev->srbm_mutex); 1763 } 1764 1765 static void gfx_v9_0_gpu_init(struct amdgpu_device *adev) 1766 { 1767 u32 tmp; 1768 int i; 1769 1770 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); 1771 1772 gfx_v9_0_tiling_mode_table_init(adev); 1773 1774 gfx_v9_0_setup_rb(adev); 1775 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); 1776 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); 1777 1778 /* XXX SH_MEM regs */ 1779 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1780 mutex_lock(&adev->srbm_mutex); 1781 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) { 1782 soc15_grbm_select(adev, 0, 0, 0, i); 1783 /* CP and shaders */ 1784 if (i == 0) { 1785 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1786 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1787 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp); 1788 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0); 1789 } else { 1790 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1791 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1792 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp); 1793 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 1794 (adev->gmc.private_aperture_start >> 48)); 1795 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 1796 (adev->gmc.shared_aperture_start >> 48)); 1797 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp); 1798 } 1799 } 1800 soc15_grbm_select(adev, 0, 0, 0, 0); 1801 1802 mutex_unlock(&adev->srbm_mutex); 1803 1804 gfx_v9_0_init_compute_vmid(adev); 1805 } 1806 1807 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) 1808 { 1809 u32 i, j, k; 1810 u32 mask; 1811 1812 mutex_lock(&adev->grbm_idx_mutex); 1813 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1814 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1815 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); 1816 for (k = 0; k < adev->usec_timeout; k++) { 1817 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0) 1818 break; 1819 udelay(1); 1820 } 1821 if (k == adev->usec_timeout) { 1822 gfx_v9_0_select_se_sh(adev, 0xffffffff, 1823 0xffffffff, 0xffffffff); 1824 mutex_unlock(&adev->grbm_idx_mutex); 1825 DRM_INFO("Timeout wait for RLC serdes %u,%u\n", 1826 i, j); 1827 return; 1828 } 1829 } 1830 } 1831 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1832 mutex_unlock(&adev->grbm_idx_mutex); 1833 1834 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | 1835 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | 1836 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | 1837 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; 1838 for (k = 0; k < 
adev->usec_timeout; k++) { 1839 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) 1840 break; 1841 udelay(1); 1842 } 1843 } 1844 1845 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 1846 bool enable) 1847 { 1848 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); 1849 1850 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); 1851 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); 1852 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0); 1853 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0); 1854 1855 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); 1856 } 1857 1858 static void gfx_v9_0_init_csb(struct amdgpu_device *adev) 1859 { 1860 /* csib */ 1861 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI), 1862 adev->gfx.rlc.clear_state_gpu_addr >> 32); 1863 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO), 1864 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 1865 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH), 1866 adev->gfx.rlc.clear_state_size); 1867 } 1868 1869 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format, 1870 int indirect_offset, 1871 int list_size, 1872 int *unique_indirect_regs, 1873 int unique_indirect_reg_count, 1874 int *indirect_start_offsets, 1875 int *indirect_start_offsets_count, 1876 int max_start_offsets_count) 1877 { 1878 int idx; 1879 1880 for (; indirect_offset < list_size; indirect_offset++) { 1881 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count); 1882 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset; 1883 *indirect_start_offsets_count = *indirect_start_offsets_count + 1; 1884 1885 while (register_list_format[indirect_offset] != 0xFFFFFFFF) { 1886 indirect_offset += 2; 1887 1888 /* look for the matching index */ 1889 for (idx = 0; idx < unique_indirect_reg_count; idx++) { 1890 if (unique_indirect_regs[idx] == 1891 register_list_format[indirect_offset] || 1892 !unique_indirect_regs[idx]) 1893 break; 1894 } 1895 1896 BUG_ON(idx >= unique_indirect_reg_count); 1897 1898 if (!unique_indirect_regs[idx]) 1899 unique_indirect_regs[idx] = register_list_format[indirect_offset]; 1900 1901 indirect_offset++; 1902 } 1903 } 1904 } 1905 1906 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev) 1907 { 1908 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; 1909 int unique_indirect_reg_count = 0; 1910 1911 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; 1912 int indirect_start_offsets_count = 0; 1913 1914 int list_size = 0; 1915 int i = 0, j = 0; 1916 u32 tmp = 0; 1917 1918 u32 *register_list_format = 1919 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL); 1920 if (!register_list_format) 1921 return -ENOMEM; 1922 memcpy(register_list_format, adev->gfx.rlc.register_list_format, 1923 adev->gfx.rlc.reg_list_format_size_bytes); 1924 1925 /* setup unique_indirect_regs array and indirect_start_offsets array */ 1926 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs); 1927 gfx_v9_1_parse_ind_reg_list(register_list_format, 1928 adev->gfx.rlc.reg_list_format_direct_reg_list_length, 1929 adev->gfx.rlc.reg_list_format_size_bytes >> 2, 1930 unique_indirect_regs, 1931 unique_indirect_reg_count, 1932 indirect_start_offsets, 1933 &indirect_start_offsets_count, 1934 ARRAY_SIZE(indirect_start_offsets)); 1935 1936 /* enable auto inc in case it is disabled */ 1937 tmp =
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); 1938 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 1939 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp); 1940 1941 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */ 1942 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 1943 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET); 1944 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) 1945 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA), 1946 adev->gfx.rlc.register_restore[i]); 1947 1948 /* load indirect register */ 1949 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), 1950 adev->gfx.rlc.reg_list_format_start); 1951 1952 /* direct register portion */ 1953 for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++) 1954 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), 1955 register_list_format[i]); 1956 1957 /* indirect register portion */ 1958 while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) { 1959 if (register_list_format[i] == 0xFFFFFFFF) { 1960 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); 1961 continue; 1962 } 1963 1964 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); 1965 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); 1966 1967 for (j = 0; j < unique_indirect_reg_count; j++) { 1968 if (register_list_format[i] == unique_indirect_regs[j]) { 1969 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j); 1970 break; 1971 } 1972 } 1973 1974 BUG_ON(j >= unique_indirect_reg_count); 1975 1976 i++; 1977 } 1978 1979 /* set save/restore list size */ 1980 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2; 1981 list_size = list_size >> 1; 1982 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), 1983 adev->gfx.rlc.reg_restore_list_size); 1984 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size); 1985 1986 /* write the starting offsets to RLC scratch ram */ 1987 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), 1988 adev->gfx.rlc.starting_offsets_start); 1989 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++) 1990 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), 1991 indirect_start_offsets[i]); 1992 1993 /* load unique indirect regs */ 1994 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) { 1995 if (unique_indirect_regs[i] != 0) { 1996 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) 1997 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i], 1998 unique_indirect_regs[i] & 0x3FFFF); 1999 2000 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) 2001 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i], 2002 unique_indirect_regs[i] >> 20); 2003 } 2004 } 2005 2006 kfree(register_list_format); 2007 return 0; 2008 } 2009 2010 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev) 2011 { 2012 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1); 2013 } 2014 2015 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev, 2016 bool enable) 2017 { 2018 uint32_t data = 0; 2019 uint32_t default_data = 0; 2020 2021 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS)); 2022 if (enable) { 2023 /* enable GFXIP control over CGPG */ 2024 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK; 2025 if (default_data != data) 2026 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); 2027 2028 /* update status */ 2029 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK; 2030 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT); 2031 if (default_data != data) 2032
WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); 2033 } else { 2034 /* restore GFXIP control over CGPG */ 2035 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK; 2036 if (default_data != data) 2037 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); 2038 } 2039 } 2040 2041 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev) 2042 { 2043 uint32_t data = 0; 2044 2045 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 2046 AMD_PG_SUPPORT_GFX_SMG | 2047 AMD_PG_SUPPORT_GFX_DMG)) { 2048 /* init IDLE_POLL_COUNT = 60 */ 2049 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL)); 2050 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; 2051 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 2052 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data); 2053 2054 /* init RLC PG Delay */ 2055 data = 0; 2056 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT); 2057 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT); 2058 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT); 2059 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT); 2060 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data); 2061 2062 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2)); 2063 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK; 2064 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT); 2065 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data); 2066 2067 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3)); 2068 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK; 2069 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT); 2070 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data); 2071 2072 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL)); 2073 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; 2074 2075 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */ 2076 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); 2077 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data); 2078 2079 pwr_10_0_gfxip_control_over_cgpg(adev, true); 2080 } 2081 } 2082 2083 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, 2084 bool enable) 2085 { 2086 uint32_t data = 0; 2087 uint32_t default_data = 0; 2088 2089 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2090 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2091 SMU_CLK_SLOWDOWN_ON_PU_ENABLE, 2092 enable ? 1 : 0); 2093 if (default_data != data) 2094 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2095 } 2096 2097 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, 2098 bool enable) 2099 { 2100 uint32_t data = 0; 2101 uint32_t default_data = 0; 2102 2103 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2104 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2105 SMU_CLK_SLOWDOWN_ON_PD_ENABLE, 2106 enable ? 1 : 0); 2107 if (default_data != data) 2108 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2109 } 2110 2111 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev, 2112 bool enable) 2113 { 2114 uint32_t data = 0; 2115 uint32_t default_data = 0; 2116 2117 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2118 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2119 CP_PG_DISABLE, 2120 enable ?
0 : 1); 2121 if (default_data != data) 2122 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2123 } 2124 2125 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev, 2126 bool enable) 2127 { 2128 uint32_t data, default_data; 2129 2130 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2131 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2132 GFX_POWER_GATING_ENABLE, 2133 enable ? 1 : 0); 2134 if (default_data != data) 2135 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2136 } 2137 2138 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev, 2139 bool enable) 2140 { 2141 uint32_t data, default_data; 2142 2143 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2144 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2145 GFX_PIPELINE_PG_ENABLE, 2146 enable ? 1 : 0); 2147 if (default_data != data) 2148 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2149 2150 if (!enable) 2151 /* read any GFX register to wake up GFX */ 2152 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL)); 2153 } 2154 2155 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, 2156 bool enable) 2157 { 2158 uint32_t data, default_data; 2159 2160 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2161 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2162 STATIC_PER_CU_PG_ENABLE, 2163 enable ? 1 : 0); 2164 if (default_data != data) 2165 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2166 } 2167 2168 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, 2169 bool enable) 2170 { 2171 uint32_t data, default_data; 2172 2173 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2174 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2175 DYN_PER_CU_PG_ENABLE, 2176 enable ? 1 : 0); 2177 if (default_data != data) 2178 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2179 } 2180 2181 static void gfx_v9_0_init_pg(struct amdgpu_device *adev) 2182 { 2183 gfx_v9_0_init_csb(adev); 2184 2185 /* 2186 * The RLC save/restore list is workable since RLC v2_1 2187 * and is needed by the gfxoff feature.
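 * The list programs the RLC save/restore machine (SRM) with the
 * registers that must be preserved across power-gating transitions.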
2188 */ 2189 if (adev->gfx.rlc.is_rlc_v2_1) { 2190 if (adev->asic_type == CHIP_VEGA12) 2191 gfx_v9_1_init_rlc_save_restore_list(adev); 2192 gfx_v9_0_enable_save_restore_machine(adev); 2193 } 2194 2195 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 2196 AMD_PG_SUPPORT_GFX_SMG | 2197 AMD_PG_SUPPORT_GFX_DMG | 2198 AMD_PG_SUPPORT_CP | 2199 AMD_PG_SUPPORT_GDS | 2200 AMD_PG_SUPPORT_RLC_SMU_HS)) { 2201 WREG32(mmRLC_JUMP_TABLE_RESTORE, 2202 adev->gfx.rlc.cp_table_gpu_addr >> 8); 2203 gfx_v9_0_init_gfx_power_gating(adev); 2204 } 2205 } 2206 2207 2208 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev) 2209 { 2210 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0); 2211 gfx_v9_0_enable_gui_idle_interrupt(adev, false); 2212 gfx_v9_0_wait_for_rlc_serdes(adev); 2213 } 2214 2215 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev) 2216 { 2217 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2218 udelay(50); 2219 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 2220 udelay(50); 2221 } 2222 2223 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev) 2224 { 2225 #ifdef AMDGPU_RLC_DEBUG_RETRY 2226 u32 rlc_ucode_ver; 2227 #endif 2228 2229 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2230 udelay(50); 2231 2232 /* on APUs (e.g. Carrizo) the CP interrupt is enabled after the CP is initialized */ 2233 if (!(adev->flags & AMD_IS_APU)) { 2234 gfx_v9_0_enable_gui_idle_interrupt(adev, true); 2235 udelay(50); 2236 } 2237 2238 #ifdef AMDGPU_RLC_DEBUG_RETRY 2239 /* RLC_GPM_GENERAL_6 : RLC Ucode version */ 2240 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6); 2241 if (rlc_ucode_ver == 0x108) { 2242 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n", 2243 rlc_ucode_ver, adev->gfx.rlc_fw_version); 2244 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, 2245 * default is 0x9C4 to create a 100us interval */ 2246 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4); 2247 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr 2248 * to disable the page fault retry interrupts, default is 2249 * 0x100 (256) */ 2250 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100); 2251 } 2252 #endif 2253 } 2254 2255 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev) 2256 { 2257 const struct rlc_firmware_header_v2_0 *hdr; 2258 const __le32 *fw_data; 2259 unsigned i, fw_size; 2260 2261 if (!adev->gfx.rlc_fw) 2262 return -EINVAL; 2263 2264 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2265 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2266 2267 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2268 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2269 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 2270 2271 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, 2272 RLCG_UCODE_LOADING_START_ADDRESS); 2273 for (i = 0; i < fw_size; i++) 2274 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); 2275 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 2276 2277 return 0; 2278 } 2279 2280 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) 2281 { 2282 int r; 2283 2284 if (amdgpu_sriov_vf(adev)) { 2285 gfx_v9_0_init_csb(adev); 2286 return 0; 2287 } 2288 2289 gfx_v9_0_rlc_stop(adev); 2290 2291 /* disable CG */ 2292 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); 2293 2294 gfx_v9_0_rlc_reset(adev); 2295 2296 gfx_v9_0_init_pg(adev); 2297 2298 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2299 /* legacy rlc firmware loading */ 2300 r = gfx_v9_0_rlc_load_microcode(adev); 2301
if (r) 2302 return r; 2303 } 2304 2305 if (adev->asic_type == CHIP_RAVEN) { 2306 if (amdgpu_lbpw != 0) 2307 gfx_v9_0_enable_lbpw(adev, true); 2308 else 2309 gfx_v9_0_enable_lbpw(adev, false); 2310 } 2311 2312 gfx_v9_0_rlc_start(adev); 2313 2314 return 0; 2315 } 2316 2317 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2318 { 2319 int i; 2320 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); 2321 2322 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 2323 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 2324 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); 2325 if (!enable) { 2326 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 2327 adev->gfx.gfx_ring[i].ready = false; 2328 } 2329 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); 2330 udelay(50); 2331 } 2332 2333 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 2334 { 2335 const struct gfx_firmware_header_v1_0 *pfp_hdr; 2336 const struct gfx_firmware_header_v1_0 *ce_hdr; 2337 const struct gfx_firmware_header_v1_0 *me_hdr; 2338 const __le32 *fw_data; 2339 unsigned i, fw_size; 2340 2341 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) 2342 return -EINVAL; 2343 2344 pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 2345 adev->gfx.pfp_fw->data; 2346 ce_hdr = (const struct gfx_firmware_header_v1_0 *) 2347 adev->gfx.ce_fw->data; 2348 me_hdr = (const struct gfx_firmware_header_v1_0 *) 2349 adev->gfx.me_fw->data; 2350 2351 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2352 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2353 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2354 2355 gfx_v9_0_cp_gfx_enable(adev, false); 2356 2357 /* PFP */ 2358 fw_data = (const __le32 *) 2359 (adev->gfx.pfp_fw->data + 2360 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2361 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; 2362 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0); 2363 for (i = 0; i < fw_size; i++) 2364 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); 2365 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 2366 2367 /* CE */ 2368 fw_data = (const __le32 *) 2369 (adev->gfx.ce_fw->data + 2370 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); 2371 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; 2372 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0); 2373 for (i = 0; i < fw_size; i++) 2374 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); 2375 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); 2376 2377 /* ME */ 2378 fw_data = (const __le32 *) 2379 (adev->gfx.me_fw->data + 2380 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 2381 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; 2382 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0); 2383 for (i = 0; i < fw_size; i++) 2384 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); 2385 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); 2386 2387 return 0; 2388 } 2389 2390 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) 2391 { 2392 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; 2393 const struct cs_section_def *sect = NULL; 2394 const struct cs_extent_def *ext = NULL; 2395 int r, i, tmp; 2396 2397 /* init the CP */ 2398 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); 2399 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1); 2400 2401 gfx_v9_0_cp_gfx_enable(adev, true); 2402 2403 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); 2404 if (r) { 2405 
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2406 return r; 2407 } 2408 2409 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2410 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 2411 2412 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 2413 amdgpu_ring_write(ring, 0x80000000); 2414 amdgpu_ring_write(ring, 0x80000000); 2415 2416 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) { 2417 for (ext = sect->section; ext->extent != NULL; ++ext) { 2418 if (sect->id == SECT_CONTEXT) { 2419 amdgpu_ring_write(ring, 2420 PACKET3(PACKET3_SET_CONTEXT_REG, 2421 ext->reg_count)); 2422 amdgpu_ring_write(ring, 2423 ext->reg_index - PACKET3_SET_CONTEXT_REG_START); 2424 for (i = 0; i < ext->reg_count; i++) 2425 amdgpu_ring_write(ring, ext->extent[i]); 2426 } 2427 } 2428 } 2429 2430 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2431 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 2432 2433 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 2434 amdgpu_ring_write(ring, 0); 2435 2436 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); 2437 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 2438 amdgpu_ring_write(ring, 0x8000); 2439 amdgpu_ring_write(ring, 0x8000); 2440 2441 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1)); 2442 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE | 2443 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START)); 2444 amdgpu_ring_write(ring, tmp); 2445 amdgpu_ring_write(ring, 0); 2446 2447 amdgpu_ring_commit(ring); 2448 2449 return 0; 2450 } 2451 2452 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) 2453 { 2454 struct amdgpu_ring *ring; 2455 u32 tmp; 2456 u32 rb_bufsz; 2457 u64 rb_addr, rptr_addr, wptr_gpu_addr; 2458 2459 /* Set the write pointer delay */ 2460 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0); 2461 2462 /* set the RB to use vmid 0 */ 2463 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0); 2464 2465 /* Set ring buffer size */ 2466 ring = &adev->gfx.gfx_ring[0]; 2467 rb_bufsz = order_base_2(ring->ring_size / 8); 2468 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 2469 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 2470 #ifdef __BIG_ENDIAN 2471 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1); 2472 #endif 2473 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp); 2474 2475 /* Initialize the ring buffer's write pointers */ 2476 ring->wptr = 0; 2477 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 2478 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 2479 2480 /* set the wb address wether it's enabled or not */ 2481 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); 2482 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 2483 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 2484 2485 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 2486 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr)); 2487 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr)); 2488 2489 mdelay(1); 2490 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp); 2491 2492 rb_addr = ring->gpu_addr >> 8; 2493 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr); 2494 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 2495 2496 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL); 2497 if (ring->use_doorbell) { 2498 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2499 DOORBELL_OFFSET, ring->doorbell_index); 2500 tmp = 
REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2501 DOORBELL_EN, 1); 2502 } else { 2503 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0); 2504 } 2505 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp); 2506 2507 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 2508 DOORBELL_RANGE_LOWER, ring->doorbell_index); 2509 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp); 2510 2511 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER, 2512 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 2513 2514 2515 /* start the ring */ 2516 gfx_v9_0_cp_gfx_start(adev); 2517 ring->ready = true; 2518 2519 return 0; 2520 } 2521 2522 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 2523 { 2524 int i; 2525 2526 if (enable) { 2527 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0); 2528 } else { 2529 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 2530 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 2531 for (i = 0; i < adev->gfx.num_compute_rings; i++) 2532 adev->gfx.compute_ring[i].ready = false; 2533 adev->gfx.kiq.ring.ready = false; 2534 } 2535 udelay(50); 2536 } 2537 2538 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev) 2539 { 2540 const struct gfx_firmware_header_v1_0 *mec_hdr; 2541 const __le32 *fw_data; 2542 unsigned i; 2543 u32 tmp; 2544 2545 if (!adev->gfx.mec_fw) 2546 return -EINVAL; 2547 2548 gfx_v9_0_cp_compute_enable(adev, false); 2549 2550 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2551 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2552 2553 fw_data = (const __le32 *) 2554 (adev->gfx.mec_fw->data + 2555 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 2556 tmp = 0; 2557 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2558 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2559 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp); 2560 2561 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, 2562 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); 2563 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI, 2564 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2565 2566 /* MEC1 */ 2567 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 2568 mec_hdr->jt_offset); 2569 for (i = 0; i < mec_hdr->jt_size; i++) 2570 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA, 2571 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 2572 2573 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 2574 adev->gfx.mec_fw_version); 2575 /* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */ 2576 2577 return 0; 2578 } 2579 2580 /* KIQ functions */ 2581 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) 2582 { 2583 uint32_t tmp; 2584 struct amdgpu_device *adev = ring->adev; 2585 2586 /* tell RLC which is KIQ queue */ 2587 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); 2588 tmp &= 0xffffff00; 2589 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 2590 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); 2591 tmp |= 0x80; 2592 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); 2593 } 2594 2595 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) 2596 { 2597 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 2598 uint32_t scratch, tmp = 0; 2599 uint64_t queue_mask = 0; 2600 int r, i; 2601 2602 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { 2603 if (!test_bit(i, adev->gfx.mec.queue_bitmap)) 2604 continue; 2605 2606 /* This situation may be hit in the future if a new HW 2607 * generation exposes more than 64 queues. 
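 * (queue_mask is a u64, one bit per queue, hence the 64-queue limit.)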
If so, the 2608 * definition of queue_mask needs updating */ 2609 if (WARN_ON(i >= (sizeof(queue_mask)*8))) { 2610 DRM_ERROR("Invalid KCQ enabled: %d\n", i); 2611 break; 2612 } 2613 2614 queue_mask |= (1ull << i); 2615 } 2616 2617 r = amdgpu_gfx_scratch_get(adev, &scratch); 2618 if (r) { 2619 DRM_ERROR("Failed to get scratch reg (%d).\n", r); 2620 return r; 2621 } 2622 WREG32(scratch, 0xCAFEDEAD); 2623 2624 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11); 2625 if (r) { 2626 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 2627 amdgpu_gfx_scratch_free(adev, scratch); 2628 return r; 2629 } 2630 2631 /* set resources */ 2632 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 2633 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 2634 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 2635 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 2636 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 2637 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ 2638 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ 2639 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 2640 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ 2641 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2642 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; 2643 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 2644 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 2645 2646 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 2647 2648 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */ 2649 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 2650 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 2651 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 2652 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 2653 PACKET3_MAP_QUEUES_ME((ring->me == 1 ?
0 : 1)) | 2654 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 2655 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 2656 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */ 2657 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 2658 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 2659 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 2660 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 2661 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 2662 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 2663 } 2664 /* write to scratch for completion */ 2665 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 2666 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); 2667 amdgpu_ring_write(kiq_ring, 0xDEADBEEF); 2668 amdgpu_ring_commit(kiq_ring); 2669 2670 for (i = 0; i < adev->usec_timeout; i++) { 2671 tmp = RREG32(scratch); 2672 if (tmp == 0xDEADBEEF) 2673 break; 2674 DRM_UDELAY(1); 2675 } 2676 if (i >= adev->usec_timeout) { 2677 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n", 2678 scratch, tmp); 2679 r = -EINVAL; 2680 } 2681 amdgpu_gfx_scratch_free(adev, scratch); 2682 2683 return r; 2684 } 2685 2686 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) 2687 { 2688 struct amdgpu_device *adev = ring->adev; 2689 struct v9_mqd *mqd = ring->mqd_ptr; 2690 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 2691 uint32_t tmp; 2692 2693 mqd->header = 0xC0310800; 2694 mqd->compute_pipelinestat_enable = 0x00000001; 2695 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 2696 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 2697 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 2698 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 2699 mqd->compute_misc_reserved = 0x00000003; 2700 2701 mqd->dynamic_cu_mask_addr_lo = 2702 lower_32_bits(ring->mqd_gpu_addr 2703 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 2704 mqd->dynamic_cu_mask_addr_hi = 2705 upper_32_bits(ring->mqd_gpu_addr 2706 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 2707 2708 eop_base_addr = ring->eop_gpu_addr >> 8; 2709 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 2710 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 2711 2712 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2713 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL); 2714 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 2715 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); 2716 2717 mqd->cp_hqd_eop_control = tmp; 2718 2719 /* enable doorbell? 
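 * (writes to the doorbell aperture update the queue wptr without the
 * driver having to program the CP_HQD_PQ_WPTR registers directly)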
*/ 2720 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); 2721 2722 if (ring->use_doorbell) { 2723 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2724 DOORBELL_OFFSET, ring->doorbell_index); 2725 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2726 DOORBELL_EN, 1); 2727 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2728 DOORBELL_SOURCE, 0); 2729 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2730 DOORBELL_HIT, 0); 2731 } else { 2732 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2733 DOORBELL_EN, 0); 2734 } 2735 2736 mqd->cp_hqd_pq_doorbell_control = tmp; 2737 2738 /* disable the queue if it's active */ 2739 ring->wptr = 0; 2740 mqd->cp_hqd_dequeue_request = 0; 2741 mqd->cp_hqd_pq_rptr = 0; 2742 mqd->cp_hqd_pq_wptr_lo = 0; 2743 mqd->cp_hqd_pq_wptr_hi = 0; 2744 2745 /* set the pointer to the MQD */ 2746 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; 2747 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); 2748 2749 /* set MQD vmid to 0 */ 2750 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL); 2751 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 2752 mqd->cp_mqd_control = tmp; 2753 2754 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 2755 hqd_gpu_addr = ring->gpu_addr >> 8; 2756 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 2757 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 2758 2759 /* set up the HQD, this is similar to CP_RB0_CNTL */ 2760 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL); 2761 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 2762 (order_base_2(ring->ring_size / 4) - 1)); 2763 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 2764 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); 2765 #ifdef __BIG_ENDIAN 2766 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); 2767 #endif 2768 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 2769 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); 2770 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 2771 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 2772 mqd->cp_hqd_pq_control = tmp; 2773 2774 /* set the wb address whether it's enabled or not */ 2775 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); 2776 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 2777 mqd->cp_hqd_pq_rptr_report_addr_hi = 2778 upper_32_bits(wb_gpu_addr) & 0xffff; 2779 2780 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 2781 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 2782 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 2783 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 2784 2785 tmp = 0; 2786 /* enable the doorbell if requested */ 2787 if (ring->use_doorbell) { 2788 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); 2789 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2790 DOORBELL_OFFSET, ring->doorbell_index); 2791 2792 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2793 DOORBELL_EN, 1); 2794 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2795 DOORBELL_SOURCE, 0); 2796 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2797 DOORBELL_HIT, 0); 2798 } 2799 2800 mqd->cp_hqd_pq_doorbell_control = tmp; 2801 2802 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2803 ring->wptr = 0; 2804 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR); 2805 2806 /* set the vmid for the queue */ 2807 mqd->cp_hqd_vmid = 0; 2808 2809 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE); 2810 
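/* read-modify-write: only PRELOAD_SIZE changes, the rest of the register keeps its current value */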
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); 2811 mqd->cp_hqd_persistent_state = tmp; 2812 2813 /* set MIN_IB_AVAIL_SIZE */ 2814 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL); 2815 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 2816 mqd->cp_hqd_ib_control = tmp; 2817 2818 /* activate the queue */ 2819 mqd->cp_hqd_active = 1; 2820 2821 return 0; 2822 } 2823 2824 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) 2825 { 2826 struct amdgpu_device *adev = ring->adev; 2827 struct v9_mqd *mqd = ring->mqd_ptr; 2828 int j; 2829 2830 /* disable wptr polling */ 2831 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 2832 2833 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR, 2834 mqd->cp_hqd_eop_base_addr_lo); 2835 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI, 2836 mqd->cp_hqd_eop_base_addr_hi); 2837 2838 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2839 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL, 2840 mqd->cp_hqd_eop_control); 2841 2842 /* enable doorbell? */ 2843 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 2844 mqd->cp_hqd_pq_doorbell_control); 2845 2846 /* disable the queue if it's active */ 2847 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) { 2848 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1); 2849 for (j = 0; j < adev->usec_timeout; j++) { 2850 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) 2851 break; 2852 udelay(1); 2853 } 2854 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 2855 mqd->cp_hqd_dequeue_request); 2856 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 2857 mqd->cp_hqd_pq_rptr); 2858 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 2859 mqd->cp_hqd_pq_wptr_lo); 2860 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 2861 mqd->cp_hqd_pq_wptr_hi); 2862 } 2863 2864 /* set the pointer to the MQD */ 2865 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, 2866 mqd->cp_mqd_base_addr_lo); 2867 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, 2868 mqd->cp_mqd_base_addr_hi); 2869 2870 /* set MQD vmid to 0 */ 2871 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL, 2872 mqd->cp_mqd_control); 2873 2874 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 2875 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE, 2876 mqd->cp_hqd_pq_base_lo); 2877 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI, 2878 mqd->cp_hqd_pq_base_hi); 2879 2880 /* set up the HQD, this is similar to CP_RB0_CNTL */ 2881 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL, 2882 mqd->cp_hqd_pq_control); 2883 2884 /* set the wb address whether it's enabled or not */ 2885 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR, 2886 mqd->cp_hqd_pq_rptr_report_addr_lo); 2887 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 2888 mqd->cp_hqd_pq_rptr_report_addr_hi); 2889 2890 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 2891 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, 2892 mqd->cp_hqd_pq_wptr_poll_addr_lo); 2893 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, 2894 mqd->cp_hqd_pq_wptr_poll_addr_hi); 2895 2896 /* enable the doorbell if requested */ 2897 if (ring->use_doorbell) { 2898 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, 2899 (AMDGPU_DOORBELL64_KIQ *2) << 2); 2900 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, 2901 (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2); 2902 } 2903 2904 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 2905 mqd->cp_hqd_pq_doorbell_control); 2906 2907 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2908 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 2909 mqd->cp_hqd_pq_wptr_lo); 2910 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 2911 mqd->cp_hqd_pq_wptr_hi); 
2912 2913 /* set the vmid for the queue */ 2914 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid); 2915 2916 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 2917 mqd->cp_hqd_persistent_state); 2918 2919 /* activate the queue */ 2920 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 2921 mqd->cp_hqd_active); 2922 2923 if (ring->use_doorbell) 2924 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 2925 2926 return 0; 2927 } 2928 2929 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring) 2930 { 2931 struct amdgpu_device *adev = ring->adev; 2932 int j; 2933 2934 /* disable the queue if it's active */ 2935 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) { 2936 2937 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1); 2938 2939 for (j = 0; j < adev->usec_timeout; j++) { 2940 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) 2941 break; 2942 udelay(1); 2943 } 2944 2945 if (j == adev->usec_timeout) { 2946 DRM_DEBUG("KIQ dequeue request failed.\n"); 2947 2948 /* Manual disable if dequeue request times out */ 2949 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0); 2950 } 2951 2952 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 2953 0); 2954 } 2955 2956 WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0); 2957 WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0); 2958 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0); 2959 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); 2960 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0); 2961 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0); 2962 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0); 2963 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0); 2964 2965 return 0; 2966 } 2967 2968 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) 2969 { 2970 struct amdgpu_device *adev = ring->adev; 2971 struct v9_mqd *mqd = ring->mqd_ptr; 2972 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; 2973 2974 gfx_v9_0_kiq_setting(ring); 2975 2976 if (adev->in_gpu_reset) { /* for GPU_RESET case */ 2977 /* reset MQD to a clean status */ 2978 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2979 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); 2980 2981 /* reset ring buffer */ 2982 ring->wptr = 0; 2983 amdgpu_ring_clear_ring(ring); 2984 2985 mutex_lock(&adev->srbm_mutex); 2986 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 2987 gfx_v9_0_kiq_init_register(ring); 2988 soc15_grbm_select(adev, 0, 0, 0, 0); 2989 mutex_unlock(&adev->srbm_mutex); 2990 } else { 2991 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); 2992 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 2993 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 2994 mutex_lock(&adev->srbm_mutex); 2995 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 2996 gfx_v9_0_mqd_init(ring); 2997 gfx_v9_0_kiq_init_register(ring); 2998 soc15_grbm_select(adev, 0, 0, 0, 0); 2999 mutex_unlock(&adev->srbm_mutex); 3000 3001 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3002 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); 3003 } 3004 3005 return 0; 3006 } 3007 3008 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) 3009 { 3010 struct amdgpu_device *adev = ring->adev; 3011 struct v9_mqd *mqd = ring->mqd_ptr; 3012 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 3013 3014 if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { 3015 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); 3016 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 3017 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 3018
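/* initialize the MQD with this queue selected in the GRBM, since gfx_v9_0_mqd_init() samples live CP_HQD_* registers */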
mutex_lock(&adev->srbm_mutex); 3019 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3020 gfx_v9_0_mqd_init(ring); 3021 soc15_grbm_select(adev, 0, 0, 0, 0); 3022 mutex_unlock(&adev->srbm_mutex); 3023 3024 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3025 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); 3026 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */ 3027 /* reset MQD to a clean status */ 3028 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3029 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); 3030 3031 /* reset ring buffer */ 3032 ring->wptr = 0; 3033 amdgpu_ring_clear_ring(ring); 3034 } else { 3035 amdgpu_ring_clear_ring(ring); 3036 } 3037 3038 return 0; 3039 } 3040 3041 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) 3042 { 3043 struct amdgpu_ring *ring = NULL; 3044 int r = 0, i; 3045 3046 gfx_v9_0_cp_compute_enable(adev, true); 3047 3048 ring = &adev->gfx.kiq.ring; 3049 3050 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3051 if (unlikely(r != 0)) 3052 goto done; 3053 3054 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3055 if (!r) { 3056 r = gfx_v9_0_kiq_init_queue(ring); 3057 amdgpu_bo_kunmap(ring->mqd_obj); 3058 ring->mqd_ptr = NULL; 3059 } 3060 amdgpu_bo_unreserve(ring->mqd_obj); 3061 if (r) 3062 goto done; 3063 3064 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3065 ring = &adev->gfx.compute_ring[i]; 3066 3067 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3068 if (unlikely(r != 0)) 3069 goto done; 3070 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3071 if (!r) { 3072 r = gfx_v9_0_kcq_init_queue(ring); 3073 amdgpu_bo_kunmap(ring->mqd_obj); 3074 ring->mqd_ptr = NULL; 3075 } 3076 amdgpu_bo_unreserve(ring->mqd_obj); 3077 if (r) 3078 goto done; 3079 } 3080 3081 r = gfx_v9_0_kiq_kcq_enable(adev); 3082 done: 3083 return r; 3084 } 3085 3086 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) 3087 { 3088 int r, i; 3089 struct amdgpu_ring *ring; 3090 3091 if (!(adev->flags & AMD_IS_APU)) 3092 gfx_v9_0_enable_gui_idle_interrupt(adev, false); 3093 3094 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 3095 /* legacy firmware loading */ 3096 r = gfx_v9_0_cp_gfx_load_microcode(adev); 3097 if (r) 3098 return r; 3099 3100 r = gfx_v9_0_cp_compute_load_microcode(adev); 3101 if (r) 3102 return r; 3103 } 3104 3105 r = gfx_v9_0_cp_gfx_resume(adev); 3106 if (r) 3107 return r; 3108 3109 r = gfx_v9_0_kiq_resume(adev); 3110 if (r) 3111 return r; 3112 3113 ring = &adev->gfx.gfx_ring[0]; 3114 r = amdgpu_ring_test_ring(ring); 3115 if (r) { 3116 ring->ready = false; 3117 return r; 3118 } 3119 3120 ring = &adev->gfx.kiq.ring; 3121 ring->ready = true; 3122 r = amdgpu_ring_test_ring(ring); 3123 if (r) 3124 ring->ready = false; 3125 3126 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3127 ring = &adev->gfx.compute_ring[i]; 3128 3129 ring->ready = true; 3130 r = amdgpu_ring_test_ring(ring); 3131 if (r) 3132 ring->ready = false; 3133 } 3134 3135 gfx_v9_0_enable_gui_idle_interrupt(adev, true); 3136 3137 return 0; 3138 } 3139 3140 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) 3141 { 3142 gfx_v9_0_cp_gfx_enable(adev, enable); 3143 gfx_v9_0_cp_compute_enable(adev, enable); 3144 } 3145 3146 static int gfx_v9_0_hw_init(void *handle) 3147 { 3148 int r; 3149 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3150 3151 gfx_v9_0_init_golden_registers(adev); 3152 3153 gfx_v9_0_gpu_init(adev); 3154 3155 r = gfx_v9_0_csb_vram_pin(adev); 3156 if (r) 3157 return 
r; 3158 3159 r = gfx_v9_0_rlc_resume(adev); 3160 if (r) 3161 return r; 3162 3163 r = gfx_v9_0_cp_resume(adev); 3164 if (r) 3165 return r; 3166 3167 r = gfx_v9_0_ngg_en(adev); 3168 if (r) 3169 return r; 3170 3171 return r; 3172 } 3173 3174 static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring) 3175 { 3176 struct amdgpu_device *adev = kiq_ring->adev; 3177 uint32_t scratch, tmp = 0; 3178 int r, i; 3179 3180 r = amdgpu_gfx_scratch_get(adev, &scratch); 3181 if (r) { 3182 DRM_ERROR("Failed to get scratch reg (%d).\n", r); 3183 return r; 3184 } 3185 WREG32(scratch, 0xCAFEDEAD); 3186 3187 r = amdgpu_ring_alloc(kiq_ring, 10); 3188 if (r) { 3189 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 3190 amdgpu_gfx_scratch_free(adev, scratch); 3191 return r; 3192 } 3193 3194 /* unmap queues */ 3195 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); 3196 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 3197 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ 3198 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | 3199 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | 3200 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); 3201 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); 3202 amdgpu_ring_write(kiq_ring, 0); 3203 amdgpu_ring_write(kiq_ring, 0); 3204 amdgpu_ring_write(kiq_ring, 0); 3205 /* write to scratch for completion */ 3206 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 3207 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); 3208 amdgpu_ring_write(kiq_ring, 0xDEADBEEF); 3209 amdgpu_ring_commit(kiq_ring); 3210 3211 for (i = 0; i < adev->usec_timeout; i++) { 3212 tmp = RREG32(scratch); 3213 if (tmp == 0xDEADBEEF) 3214 break; 3215 DRM_UDELAY(1); 3216 } 3217 if (i >= adev->usec_timeout) { 3218 DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); 3219 r = -EINVAL; 3220 } 3221 amdgpu_gfx_scratch_free(adev, scratch); 3222 return r; 3223 } 3224 3225 static int gfx_v9_0_hw_fini(void *handle) 3226 { 3227 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3228 int i; 3229 3230 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 3231 AMD_PG_STATE_UNGATE); 3232 3233 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 3234 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 3235 3236 /* disable the KCQs so the CPC stops touching memory that is about to be freed */ 3237 for (i = 0; i < adev->gfx.num_compute_rings; i++) 3238 gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]); 3239 3240 if (amdgpu_sriov_vf(adev)) { 3241 gfx_v9_0_cp_gfx_enable(adev, false); 3242 /* must disable polling for SRIOV when the hw is finished; otherwise 3243 * the CPC engine may keep fetching a WB address that is no longer 3244 * valid once the sw is finished, triggering a DMAR read error on the 3245 * hypervisor side.
3246 */ 3247 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 3248 return 0; 3249 } 3250 3251 /* Use deinitialize sequence from CAIL when unbinding device from driver, 3252 * otherwise KIQ is hanging when binding back 3253 */ 3254 if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { 3255 mutex_lock(&adev->srbm_mutex); 3256 soc15_grbm_select(adev, adev->gfx.kiq.ring.me, 3257 adev->gfx.kiq.ring.pipe, 3258 adev->gfx.kiq.ring.queue, 0); 3259 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring); 3260 soc15_grbm_select(adev, 0, 0, 0, 0); 3261 mutex_unlock(&adev->srbm_mutex); 3262 } 3263 3264 gfx_v9_0_cp_enable(adev, false); 3265 gfx_v9_0_rlc_stop(adev); 3266 3267 gfx_v9_0_csb_vram_unpin(adev); 3268 3269 return 0; 3270 } 3271 3272 static int gfx_v9_0_suspend(void *handle) 3273 { 3274 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3275 3276 adev->gfx.in_suspend = true; 3277 return gfx_v9_0_hw_fini(adev); 3278 } 3279 3280 static int gfx_v9_0_resume(void *handle) 3281 { 3282 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3283 int r; 3284 3285 r = gfx_v9_0_hw_init(adev); 3286 adev->gfx.in_suspend = false; 3287 return r; 3288 } 3289 3290 static bool gfx_v9_0_is_idle(void *handle) 3291 { 3292 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3293 3294 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS), 3295 GRBM_STATUS, GUI_ACTIVE)) 3296 return false; 3297 else 3298 return true; 3299 } 3300 3301 static int gfx_v9_0_wait_for_idle(void *handle) 3302 { 3303 unsigned i; 3304 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3305 3306 for (i = 0; i < adev->usec_timeout; i++) { 3307 if (gfx_v9_0_is_idle(handle)) 3308 return 0; 3309 udelay(1); 3310 } 3311 return -ETIMEDOUT; 3312 } 3313 3314 static int gfx_v9_0_soft_reset(void *handle) 3315 { 3316 u32 grbm_soft_reset = 0; 3317 u32 tmp; 3318 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3319 3320 /* GRBM_STATUS */ 3321 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); 3322 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | 3323 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | 3324 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | 3325 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | 3326 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | 3327 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { 3328 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3329 GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 3330 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3331 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); 3332 } 3333 3334 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { 3335 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3336 GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 3337 } 3338 3339 /* GRBM_STATUS2 */ 3340 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); 3341 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) 3342 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3343 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 3344 3345 3346 if (grbm_soft_reset) { 3347 /* stop the rlc */ 3348 gfx_v9_0_rlc_stop(adev); 3349 3350 /* Disable GFX parsing/prefetching */ 3351 gfx_v9_0_cp_gfx_enable(adev, false); 3352 3353 /* Disable MEC parsing/prefetching */ 3354 gfx_v9_0_cp_compute_enable(adev, false); 3355 3356 if (grbm_soft_reset) { 3357 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3358 tmp |= grbm_soft_reset; 3359 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 3360 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); 3361 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3362 3363 
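/* keep the reset bits asserted briefly before clearing them */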
udelay(50); 3364 3365 tmp &= ~grbm_soft_reset; 3366 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); 3367 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3368 } 3369 3370 /* Wait a little for things to settle down */ 3371 udelay(50); 3372 } 3373 return 0; 3374 } 3375 3376 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) 3377 { 3378 uint64_t clock; 3379 3380 mutex_lock(&adev->gfx.gpu_clock_mutex); 3381 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); 3382 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | 3383 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 3384 mutex_unlock(&adev->gfx.gpu_clock_mutex); 3385 return clock; 3386 } 3387 3388 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 3389 uint32_t vmid, 3390 uint32_t gds_base, uint32_t gds_size, 3391 uint32_t gws_base, uint32_t gws_size, 3392 uint32_t oa_base, uint32_t oa_size) 3393 { 3394 struct amdgpu_device *adev = ring->adev; 3395 3396 gds_base = gds_base >> AMDGPU_GDS_SHIFT; 3397 gds_size = gds_size >> AMDGPU_GDS_SHIFT; 3398 3399 gws_base = gws_base >> AMDGPU_GWS_SHIFT; 3400 gws_size = gws_size >> AMDGPU_GWS_SHIFT; 3401 3402 oa_base = oa_base >> AMDGPU_OA_SHIFT; 3403 oa_size = oa_size >> AMDGPU_OA_SHIFT; 3404 3405 /* GDS Base */ 3406 gfx_v9_0_write_data_to_reg(ring, 0, false, 3407 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid, 3408 gds_base); 3409 3410 /* GDS Size */ 3411 gfx_v9_0_write_data_to_reg(ring, 0, false, 3412 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid, 3413 gds_size); 3414 3415 /* GWS */ 3416 gfx_v9_0_write_data_to_reg(ring, 0, false, 3417 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid, 3418 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 3419 3420 /* OA */ 3421 gfx_v9_0_write_data_to_reg(ring, 0, false, 3422 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid, 3423 (1 << (oa_size + oa_base)) - (1 << oa_base)); 3424 } 3425 3426 static int gfx_v9_0_early_init(void *handle) 3427 { 3428 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3429 3430 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; 3431 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; 3432 gfx_v9_0_set_ring_funcs(adev); 3433 gfx_v9_0_set_irq_funcs(adev); 3434 gfx_v9_0_set_gds_init(adev); 3435 gfx_v9_0_set_rlc_funcs(adev); 3436 3437 return 0; 3438 } 3439 3440 static int gfx_v9_0_late_init(void *handle) 3441 { 3442 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3443 int r; 3444 3445 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 3446 if (r) 3447 return r; 3448 3449 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 3450 if (r) 3451 return r; 3452 3453 return 0; 3454 } 3455 3456 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev) 3457 { 3458 uint32_t rlc_setting, data; 3459 unsigned i; 3460 3461 if (adev->gfx.rlc.in_safe_mode) 3462 return; 3463 3464 /* if RLC is not enabled, do nothing */ 3465 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); 3466 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) 3467 return; 3468 3469 if (adev->cg_flags & 3470 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | 3471 AMD_CG_SUPPORT_GFX_3D_CGCG)) { 3472 data = RLC_SAFE_MODE__CMD_MASK; 3473 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 3474 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); 3475 3476 /* wait for RLC_SAFE_MODE */ 3477 for (i = 0; i < adev->usec_timeout; i++) { 3478 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) 3479 break; 3480 udelay(1); 3481 } 3482 adev->gfx.rlc.in_safe_mode = true; 3483 } 
3484 } 3485 3486 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev) 3487 { 3488 uint32_t rlc_setting, data; 3489 3490 if (!adev->gfx.rlc.in_safe_mode) 3491 return; 3492 3493 /* if RLC is not enabled, do nothing */ 3494 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); 3495 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) 3496 return; 3497 3498 if (adev->cg_flags & 3499 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { 3500 /* 3501 * Try to exit safe mode only if it is already in safe 3502 * mode. 3503 */ 3504 data = RLC_SAFE_MODE__CMD_MASK; 3505 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); 3506 adev->gfx.rlc.in_safe_mode = false; 3507 } 3508 } 3509 3510 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, 3511 bool enable) 3512 { 3513 gfx_v9_0_enter_rlc_safe_mode(adev); 3514 3515 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { 3516 gfx_v9_0_enable_gfx_cg_power_gating(adev, true); 3517 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) 3518 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true); 3519 } else { 3520 gfx_v9_0_enable_gfx_cg_power_gating(adev, false); 3521 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); 3522 } 3523 3524 gfx_v9_0_exit_rlc_safe_mode(adev); 3525 } 3526 3527 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, 3528 bool enable) 3529 { 3530 /* TODO: double check if we need to perform under safe mode */ 3531 /* gfx_v9_0_enter_rlc_safe_mode(adev); */ 3532 3533 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) 3534 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true); 3535 else 3536 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false); 3537 3538 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) 3539 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true); 3540 else 3541 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false); 3542 3543 /* gfx_v9_0_exit_rlc_safe_mode(adev); */ 3544 } 3545 3546 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 3547 bool enable) 3548 { 3549 uint32_t data, def; 3550 3551 /* It is disabled by HW by default */ 3552 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3553 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 3554 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3555 3556 if (adev->asic_type != CHIP_VEGA12) 3557 data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; 3558 3559 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3560 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3561 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3562 3563 /* only for Vega10 & Raven1 */ 3564 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; 3565 3566 if (def != data) 3567 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3568 3569 /* MGLS is a global flag to control all MGLS in GFX */ 3570 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { 3571 /* 2 - RLC memory Light sleep */ 3572 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { 3573 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3574 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3575 if (def != data) 3576 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); 3577 } 3578 /* 3 - CP memory Light sleep */ 3579 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { 3580 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3581 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3582 if (def != data) 3583 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3584 } 3585 } 3586 } else { 3587 /* 1 - MGCG_OVERRIDE */ 3588 def = data = RREG32_SOC15(GC, 0, 
mmRLC_CGTT_MGCG_OVERRIDE); 3589 3590 if (adev->asic_type != CHIP_VEGA12) 3591 data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; 3592 3593 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 3594 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3595 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3596 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3597 3598 if (def != data) 3599 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3600 3601 /* 2 - disable MGLS in RLC */ 3602 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3603 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { 3604 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3605 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); 3606 } 3607 3608 /* 3 - disable MGLS in CP */ 3609 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3610 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { 3611 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3612 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3613 } 3614 } 3615 } 3616 3617 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, 3618 bool enable) 3619 { 3620 uint32_t data, def; 3621 3622 adev->gfx.rlc.funcs->enter_safe_mode(adev); 3623 3624 /* Enable 3D CGCG/CGLS */ 3625 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { 3626 /* write cmd to clear cgcg/cgls ov */ 3627 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3628 /* unset CGCG override */ 3629 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 3630 /* update CGCG and CGLS override bits */ 3631 if (def != data) 3632 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3633 3634 /* enable 3Dcgcg FSM(0x0000363f) */ 3635 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3636 3637 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3638 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 3639 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 3640 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3641 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 3642 if (def != data) 3643 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); 3644 3645 /* set IDLE_POLL_COUNT(0x00900100) */ 3646 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); 3647 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3648 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3649 if (def != data) 3650 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); 3651 } else { 3652 /* Disable CGCG/CGLS */ 3653 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3654 /* disable cgcg, cgls should be disabled */ 3655 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK | 3656 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK); 3657 /* disable cgcg and cgls in FSM */ 3658 if (def != data) 3659 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); 3660 } 3661 3662 adev->gfx.rlc.funcs->exit_safe_mode(adev); 3663 } 3664 3665 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 3666 bool enable) 3667 { 3668 uint32_t def, data; 3669 3670 adev->gfx.rlc.funcs->enter_safe_mode(adev); 3671 3672 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { 3673 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3674 /* unset CGCG override */ 3675 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 3676 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3677 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3678 else 3679 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3680 /* update CGCG and CGLS override bits */ 3681 if (def != data) 3682 
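/* (the def/data compare-before-write pattern used here and throughout the
 * clock gating code avoids redundant MMIO writes) */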
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3683 3684 /* enable cgcg FSM(0x0000363F) */ 3685 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3686 3687 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3688 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3689 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3690 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3691 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3692 if (def != data) 3693 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); 3694 3695 /* set IDLE_POLL_COUNT(0x00900100) */ 3696 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); 3697 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3698 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3699 if (def != data) 3700 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); 3701 } else { 3702 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3703 /* reset CGCG/CGLS bits */ 3704 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); 3705 /* disable cgcg and cgls in FSM */ 3706 if (def != data) 3707 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); 3708 } 3709 3710 adev->gfx.rlc.funcs->exit_safe_mode(adev); 3711 } 3712 3713 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, 3714 bool enable) 3715 { 3716 if (enable) { 3717 /* CGCG/CGLS should be enabled after MGCG/MGLS 3718 * === MGCG + MGLS === 3719 */ 3720 gfx_v9_0_update_medium_grain_clock_gating(adev, enable); 3721 /* === CGCG /CGLS for GFX 3D Only === */ 3722 gfx_v9_0_update_3d_clock_gating(adev, enable); 3723 /* === CGCG + CGLS === */ 3724 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable); 3725 } else { 3726 /* CGCG/CGLS should be disabled before MGCG/MGLS 3727 * === CGCG + CGLS === 3728 */ 3729 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable); 3730 /* === CGCG /CGLS for GFX 3D Only === */ 3731 gfx_v9_0_update_3d_clock_gating(adev, enable); 3732 /* === MGCG + MGLS === */ 3733 gfx_v9_0_update_medium_grain_clock_gating(adev, enable); 3734 } 3735 return 0; 3736 } 3737 3738 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { 3739 .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode, 3740 .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode 3741 }; 3742 3743 static int gfx_v9_0_set_powergating_state(void *handle, 3744 enum amd_powergating_state state) 3745 { 3746 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3747 bool enable = (state == AMD_PG_STATE_GATE) ? 
true : false; 3748 3749 switch (adev->asic_type) { 3750 case CHIP_RAVEN: 3751 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { 3752 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); 3753 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); 3754 } else { 3755 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false); 3756 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false); 3757 } 3758 3759 if (adev->pg_flags & AMD_PG_SUPPORT_CP) 3760 gfx_v9_0_enable_cp_power_gating(adev, true); 3761 else 3762 gfx_v9_0_enable_cp_power_gating(adev, false); 3763 3764 /* update gfx cgpg state */ 3765 gfx_v9_0_update_gfx_cg_power_gating(adev, enable); 3766 3767 /* update mgcg state */ 3768 gfx_v9_0_update_gfx_mg_power_gating(adev, enable); 3769 3770 /* set gfx off through smu */ 3771 if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu) 3772 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true); 3773 break; 3774 case CHIP_VEGA12: 3775 /* set gfx off through smu */ 3776 if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu) 3777 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true); 3778 break; 3779 default: 3780 break; 3781 } 3782 3783 return 0; 3784 } 3785 3786 static int gfx_v9_0_set_clockgating_state(void *handle, 3787 enum amd_clockgating_state state) 3788 { 3789 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3790 3791 if (amdgpu_sriov_vf(adev)) 3792 return 0; 3793 3794 switch (adev->asic_type) { 3795 case CHIP_VEGA10: 3796 case CHIP_VEGA12: 3797 case CHIP_VEGA20: 3798 case CHIP_RAVEN: 3799 gfx_v9_0_update_gfx_clock_gating(adev, 3800 state == AMD_CG_STATE_GATE ? true : false); 3801 break; 3802 default: 3803 break; 3804 } 3805 return 0; 3806 } 3807 3808 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) 3809 { 3810 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3811 int data; 3812 3813 if (amdgpu_sriov_vf(adev)) 3814 *flags = 0; 3815 3816 /* AMD_CG_SUPPORT_GFX_MGCG */ 3817 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3818 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 3819 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 3820 3821 /* AMD_CG_SUPPORT_GFX_CGCG */ 3822 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3823 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 3824 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 3825 3826 /* AMD_CG_SUPPORT_GFX_CGLS */ 3827 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 3828 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 3829 3830 /* AMD_CG_SUPPORT_GFX_RLC_LS */ 3831 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3832 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) 3833 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; 3834 3835 /* AMD_CG_SUPPORT_GFX_CP_LS */ 3836 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3837 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) 3838 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; 3839 3840 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 3841 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3842 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 3843 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 3844 3845 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 3846 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 3847 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 3848 } 3849 3850 static uint64_t gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 3851 { 3852 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr*/ 3853 } 3854 3855 static uint64_t gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 3856 { 3857 struct amdgpu_device *adev = ring->adev; 3858 u64 
wptr; 3859 3860 /* XXX check if swapping is necessary on BE */ 3861 if (ring->use_doorbell) { 3862 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]); 3863 } else { 3864 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR); 3865 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32; 3866 } 3867 3868 return wptr; 3869 } 3870 3871 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 3872 { 3873 struct amdgpu_device *adev = ring->adev; 3874 3875 if (ring->use_doorbell) { 3876 /* XXX check if swapping is necessary on BE */ 3877 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); 3878 WDOORBELL64(ring->doorbell_index, ring->wptr); 3879 } else { 3880 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 3881 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 3882 } 3883 } 3884 3885 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 3886 { 3887 struct amdgpu_device *adev = ring->adev; 3888 u32 ref_and_mask, reg_mem_engine; 3889 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 3890 3891 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 3892 switch (ring->me) { 3893 case 1: 3894 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 3895 break; 3896 case 2: 3897 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 3898 break; 3899 default: 3900 return; 3901 } 3902 reg_mem_engine = 0; 3903 } else { 3904 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 3905 reg_mem_engine = 1; /* pfp */ 3906 } 3907 3908 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 3909 adev->nbio_funcs->get_hdp_flush_req_offset(adev), 3910 adev->nbio_funcs->get_hdp_flush_done_offset(adev), 3911 ref_and_mask, ref_and_mask, 0x20); 3912 } 3913 3914 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 3915 struct amdgpu_ib *ib, 3916 unsigned vmid, bool ctx_switch) 3917 { 3918 u32 header, control = 0; 3919 3920 if (ib->flags & AMDGPU_IB_FLAG_CE) 3921 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); 3922 else 3923 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 3924 3925 control |= ib->length_dw | (vmid << 24); 3926 3927 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 3928 control |= INDIRECT_BUFFER_PRE_ENB(1); 3929 3930 if (!(ib->flags & AMDGPU_IB_FLAG_CE)) 3931 gfx_v9_0_ring_emit_de_meta(ring); 3932 } 3933 3934 amdgpu_ring_write(ring, header); 3935 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 3936 amdgpu_ring_write(ring, 3937 #ifdef __BIG_ENDIAN 3938 (2 << 0) | 3939 #endif 3940 lower_32_bits(ib->gpu_addr)); 3941 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 3942 amdgpu_ring_write(ring, control); 3943 } 3944 3945 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 3946 struct amdgpu_ib *ib, 3947 unsigned vmid, bool ctx_switch) 3948 { 3949 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 3950 3951 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 3952 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 3953 amdgpu_ring_write(ring, 3954 #ifdef __BIG_ENDIAN 3955 (2 << 0) | 3956 #endif 3957 lower_32_bits(ib->gpu_addr)); 3958 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 3959 amdgpu_ring_write(ring, control); 3960 } 3961 3962 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, 3963 uint64_t seq, unsigned flags) 3964 { 3965 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 3966 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 3967 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; 3968 3969 /* 
RELEASE_MEM - flush caches, send int */ 3970 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 3971 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN | 3972 EOP_TC_NC_ACTION_EN) : 3973 (EOP_TCL1_ACTION_EN | 3974 EOP_TC_ACTION_EN | 3975 EOP_TC_WB_ACTION_EN | 3976 EOP_TC_MD_ACTION_EN)) | 3977 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 3978 EVENT_INDEX(5))); 3979 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 3980 3981 /* 3982 * the address should be Qword aligned if 64bit write, Dword 3983 * aligned if only send 32bit data low (discard data high) 3984 */ 3985 if (write64bit) 3986 BUG_ON(addr & 0x7); 3987 else 3988 BUG_ON(addr & 0x3); 3989 amdgpu_ring_write(ring, lower_32_bits(addr)); 3990 amdgpu_ring_write(ring, upper_32_bits(addr)); 3991 amdgpu_ring_write(ring, lower_32_bits(seq)); 3992 amdgpu_ring_write(ring, upper_32_bits(seq)); 3993 amdgpu_ring_write(ring, 0); 3994 } 3995 3996 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 3997 { 3998 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3999 uint32_t seq = ring->fence_drv.sync_seq; 4000 uint64_t addr = ring->fence_drv.gpu_addr; 4001 4002 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0, 4003 lower_32_bits(addr), upper_32_bits(addr), 4004 seq, 0xffffffff, 4); 4005 } 4006 4007 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 4008 unsigned vmid, uint64_t pd_addr) 4009 { 4010 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 4011 4012 /* compute doesn't have PFP */ 4013 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 4014 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 4015 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4016 amdgpu_ring_write(ring, 0x0); 4017 } 4018 } 4019 4020 static uint64_t gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 4021 { 4022 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ 4023 } 4024 4025 static uint64_t gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 4026 { 4027 uint64_t wptr; 4028 4029 /* XXX check if swapping is necessary on BE */ 4030 if (ring->use_doorbell) 4031 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); 4032 else 4033 BUG(); 4034 return wptr; 4035 } 4036 4037 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring, 4038 bool acquire) 4039 { 4040 struct amdgpu_device *adev = ring->adev; 4041 int pipe_num, tmp, reg; 4042 int pipe_percent = acquire ? 
SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; 4043 4044 pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; 4045 4046 /* first me only has 2 entries, GFX and HP3D */ 4047 if (ring->me > 0) 4048 pipe_num -= 2; 4049 4050 reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num; 4051 tmp = RREG32(reg); 4052 tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); 4053 WREG32(reg, tmp); 4054 } 4055 4056 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev, 4057 struct amdgpu_ring *ring, 4058 bool acquire) 4059 { 4060 int i, pipe; 4061 bool reserve; 4062 struct amdgpu_ring *iring; 4063 4064 mutex_lock(&adev->gfx.pipe_reserve_mutex); 4065 pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0); 4066 if (acquire) 4067 set_bit(pipe, adev->gfx.pipe_reserve_bitmap); 4068 else 4069 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); 4070 4071 if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { 4072 /* Clear all reservations - everyone reacquires all resources */ 4073 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) 4074 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], 4075 true); 4076 4077 for (i = 0; i < adev->gfx.num_compute_rings; ++i) 4078 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], 4079 true); 4080 } else { 4081 /* Lower all pipes without a current reservation */ 4082 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { 4083 iring = &adev->gfx.gfx_ring[i]; 4084 pipe = amdgpu_gfx_queue_to_bit(adev, 4085 iring->me, 4086 iring->pipe, 4087 0); 4088 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); 4089 gfx_v9_0_ring_set_pipe_percent(iring, reserve); 4090 } 4091 4092 for (i = 0; i < adev->gfx.num_compute_rings; ++i) { 4093 iring = &adev->gfx.compute_ring[i]; 4094 pipe = amdgpu_gfx_queue_to_bit(adev, 4095 iring->me, 4096 iring->pipe, 4097 0); 4098 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); 4099 gfx_v9_0_ring_set_pipe_percent(iring, reserve); 4100 } 4101 } 4102 4103 mutex_unlock(&adev->gfx.pipe_reserve_mutex); 4104 } 4105 4106 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev, 4107 struct amdgpu_ring *ring, 4108 bool acquire) 4109 { 4110 uint32_t pipe_priority = acquire ? 0x2 : 0x0; 4111 uint32_t queue_priority = acquire ? 
0xf : 0x0; 4112 4113 mutex_lock(&adev->srbm_mutex); 4114 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4115 4116 WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority); 4117 WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority); 4118 4119 soc15_grbm_select(adev, 0, 0, 0, 0); 4120 mutex_unlock(&adev->srbm_mutex); 4121 } 4122 4123 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring, 4124 enum drm_sched_priority priority) 4125 { 4126 struct amdgpu_device *adev = ring->adev; 4127 bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; 4128 4129 if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) 4130 return; 4131 4132 gfx_v9_0_hqd_set_priority(adev, ring, acquire); 4133 gfx_v9_0_pipe_reserve_resources(adev, ring, acquire); 4134 } 4135 4136 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 4137 { 4138 struct amdgpu_device *adev = ring->adev; 4139 4140 /* XXX check if swapping is necessary on BE */ 4141 if (ring->use_doorbell) { 4142 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); 4143 WDOORBELL64(ring->doorbell_index, ring->wptr); 4144 } else{ 4145 BUG(); /* only DOORBELL method supported on gfx9 now */ 4146 } 4147 } 4148 4149 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, uint64_t addr, 4150 uint64_t seq, unsigned int flags) 4151 { 4152 struct amdgpu_device *adev = ring->adev; 4153 4154 /* we only allocate 32bit for each seq wb address */ 4155 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 4156 4157 /* write fence seq to the "addr" */ 4158 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4159 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4160 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 4161 amdgpu_ring_write(ring, lower_32_bits(addr)); 4162 amdgpu_ring_write(ring, upper_32_bits(addr)); 4163 amdgpu_ring_write(ring, lower_32_bits(seq)); 4164 4165 if (flags & AMDGPU_FENCE_FLAG_INT) { 4166 /* set register to trigger INT */ 4167 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4168 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4169 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 4170 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS)); 4171 amdgpu_ring_write(ring, 0); 4172 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 4173 } 4174 } 4175 4176 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring) 4177 { 4178 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 4179 amdgpu_ring_write(ring, 0); 4180 } 4181 4182 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring) 4183 { 4184 struct v9_ce_ib_state ce_payload = {0}; 4185 uint64_t csa_addr; 4186 int cnt; 4187 4188 cnt = (sizeof(ce_payload) >> 2) + 4 - 2; 4189 csa_addr = amdgpu_csa_vaddr(ring->adev); 4190 4191 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 4192 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | 4193 WRITE_DATA_DST_SEL(8) | 4194 WR_CONFIRM) | 4195 WRITE_DATA_CACHE_POLICY(0)); 4196 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload))); 4197 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload))); 4198 amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2); 4199 } 4200 4201 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring) 4202 { 4203 struct v9_de_ib_state de_payload = {0}; 4204 uint64_t csa_addr, gds_addr; 4205 int cnt; 4206 4207 csa_addr = amdgpu_csa_vaddr(ring->adev); 4208 gds_addr = csa_addr + 4096; 4209 de_payload.gds_backup_addrlo = 
lower_32_bits(gds_addr); 4210 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 4211 4212 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 4213 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 4214 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 4215 WRITE_DATA_DST_SEL(8) | 4216 WR_CONFIRM) | 4217 WRITE_DATA_CACHE_POLICY(0)); 4218 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload))); 4219 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload))); 4220 amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2); 4221 } 4222 4223 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) 4224 { 4225 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 4226 amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame_begin, 1: frame_end */ 4227 } 4228 4229 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) 4230 { 4231 uint32_t dw2 = 0; 4232 4233 if (amdgpu_sriov_vf(ring->adev)) 4234 gfx_v9_0_ring_emit_ce_meta(ring); 4235 4236 gfx_v9_0_ring_emit_tmz(ring, true); 4237 4238 dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */ 4239 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 4240 /* set load_global_config & load_global_uconfig */ 4241 dw2 |= 0x8001; 4242 /* set load_cs_sh_regs */ 4243 dw2 |= 0x01000000; 4244 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 4245 dw2 |= 0x10002; 4246 4247 /* set load_ce_ram if a preamble is present */ 4248 if (AMDGPU_PREAMBLE_IB_PRESENT & flags) 4249 dw2 |= 0x10000000; 4250 } else { 4251 /* still set load_ce_ram the first time a preamble is presented, 4252 * even though no context switch happens. 4253 */ 4254 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags) 4255 dw2 |= 0x10000000; 4256 } 4257 4258 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 4259 amdgpu_ring_write(ring, dw2); 4260 amdgpu_ring_write(ring, 0); 4261 } 4262 4263 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) 4264 { 4265 unsigned ret; 4266 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 4267 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); 4268 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); 4269 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */ 4270 ret = ring->wptr & ring->buf_mask; 4271 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */ 4272 return ret; 4273 } 4274 4275 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) 4276 { 4277 unsigned cur; 4278 BUG_ON(offset > ring->buf_mask); 4279 BUG_ON(ring->ring[offset] != 0x55aa55aa); 4280 4281 cur = (ring->wptr & ring->buf_mask) - 1; 4282 if (likely(cur > offset)) 4283 ring->ring[offset] = cur - offset; 4284 else 4285 ring->ring[offset] = (ring->ring_size >> 2) - offset + cur; 4286 } 4287 4288 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 4289 { 4290 struct amdgpu_device *adev = ring->adev; 4291 4292 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 4293 amdgpu_ring_write(ring, 0 | /* src: register */ 4294 (5 << 8) | /* dst: memory */ 4295 (1 << 20)); /* write confirm */ 4296 amdgpu_ring_write(ring, reg); 4297 amdgpu_ring_write(ring, 0); 4298 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 4299 adev->virt.reg_val_offs * 4)); 4300 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 4301 adev->virt.reg_val_offs * 4)); 4302 } 4303 4304 static void
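/* A brief note on the engine selection in the helper below: GFX rings write
 * through the ME with a write confirm, the KIQ uses the "no increment"
 * encoding, and all other ring types simply request a write confirm. */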
gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 4305 uint32_t val) 4306 { 4307 uint32_t cmd = 0; 4308 4309 switch (ring->funcs->type) { 4310 case AMDGPU_RING_TYPE_GFX: 4311 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 4312 break; 4313 case AMDGPU_RING_TYPE_KIQ: 4314 cmd = (1 << 16); /* no inc addr */ 4315 break; 4316 default: 4317 cmd = WR_CONFIRM; 4318 break; 4319 } 4320 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4321 amdgpu_ring_write(ring, cmd); 4322 amdgpu_ring_write(ring, reg); 4323 amdgpu_ring_write(ring, 0); 4324 amdgpu_ring_write(ring, val); 4325 } 4326 4327 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 4328 uint32_t val, uint32_t mask) 4329 { 4330 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 4331 } 4332 4333 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 4334 uint32_t reg0, uint32_t reg1, 4335 uint32_t ref, uint32_t mask) 4336 { 4337 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 4338 4339 if (amdgpu_sriov_vf(ring->adev)) 4340 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 4341 ref, mask, 0x20); 4342 else 4343 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, 4344 ref, mask); 4345 } 4346 4347 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4348 enum amdgpu_interrupt_state state) 4349 { 4350 switch (state) { 4351 case AMDGPU_IRQ_STATE_DISABLE: 4352 case AMDGPU_IRQ_STATE_ENABLE: 4353 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4354 TIME_STAMP_INT_ENABLE, 4355 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4356 break; 4357 default: 4358 break; 4359 } 4360 } 4361 4362 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 4363 int me, int pipe, 4364 enum amdgpu_interrupt_state state) 4365 { 4366 u32 mec_int_cntl, mec_int_cntl_reg; 4367 4368 /* 4369 * amdgpu controls only the first MEC. That's why this function only 4370 * handles the setting of interrupts for this specific MEC. All other 4371 * pipes' interrupts are set by amdkfd. 
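 * (Note that me is 1-based at this point: me == 1 addresses MEC1, and any
 * other value is rejected below with a debug message.)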
4372 */ 4373 4374 if (me == 1) { 4375 switch (pipe) { 4376 case 0: 4377 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); 4378 break; 4379 case 1: 4380 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL); 4381 break; 4382 case 2: 4383 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL); 4384 break; 4385 case 3: 4386 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL); 4387 break; 4388 default: 4389 DRM_DEBUG("invalid pipe %d\n", pipe); 4390 return; 4391 } 4392 } else { 4393 DRM_DEBUG("invalid me %d\n", me); 4394 return; 4395 } 4396 4397 switch (state) { 4398 case AMDGPU_IRQ_STATE_DISABLE: 4399 mec_int_cntl = RREG32(mec_int_cntl_reg); 4400 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4401 TIME_STAMP_INT_ENABLE, 0); 4402 WREG32(mec_int_cntl_reg, mec_int_cntl); 4403 break; 4404 case AMDGPU_IRQ_STATE_ENABLE: 4405 mec_int_cntl = RREG32(mec_int_cntl_reg); 4406 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4407 TIME_STAMP_INT_ENABLE, 1); 4408 WREG32(mec_int_cntl_reg, mec_int_cntl); 4409 break; 4410 default: 4411 break; 4412 } 4413 } 4414 4415 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 4416 struct amdgpu_irq_src *source, 4417 unsigned type, 4418 enum amdgpu_interrupt_state state) 4419 { 4420 switch (state) { 4421 case AMDGPU_IRQ_STATE_DISABLE: 4422 case AMDGPU_IRQ_STATE_ENABLE: 4423 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4424 PRIV_REG_INT_ENABLE, 4425 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4426 break; 4427 default: 4428 break; 4429 } 4430 4431 return 0; 4432 } 4433 4434 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 4435 struct amdgpu_irq_src *source, 4436 unsigned type, 4437 enum amdgpu_interrupt_state state) 4438 { 4439 switch (state) { 4440 case AMDGPU_IRQ_STATE_DISABLE: 4441 case AMDGPU_IRQ_STATE_ENABLE: 4442 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4443 PRIV_INSTR_INT_ENABLE, 4444 state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); 4445 default: 4446 break; 4447 } 4448 4449 return 0; 4450 } 4451 4452 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev, 4453 struct amdgpu_irq_src *src, 4454 unsigned type, 4455 enum amdgpu_interrupt_state state) 4456 { 4457 switch (type) { 4458 case AMDGPU_CP_IRQ_GFX_EOP: 4459 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state); 4460 break; 4461 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 4462 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 4463 break; 4464 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 4465 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 4466 break; 4467 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 4468 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 4469 break; 4470 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 4471 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 4472 break; 4473 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: 4474 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state); 4475 break; 4476 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: 4477 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state); 4478 break; 4479 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: 4480 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state); 4481 break; 4482 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: 4483 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state); 4484 break; 4485 default: 4486 break; 4487 } 4488 return 0; 4489 } 4490 4491 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev, 4492 struct amdgpu_irq_src *source, 4493 struct amdgpu_iv_entry *entry) 4494 { 4495 int i; 4496 u8 me_id, pipe_id, queue_id; 4497 struct amdgpu_ring *ring; 4498 4499 DRM_DEBUG("IH: CP EOP\n"); 4500 me_id = (entry->ring_id & 0x0c) >> 2; 4501 pipe_id = (entry->ring_id & 0x03) >> 0; 4502 queue_id = (entry->ring_id & 0x70) >> 4; 4503 4504 switch (me_id) { 4505 case 0: 4506 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 4507 break; 4508 case 1: 4509 case 2: 4510 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4511 ring = &adev->gfx.compute_ring[i]; 4512 /* Per-queue interrupt is supported for MEC starting from VI. 4513 * The interrupt can only be enabled/disabled per pipe instead of per queue. 
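 * As a worked decode example (hypothetical value): ring_id 0x25 above gives
 * me_id = (0x25 & 0x0c) >> 2 = 1, pipe_id = 0x25 & 0x03 = 1 and
 * queue_id = (0x25 & 0x70) >> 4 = 2, so only the matching compute ring has
 * its fences processed.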
4514 */ 4515 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) 4516 amdgpu_fence_process(ring); 4517 } 4518 break; 4519 } 4520 return 0; 4521 } 4522 4523 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev, 4524 struct amdgpu_irq_src *source, 4525 struct amdgpu_iv_entry *entry) 4526 { 4527 DRM_ERROR("Illegal register access in command stream\n"); 4528 schedule_work(&adev->reset_work); 4529 return 0; 4530 } 4531 4532 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, 4533 struct amdgpu_irq_src *source, 4534 struct amdgpu_iv_entry *entry) 4535 { 4536 DRM_ERROR("Illegal instruction in command stream\n"); 4537 schedule_work(&adev->reset_work); 4538 return 0; 4539 } 4540 4541 static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev, 4542 struct amdgpu_irq_src *src, 4543 unsigned int type, 4544 enum amdgpu_interrupt_state state) 4545 { 4546 uint32_t tmp, target; 4547 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 4548 4549 if (ring->me == 1) 4550 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); 4551 else 4552 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL); 4553 target += ring->pipe; 4554 4555 switch (type) { 4556 case AMDGPU_CP_KIQ_IRQ_DRIVER0: 4557 if (state == AMDGPU_IRQ_STATE_DISABLE) { 4558 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); 4559 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 4560 GENERIC2_INT_ENABLE, 0); 4561 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); 4562 4563 tmp = RREG32(target); 4564 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, 4565 GENERIC2_INT_ENABLE, 0); 4566 WREG32(target, tmp); 4567 } else { 4568 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); 4569 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 4570 GENERIC2_INT_ENABLE, 1); 4571 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); 4572 4573 tmp = RREG32(target); 4574 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, 4575 GENERIC2_INT_ENABLE, 1); 4576 WREG32(target, tmp); 4577 } 4578 break; 4579 default: 4580 BUG(); /* kiq only support GENERIC2_INT now */ 4581 break; 4582 } 4583 return 0; 4584 } 4585 4586 static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev, 4587 struct amdgpu_irq_src *source, 4588 struct amdgpu_iv_entry *entry) 4589 { 4590 u8 me_id, pipe_id, queue_id; 4591 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 4592 4593 me_id = (entry->ring_id & 0x0c) >> 2; 4594 pipe_id = (entry->ring_id & 0x03) >> 0; 4595 queue_id = (entry->ring_id & 0x70) >> 4; 4596 DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n", 4597 me_id, pipe_id, queue_id); 4598 4599 amdgpu_fence_process(ring); 4600 return 0; 4601 } 4602 4603 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { 4604 .name = "gfx_v9_0", 4605 .early_init = gfx_v9_0_early_init, 4606 .late_init = gfx_v9_0_late_init, 4607 .sw_init = gfx_v9_0_sw_init, 4608 .sw_fini = gfx_v9_0_sw_fini, 4609 .hw_init = gfx_v9_0_hw_init, 4610 .hw_fini = gfx_v9_0_hw_fini, 4611 .suspend = gfx_v9_0_suspend, 4612 .resume = gfx_v9_0_resume, 4613 .is_idle = gfx_v9_0_is_idle, 4614 .wait_for_idle = gfx_v9_0_wait_for_idle, 4615 .soft_reset = gfx_v9_0_soft_reset, 4616 .set_clockgating_state = gfx_v9_0_set_clockgating_state, 4617 .set_powergating_state = gfx_v9_0_set_powergating_state, 4618 .get_clockgating_state = gfx_v9_0_get_clockgating_state, 4619 }; 4620 4621 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { 4622 .type = AMDGPU_RING_TYPE_GFX, 4623 .align_mask = 0xff, 4624 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4625 .support_64bit_ptrs = true, 4626 .vmhub = AMDGPU_GFXHUB, 4627 .get_rptr = gfx_v9_0_ring_get_rptr_gfx, 
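/* rptr is read from the writeback slot that the CP keeps updated, while
 * wptr is owned by the driver and published via the doorbell (or the
 * CP_RB0_WPTR registers when no doorbell is used); see the gfx rptr/wptr
 * helpers above. */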
4628 .get_wptr = gfx_v9_0_ring_get_wptr_gfx, 4629 .set_wptr = gfx_v9_0_ring_set_wptr_gfx, 4630 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 4631 5 + /* COND_EXEC */ 4632 7 + /* PIPELINE_SYNC */ 4633 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4634 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4635 2 + /* VM_FLUSH */ 4636 8 + /* FENCE for VM_FLUSH */ 4637 20 + /* GDS switch */ 4638 4 + /* double SWITCH_BUFFER, 4639 the first COND_EXEC jump to the place just 4640 prior to this double SWITCH_BUFFER */ 4641 5 + /* COND_EXEC */ 4642 7 + /* HDP_flush */ 4643 4 + /* VGT_flush */ 4644 14 + /* CE_META */ 4645 31 + /* DE_META */ 4646 3 + /* CNTX_CTRL */ 4647 5 + /* HDP_INVL */ 4648 8 + 8 + /* FENCE x2 */ 4649 2, /* SWITCH_BUFFER */ 4650 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */ 4651 .emit_ib = gfx_v9_0_ring_emit_ib_gfx, 4652 .emit_fence = gfx_v9_0_ring_emit_fence, 4653 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, 4654 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, 4655 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, 4656 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, 4657 .test_ring = gfx_v9_0_ring_test_ring, 4658 .test_ib = gfx_v9_0_ring_test_ib, 4659 .insert_nop = amdgpu_ring_insert_nop, 4660 .pad_ib = amdgpu_ring_generic_pad_ib, 4661 .emit_switch_buffer = gfx_v9_ring_emit_sb, 4662 .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl, 4663 .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec, 4664 .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec, 4665 .emit_tmz = gfx_v9_0_ring_emit_tmz, 4666 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4667 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4668 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, 4669 }; 4670 4671 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { 4672 .type = AMDGPU_RING_TYPE_COMPUTE, 4673 .align_mask = 0xff, 4674 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4675 .support_64bit_ptrs = true, 4676 .vmhub = AMDGPU_GFXHUB, 4677 .get_rptr = gfx_v9_0_ring_get_rptr_compute, 4678 .get_wptr = gfx_v9_0_ring_get_wptr_compute, 4679 .set_wptr = gfx_v9_0_ring_set_wptr_compute, 4680 .emit_frame_size = 4681 20 + /* gfx_v9_0_ring_emit_gds_switch */ 4682 7 + /* gfx_v9_0_ring_emit_hdp_flush */ 4683 5 + /* hdp invalidate */ 4684 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ 4685 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4686 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4687 2 + /* gfx_v9_0_ring_emit_vm_flush */ 4688 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ 4689 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ 4690 .emit_ib = gfx_v9_0_ring_emit_ib_compute, 4691 .emit_fence = gfx_v9_0_ring_emit_fence, 4692 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, 4693 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, 4694 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, 4695 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, 4696 .test_ring = gfx_v9_0_ring_test_ring, 4697 .test_ib = gfx_v9_0_ring_test_ib, 4698 .insert_nop = amdgpu_ring_insert_nop, 4699 .pad_ib = amdgpu_ring_generic_pad_ib, 4700 .set_priority = gfx_v9_0_ring_set_priority_compute, 4701 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4702 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4703 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, 4704 }; 4705 4706 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { 4707 .type = AMDGPU_RING_TYPE_KIQ, 4708 .align_mask = 0xff, 4709 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4710 .support_64bit_ptrs = true, 4711 .vmhub = AMDGPU_GFXHUB, 4712 .get_rptr = gfx_v9_0_ring_get_rptr_compute, 4713 .get_wptr = 
gfx_v9_0_ring_get_wptr_compute, 4714 .set_wptr = gfx_v9_0_ring_set_wptr_compute, 4715 .emit_frame_size = 4716 20 + /* gfx_v9_0_ring_emit_gds_switch */ 4717 7 + /* gfx_v9_0_ring_emit_hdp_flush */ 4718 5 + /* hdp invalidate */ 4719 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ 4720 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4721 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4722 2 + /* gfx_v9_0_ring_emit_vm_flush */ 4723 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 4724 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ 4725 .emit_ib = gfx_v9_0_ring_emit_ib_compute, 4726 .emit_fence = gfx_v9_0_ring_emit_fence_kiq, 4727 .test_ring = gfx_v9_0_ring_test_ring, 4728 .test_ib = gfx_v9_0_ring_test_ib, 4729 .insert_nop = amdgpu_ring_insert_nop, 4730 .pad_ib = amdgpu_ring_generic_pad_ib, 4731 .emit_rreg = gfx_v9_0_ring_emit_rreg, 4732 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4733 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4734 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, 4735 }; 4736 4737 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) 4738 { 4739 int i; 4740 4741 adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq; 4742 4743 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4744 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx; 4745 4746 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4747 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute; 4748 } 4749 4750 static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = { 4751 .set = gfx_v9_0_kiq_set_interrupt_state, 4752 .process = gfx_v9_0_kiq_irq, 4753 }; 4754 4755 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = { 4756 .set = gfx_v9_0_set_eop_interrupt_state, 4757 .process = gfx_v9_0_eop_irq, 4758 }; 4759 4760 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = { 4761 .set = gfx_v9_0_set_priv_reg_fault_state, 4762 .process = gfx_v9_0_priv_reg_irq, 4763 }; 4764 4765 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = { 4766 .set = gfx_v9_0_set_priv_inst_fault_state, 4767 .process = gfx_v9_0_priv_inst_irq, 4768 }; 4769 4770 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev) 4771 { 4772 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 4773 adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs; 4774 4775 adev->gfx.priv_reg_irq.num_types = 1; 4776 adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs; 4777 4778 adev->gfx.priv_inst_irq.num_types = 1; 4779 adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs; 4780 4781 adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; 4782 adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs; 4783 } 4784 4785 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) 4786 { 4787 switch (adev->asic_type) { 4788 case CHIP_VEGA10: 4789 case CHIP_VEGA12: 4790 case CHIP_VEGA20: 4791 case CHIP_RAVEN: 4792 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; 4793 break; 4794 default: 4795 break; 4796 } 4797 } 4798 4799 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) 4800 { 4801 /* init asic gds info */ 4802 adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE); 4803 adev->gds.gws.total_size = 64; 4804 adev->gds.oa.total_size = 16; 4805 4806 if (adev->gds.mem.total_size == 64 * 1024) { 4807 adev->gds.mem.gfx_partition_size = 4096; 4808 adev->gds.mem.cs_partition_size = 4096; 4809 4810 adev->gds.gws.gfx_partition_size = 4; 4811 adev->gds.gws.cs_partition_size = 4; 4812 4813 adev->gds.oa.gfx_partition_size = 4; 4814 adev->gds.oa.cs_partition_size = 1; 4815 } else { 4816 adev->gds.mem.gfx_partition_size = 1024; 4817 adev->gds.mem.cs_partition_size = 1024; 4818 4819 adev->gds.gws.gfx_partition_size = 16; 4820 adev->gds.gws.cs_partition_size = 16; 4821 4822 adev->gds.oa.gfx_partition_size = 4; 4823 adev->gds.oa.cs_partition_size = 4; 4824 } 4825 } 4826 4827 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, 4828 u32 bitmap) 4829 { 4830 u32 data; 4831 4832 if (!bitmap) 4833 return; 4834 4835 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 4836 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4837 4838 WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data); 4839 } 4840 4841 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev) 4842 { 4843 u32 data, mask; 4844 4845 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG); 4846 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG); 4847 4848 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4849 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 4850 4851 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh); 4852 4853 return (~data) & mask; 4854 } 4855 4856 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, 4857 struct amdgpu_cu_info *cu_info) 4858 { 4859 int i, j, k, counter, active_cu_number = 0; 4860 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; 4861 unsigned disable_masks[4 * 2]; 4862 4863 if (!adev || !cu_info) 4864 return -EINVAL; 4865 4866 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); 4867 4868 mutex_lock(&adev->grbm_idx_mutex); 4869 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 4870 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 4871 mask = 1; 4872 ao_bitmap = 0; 4873 counter = 0; 4874 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); 4875 if (i < 4 && j < 2) 4876 gfx_v9_0_set_user_cu_inactive_bitmap( 4877 adev, disable_masks[i * 2 + j]); 4878 bitmap = gfx_v9_0_get_cu_active_bitmap(adev); 4879 cu_info->bitmap[i][j] = bitmap; 4880 4881 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 4882 if (bitmap & mask) { 4883 if (counter < adev->gfx.config.max_cu_per_sh) 4884 ao_bitmap |= mask; 4885 counter++; 4886 } 4887 mask <<= 1; 4888 } 4889 active_cu_number += counter; 4890 if (i < 2 && j < 2) 4891 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 4892 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; 4893 } 4894 } 4895 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 4896 mutex_unlock(&adev->grbm_idx_mutex); 4897 4898 cu_info->number = active_cu_number; 4899 cu_info->ao_cu_mask = ao_cu_mask; 4900 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 4901 4902 return 0; 4903 } 4904 4905 const struct amdgpu_ip_block_version gfx_v9_0_ip_block = 4906 { 4907 .type = AMD_IP_BLOCK_TYPE_GFX, 4908 .major = 9, 4909 .minor = 0, 4910 .rev = 0, 4911 .funcs = &gfx_v9_0_ip_funcs, 4912 }; 4913
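/* Usage sketch (an assumption about the caller, not part of this file): the
 * soc15 setup code is expected to register this IP block alongside the other
 * soc15 blocks, e.g.:
 *
 *   amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 */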