/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"

static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
                                       struct qcm_process_device *qpd,
                                       enum cache_policy default_policy,
                                       enum cache_policy alternate_policy,
                                       void __user *alternate_aperture_base,
                                       uint64_t alternate_aperture_size);
static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
                                             struct qcm_process_device *qpd,
                                             enum cache_policy default_policy,
                                             enum cache_policy alternate_policy,
                                             void __user *alternate_aperture_base,
                                             uint64_t alternate_aperture_size);
static int update_qpd_vi(struct device_queue_manager *dqm,
                         struct qcm_process_device *qpd);
static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
                               struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
                         struct qcm_process_device *qpd);
static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
                               struct queue *q,
                               struct qcm_process_device *qpd);

void device_queue_manager_init_vi(
                struct device_queue_manager_asic_ops *asic_ops)
{
        asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
        asic_ops->update_qpd = update_qpd_vi;
        asic_ops->init_sdma_vm = init_sdma_vm;
}

void device_queue_manager_init_vi_tonga(
                struct device_queue_manager_asic_ops *asic_ops)
{
        asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
        asic_ops->update_qpd = update_qpd_vi_tonga;
        asic_ops->init_sdma_vm = init_sdma_vm_tonga;
}

static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
        /* In 64-bit mode, we can only control the top 3 bits of the LDS,
         * scratch and GPUVM apertures.
         * The hardware fills in the remaining 59 bits according to the
         * following pattern:
         * LDS:         X0000000'00000000 - X0000001'00000000 (4GB)
         * Scratch:     X0000001'00000000 - X0000002'00000000 (4GB)
         * GPUVM:       Y0010000'00000000 - Y0020000'00000000 (1TB)
         *
         * (where X/Y is the configurable nybble with the low-bit 0)
         *
         * LDS and scratch will have the same top nybble programmed in the
         * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
         * GPUVM can have a different top nybble programmed in the
         * top 3 bits of SH_MEM_BASES.SHARED_BASE.
         * We don't bother to support different top nybbles
         * for LDS/Scratch and GPUVM.
         */

        WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
                top_address_nybble == 0);

        return top_address_nybble << 12 |
                (top_address_nybble << 12) <<
                SH_MEM_BASES__SHARED_BASE__SHIFT;
}

static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
                                       struct qcm_process_device *qpd,
                                       enum cache_policy default_policy,
                                       enum cache_policy alternate_policy,
                                       void __user *alternate_aperture_base,
                                       uint64_t alternate_aperture_size)
{
        uint32_t default_mtype;
        uint32_t ape1_mtype;

        default_mtype = (default_policy == cache_policy_coherent) ?
                        MTYPE_CC :
                        MTYPE_NC;

        ape1_mtype = (alternate_policy == cache_policy_coherent) ?
                        MTYPE_CC :
                        MTYPE_NC;

        qpd->sh_mem_config = (qpd->sh_mem_config &
                        SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
                SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
                default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
                ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
                SH_MEM_CONFIG__PRIVATE_ATC_MASK;

        return true;
}

static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
                                             struct qcm_process_device *qpd,
                                             enum cache_policy default_policy,
                                             enum cache_policy alternate_policy,
                                             void __user *alternate_aperture_base,
                                             uint64_t alternate_aperture_size)
{
        uint32_t default_mtype;
        uint32_t ape1_mtype;

        default_mtype = (default_policy == cache_policy_coherent) ?
                        MTYPE_UC :
                        MTYPE_NC;

        ape1_mtype = (alternate_policy == cache_policy_coherent) ?
                        MTYPE_UC :
                        MTYPE_NC;

        qpd->sh_mem_config =
                        SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                                SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
                        default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
                        ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;

        return true;
}

static int update_qpd_vi(struct device_queue_manager *dqm,
                         struct qcm_process_device *qpd)
{
        struct kfd_process_device *pdd;
        unsigned int temp;

        pdd = qpd_to_pdd(qpd);

        /* check if sh_mem_config register already configured */
        if (qpd->sh_mem_config == 0) {
                qpd->sh_mem_config =
                        SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                                SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
                        MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
                        MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
                        SH_MEM_CONFIG__PRIVATE_ATC_MASK;

                qpd->sh_mem_ape1_limit = 0;
                qpd->sh_mem_ape1_base = 0;
        }

        if (qpd->pqm->process->is_32bit_user_mode) {
                temp = get_sh_mem_bases_32(pdd);
                qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
                qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
                                SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
        } else {
                temp = get_sh_mem_bases_nybble_64(pdd);
                qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
                qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
                                SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
                qpd->sh_mem_config |= 1 <<
                                SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
        }

        pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
                qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);

        return 0;
}

static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
                               struct qcm_process_device *qpd)
{
        struct kfd_process_device *pdd;
        unsigned int temp;

        pdd = qpd_to_pdd(qpd);

        /* check if sh_mem_config register already configured */
        if (qpd->sh_mem_config == 0) {
                qpd->sh_mem_config =
                        SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                                SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
                        MTYPE_UC <<
                                SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
                        MTYPE_UC <<
                                SH_MEM_CONFIG__APE1_MTYPE__SHIFT;

                qpd->sh_mem_ape1_limit = 0;
                qpd->sh_mem_ape1_base = 0;
        }

        /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
         * aperture addresses.
         */
        temp = get_sh_mem_bases_nybble_64(pdd);
        qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);

        pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
                temp, qpd->sh_mem_bases);

        return 0;
}

static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
                         struct qcm_process_device *qpd)
{
        uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);

        if (q->process->is_32bit_user_mode)
                value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
                                get_sh_mem_bases_32(qpd_to_pdd(qpd));
        else
                value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
                                SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
                                SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;

        q->properties.sdma_vm_addr = value;
}

static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
                               struct queue *q,
                               struct qcm_process_device *qpd)
{
        /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
         * aperture addresses.
         */
        q->properties.sdma_vm_addr =
                ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
                 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
                 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
}
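
/*
 * Illustrative note (not part of the original driver): a worked example of
 * what compute_sh_mem_bases_64bit() above produces, assuming the gfx v8
 * SH_MEM_BASES layout in which PRIVATE_BASE occupies the low 16 bits and
 * SHARED_BASE the high 16 bits, each supplying the top bits of the
 * corresponding aperture base address.
 *
 * For top_address_nybble = 0x8 (even, non-zero, <= 0xE, so it passes the
 * WARN_ON check):
 *
 *   PRIVATE_BASE = 0x8 << 12 = 0x8000
 *   SHARED_BASE  = 0x8 << 12 = 0x8000
 *
 * Per the aperture table in compute_sh_mem_bases_64bit(), the hardware then
 * places LDS at 0x8000000000000000, scratch at 0x8000000100000000 and GPUVM
 * at 0x8001000000000000, i.e. all three apertures share the top nybble 0x8.
 */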