1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/delay.h> 25 #include <linux/firmware.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_ucode.h" 31 #include "amdgpu_trace.h" 32 33 #include "gc/gc_11_0_0_offset.h" 34 #include "gc/gc_11_0_0_sh_mask.h" 35 #include "gc/gc_11_0_0_default.h" 36 #include "hdp/hdp_6_0_0_offset.h" 37 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" 38 39 #include "soc15_common.h" 40 #include "soc15.h" 41 #include "sdma_v6_0_0_pkt_open.h" 42 #include "nbio_v4_3.h" 43 #include "sdma_common.h" 44 #include "sdma_v6_0.h" 45 #include "v11_structs.h" 46 47 MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin"); 48 MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin"); 49 MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin"); 50 MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin"); 51 MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin"); 52 MODULE_FIRMWARE("amdgpu/sdma_6_1_1.bin"); 53 54 #define SDMA1_REG_OFFSET 0x600 55 #define SDMA0_HYP_DEC_REG_START 0x5880 56 #define SDMA0_HYP_DEC_REG_END 0x589a 57 #define SDMA1_HYP_DEC_REG_OFFSET 0x20 58 59 static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev); 60 static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev); 61 static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev); 62 static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev); 63 static int sdma_v6_0_start(struct amdgpu_device *adev); 64 65 static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) 66 { 67 u32 base; 68 69 if (internal_offset >= SDMA0_HYP_DEC_REG_START && 70 internal_offset <= SDMA0_HYP_DEC_REG_END) { 71 base = adev->reg_offset[GC_HWIP][0][1]; 72 if (instance != 0) 73 internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance; 74 } else { 75 base = adev->reg_offset[GC_HWIP][0][0]; 76 if (instance == 1) 77 internal_offset += SDMA1_REG_OFFSET; 78 } 79 80 return base + internal_offset; 81 } 82 83 static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring) 84 { 85 unsigned ret; 86 87 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COND_EXE)); 88 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); 89 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); 90 amdgpu_ring_write(ring, 1); 91 ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */ 92 amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */ 93 94 return ret; 95 } 96 97 static void sdma_v6_0_ring_patch_cond_exec(struct amdgpu_ring *ring, 98 unsigned offset) 99 { 100 unsigned cur; 101 102 BUG_ON(offset > ring->buf_mask); 103 BUG_ON(ring->ring[offset] != 0x55aa55aa); 104 105 cur = (ring->wptr - 1) & ring->buf_mask; 106 if (cur > offset) 107 ring->ring[offset] = cur - offset; 108 else 109 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur; 110 } 111 112 /** 113 * sdma_v6_0_ring_get_rptr - get the current read pointer 114 * 115 * @ring: amdgpu ring pointer 116 * 117 * Get the current rptr from the hardware. 118 */ 119 static uint64_t sdma_v6_0_ring_get_rptr(struct amdgpu_ring *ring) 120 { 121 u64 *rptr; 122 123 /* XXX check if swapping is necessary on BE */ 124 rptr = (u64 *)ring->rptr_cpu_addr; 125 126 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr); 127 return ((*rptr) >> 2); 128 } 129 130 /** 131 * sdma_v6_0_ring_get_wptr - get the current write pointer 132 * 133 * @ring: amdgpu ring pointer 134 * 135 * Get the current wptr from the hardware. 136 */ 137 static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring) 138 { 139 u64 wptr = 0; 140 141 if (ring->use_doorbell) { 142 /* XXX check if swapping is necessary on BE */ 143 wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr)); 144 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); 145 } 146 147 return wptr >> 2; 148 } 149 150 /** 151 * sdma_v6_0_ring_set_wptr - commit the write pointer 152 * 153 * @ring: amdgpu ring pointer 154 * 155 * Write the wptr back to the hardware. 156 */ 157 static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring) 158 { 159 struct amdgpu_device *adev = ring->adev; 160 161 if (ring->use_doorbell) { 162 DRM_DEBUG("Using doorbell -- " 163 "wptr_offs == 0x%08x " 164 "lower_32_bits(ring->wptr) << 2 == 0x%08x " 165 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", 166 ring->wptr_offs, 167 lower_32_bits(ring->wptr << 2), 168 upper_32_bits(ring->wptr << 2)); 169 /* XXX check if swapping is necessary on BE */ 170 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 171 ring->wptr << 2); 172 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", 173 ring->doorbell_index, ring->wptr << 2); 174 WDOORBELL64(ring->doorbell_index, ring->wptr << 2); 175 } else { 176 DRM_DEBUG("Not using doorbell -- " 177 "regSDMA%i_GFX_RB_WPTR == 0x%08x " 178 "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", 179 ring->me, 180 lower_32_bits(ring->wptr << 2), 181 ring->me, 182 upper_32_bits(ring->wptr << 2)); 183 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 184 ring->me, regSDMA0_QUEUE0_RB_WPTR), 185 lower_32_bits(ring->wptr << 2)); 186 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 187 ring->me, regSDMA0_QUEUE0_RB_WPTR_HI), 188 upper_32_bits(ring->wptr << 2)); 189 } 190 } 191 192 static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 193 { 194 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); 195 int i; 196 197 for (i = 0; i < count; i++) 198 if (sdma && sdma->burst_nop && (i == 0)) 199 amdgpu_ring_write(ring, ring->funcs->nop | 200 SDMA_PKT_NOP_HEADER_COUNT(count - 1)); 201 else 202 amdgpu_ring_write(ring, ring->funcs->nop); 203 } 204 205 /* 206 * sdma_v6_0_ring_emit_ib - Schedule an IB on the DMA engine 207 * 208 * @ring: amdgpu ring pointer 209 * @ib: IB object to schedule 210 * @flags: unused 211 * @job: job to retrieve vmid from 212 * 213 * Schedule an IB in the DMA ring. 214 */ 215 static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring, 216 struct amdgpu_job *job, 217 struct amdgpu_ib *ib, 218 uint32_t flags) 219 { 220 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 221 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); 222 223 /* An IB packet must end on a 8 DW boundary--the next dword 224 * must be on a 8-dword boundary. Our IB packet below is 6 225 * dwords long, thus add x number of NOPs, such that, in 226 * modular arithmetic, 227 * wptr + 6 + x = 8k, k >= 0, which in C is, 228 * (wptr + 6 + x) % 8 = 0. 229 * The expression below, is a solution of x. 230 */ 231 sdma_v6_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); 232 233 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_INDIRECT) | 234 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); 235 /* base must be 32 byte aligned */ 236 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); 237 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 238 amdgpu_ring_write(ring, ib->length_dw); 239 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr)); 240 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr)); 241 } 242 243 /** 244 * sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse 245 * 246 * @ring: amdgpu ring pointer 247 * 248 * flush the IB by graphics cache rinse. 249 */ 250 static void sdma_v6_0_ring_emit_mem_sync(struct amdgpu_ring *ring) 251 { 252 uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV | 253 SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | 254 SDMA_GCR_GLI_INV(1); 255 256 /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */ 257 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_GCR_REQ)); 258 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0)); 259 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) | 260 SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0)); 261 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) | 262 SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16)); 263 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) | 264 SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0)); 265 } 266 267 268 /** 269 * sdma_v6_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring 270 * 271 * @ring: amdgpu ring pointer 272 * 273 * Emit an hdp flush packet on the requested DMA ring. 274 */ 275 static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 276 { 277 struct amdgpu_device *adev = ring->adev; 278 u32 ref_and_mask = 0; 279 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 280 281 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; 282 283 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) | 284 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | 285 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ 286 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); 287 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); 288 amdgpu_ring_write(ring, ref_and_mask); /* reference */ 289 amdgpu_ring_write(ring, ref_and_mask); /* mask */ 290 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 291 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ 292 } 293 294 /** 295 * sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring 296 * 297 * @ring: amdgpu ring pointer 298 * @addr: address 299 * @seq: fence seq number 300 * @flags: fence flags 301 * 302 * Add a DMA fence packet to the ring to write 303 * the fence seq number and DMA trap packet to generate 304 * an interrupt if needed. 305 */ 306 static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, 307 unsigned flags) 308 { 309 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 310 /* write the fence */ 311 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) | 312 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */ 313 /* zero in first two bits */ 314 BUG_ON(addr & 0x3); 315 amdgpu_ring_write(ring, lower_32_bits(addr)); 316 amdgpu_ring_write(ring, upper_32_bits(addr)); 317 amdgpu_ring_write(ring, lower_32_bits(seq)); 318 319 /* optionally write high bits as well */ 320 if (write64bit) { 321 addr += 4; 322 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) | 323 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); 324 /* zero in first two bits */ 325 BUG_ON(addr & 0x3); 326 amdgpu_ring_write(ring, lower_32_bits(addr)); 327 amdgpu_ring_write(ring, upper_32_bits(addr)); 328 amdgpu_ring_write(ring, upper_32_bits(seq)); 329 } 330 331 if (flags & AMDGPU_FENCE_FLAG_INT) { 332 uint32_t ctx = ring->is_mes_queue ? 333 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0; 334 /* generate an interrupt */ 335 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP)); 336 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx)); 337 } 338 } 339 340 /** 341 * sdma_v6_0_gfx_stop - stop the gfx async dma engines 342 * 343 * @adev: amdgpu_device pointer 344 * 345 * Stop the gfx async dma ring buffers. 346 */ 347 static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev) 348 { 349 u32 rb_cntl, ib_cntl; 350 int i; 351 352 for (i = 0; i < adev->sdma.num_instances; i++) { 353 rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); 354 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0); 355 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); 356 ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); 357 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0); 358 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); 359 } 360 } 361 362 /** 363 * sdma_v6_0_rlc_stop - stop the compute async dma engines 364 * 365 * @adev: amdgpu_device pointer 366 * 367 * Stop the compute async dma queues. 368 */ 369 static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev) 370 { 371 /* XXX todo */ 372 } 373 374 /** 375 * sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts 376 * 377 * @adev: amdgpu_device pointer 378 * @enable: enable/disable context switching due to queue empty conditions 379 * 380 * Enable or disable the async dma engines queue empty context switch. 381 */ 382 static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable) 383 { 384 u32 f32_cntl; 385 int i; 386 387 if (!amdgpu_sriov_vf(adev)) { 388 for (i = 0; i < adev->sdma.num_instances; i++) { 389 f32_cntl = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL)); 390 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, 391 CTXEMPTY_INT_ENABLE, enable ? 1 : 0); 392 WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL), f32_cntl); 393 } 394 } 395 } 396 397 /** 398 * sdma_v6_0_enable - stop the async dma engines 399 * 400 * @adev: amdgpu_device pointer 401 * @enable: enable/disable the DMA MEs. 402 * 403 * Halt or unhalt the async dma engines. 404 */ 405 static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable) 406 { 407 u32 f32_cntl; 408 int i; 409 410 if (!enable) { 411 sdma_v6_0_gfx_stop(adev); 412 sdma_v6_0_rlc_stop(adev); 413 } 414 415 if (amdgpu_sriov_vf(adev)) 416 return; 417 418 for (i = 0; i < adev->sdma.num_instances; i++) { 419 f32_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); 420 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); 421 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), f32_cntl); 422 } 423 } 424 425 /** 426 * sdma_v6_0_gfx_resume - setup and start the async dma engines 427 * 428 * @adev: amdgpu_device pointer 429 * 430 * Set up the gfx DMA ring buffers and enable them. 431 * Returns 0 for success, error for failure. 432 */ 433 static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) 434 { 435 struct amdgpu_ring *ring; 436 u32 rb_cntl, ib_cntl; 437 u32 rb_bufsz; 438 u32 doorbell; 439 u32 doorbell_offset; 440 u32 temp; 441 u64 wptr_gpu_addr; 442 int i, r; 443 444 for (i = 0; i < adev->sdma.num_instances; i++) { 445 ring = &adev->sdma.instance[i].ring; 446 447 if (!amdgpu_sriov_vf(adev)) 448 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); 449 450 /* Set ring buffer size in dwords */ 451 rb_bufsz = order_base_2(ring->ring_size / 4); 452 rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); 453 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); 454 #ifdef __BIG_ENDIAN 455 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1); 456 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, 457 RPTR_WRITEBACK_SWAP_ENABLE, 1); 458 #endif 459 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1); 460 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); 461 462 /* Initialize the ring buffer's read and write pointers */ 463 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0); 464 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0); 465 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0); 466 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0); 467 468 /* setup the wptr shadow polling */ 469 wptr_gpu_addr = ring->wptr_gpu_addr; 470 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO), 471 lower_32_bits(wptr_gpu_addr)); 472 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI), 473 upper_32_bits(wptr_gpu_addr)); 474 475 /* set the wb address whether it's enabled or not */ 476 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI), 477 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); 478 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO), 479 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); 480 481 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); 482 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); 483 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); 484 485 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); 486 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40); 487 488 ring->wptr = 0; 489 490 /* before programing wptr to a less value, need set minor_ptr_update first */ 491 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1); 492 493 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ 494 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2); 495 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); 496 } 497 498 doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL)); 499 doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET)); 500 501 if (ring->use_doorbell) { 502 doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); 503 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET, 504 OFFSET, ring->doorbell_index); 505 } else { 506 doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0); 507 } 508 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell); 509 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset); 510 511 if (i == 0) 512 adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, 513 ring->doorbell_index, 514 adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); 515 516 if (amdgpu_sriov_vf(adev)) 517 sdma_v6_0_ring_set_wptr(ring); 518 519 /* set minor_ptr_update to 0 after wptr programed */ 520 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); 521 522 /* Set up RESP_MODE to non-copy addresses */ 523 temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); 524 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); 525 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); 526 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp); 527 528 /* program default cache read and write policy */ 529 temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE)); 530 /* clean read policy and write policy bits */ 531 temp &= 0xFF0FFF; 532 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | 533 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | 534 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); 535 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp); 536 537 if (!amdgpu_sriov_vf(adev)) { 538 /* unhalt engine */ 539 temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); 540 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); 541 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0); 542 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp); 543 } 544 545 /* enable DMA RB */ 546 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1); 547 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); 548 549 ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); 550 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1); 551 #ifdef __BIG_ENDIAN 552 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1); 553 #endif 554 /* enable DMA IBs */ 555 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); 556 557 if (amdgpu_sriov_vf(adev)) 558 sdma_v6_0_enable(adev, true); 559 560 r = amdgpu_ring_test_helper(ring); 561 if (r) 562 return r; 563 } 564 565 return 0; 566 } 567 568 /** 569 * sdma_v6_0_rlc_resume - setup and start the async dma engines 570 * 571 * @adev: amdgpu_device pointer 572 * 573 * Set up the compute DMA queues and enable them. 574 * Returns 0 for success, error for failure. 575 */ 576 static int sdma_v6_0_rlc_resume(struct amdgpu_device *adev) 577 { 578 return 0; 579 } 580 581 /** 582 * sdma_v6_0_load_microcode - load the sDMA ME ucode 583 * 584 * @adev: amdgpu_device pointer 585 * 586 * Loads the sDMA0/1 ucode. 587 * Returns 0 for success, -EINVAL if the ucode is not available. 588 */ 589 static int sdma_v6_0_load_microcode(struct amdgpu_device *adev) 590 { 591 const struct sdma_firmware_header_v2_0 *hdr; 592 const __le32 *fw_data; 593 u32 fw_size; 594 int i, j; 595 bool use_broadcast; 596 597 /* halt the MEs */ 598 sdma_v6_0_enable(adev, false); 599 600 if (!adev->sdma.instance[0].fw) 601 return -EINVAL; 602 603 /* use broadcast mode to load SDMA microcode by default */ 604 use_broadcast = true; 605 606 if (use_broadcast) { 607 dev_info(adev->dev, "Use broadcast method to load SDMA firmware\n"); 608 /* load Control Thread microcode */ 609 hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data; 610 amdgpu_ucode_print_sdma_hdr(&hdr->header); 611 fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4; 612 613 fw_data = (const __le32 *) 614 (adev->sdma.instance[0].fw->data + 615 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 616 617 WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0); 618 619 for (j = 0; j < fw_size; j++) { 620 if (amdgpu_emu_mode == 1 && j % 500 == 0) 621 msleep(1); 622 WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++)); 623 } 624 625 /* load Context Switch microcode */ 626 fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4; 627 628 fw_data = (const __le32 *) 629 (adev->sdma.instance[0].fw->data + 630 le32_to_cpu(hdr->ctl_ucode_offset)); 631 632 WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0x8000); 633 634 for (j = 0; j < fw_size; j++) { 635 if (amdgpu_emu_mode == 1 && j % 500 == 0) 636 msleep(1); 637 WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++)); 638 } 639 } else { 640 dev_info(adev->dev, "Use legacy method to load SDMA firmware\n"); 641 for (i = 0; i < adev->sdma.num_instances; i++) { 642 /* load Control Thread microcode */ 643 hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data; 644 amdgpu_ucode_print_sdma_hdr(&hdr->header); 645 fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4; 646 647 fw_data = (const __le32 *) 648 (adev->sdma.instance[0].fw->data + 649 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 650 651 WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0); 652 653 for (j = 0; j < fw_size; j++) { 654 if (amdgpu_emu_mode == 1 && j % 500 == 0) 655 msleep(1); 656 WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); 657 } 658 659 WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version); 660 661 /* load Context Switch microcode */ 662 fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4; 663 664 fw_data = (const __le32 *) 665 (adev->sdma.instance[0].fw->data + 666 le32_to_cpu(hdr->ctl_ucode_offset)); 667 668 WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0x8000); 669 670 for (j = 0; j < fw_size; j++) { 671 if (amdgpu_emu_mode == 1 && j % 500 == 0) 672 msleep(1); 673 WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); 674 } 675 676 WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version); 677 } 678 } 679 680 return 0; 681 } 682 683 static int sdma_v6_0_soft_reset(void *handle) 684 { 685 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 686 u32 tmp; 687 int i; 688 689 sdma_v6_0_gfx_stop(adev); 690 691 for (i = 0; i < adev->sdma.num_instances; i++) { 692 tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE)); 693 tmp |= SDMA0_FREEZE__FREEZE_MASK; 694 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE), tmp); 695 tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); 696 tmp |= SDMA0_F32_CNTL__HALT_MASK; 697 tmp |= SDMA0_F32_CNTL__TH1_RESET_MASK; 698 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), tmp); 699 700 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_PREEMPT), 0); 701 702 udelay(100); 703 704 tmp = GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK << i; 705 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); 706 tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 707 708 udelay(100); 709 710 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, 0); 711 tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 712 713 udelay(100); 714 } 715 716 return sdma_v6_0_start(adev); 717 } 718 719 static bool sdma_v6_0_check_soft_reset(void *handle) 720 { 721 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 722 struct amdgpu_ring *ring; 723 int i, r; 724 long tmo = msecs_to_jiffies(1000); 725 726 for (i = 0; i < adev->sdma.num_instances; i++) { 727 ring = &adev->sdma.instance[i].ring; 728 r = amdgpu_ring_test_ib(ring, tmo); 729 if (r) 730 return true; 731 } 732 733 return false; 734 } 735 736 /** 737 * sdma_v6_0_start - setup and start the async dma engines 738 * 739 * @adev: amdgpu_device pointer 740 * 741 * Set up the DMA engines and enable them. 742 * Returns 0 for success, error for failure. 743 */ 744 static int sdma_v6_0_start(struct amdgpu_device *adev) 745 { 746 int r = 0; 747 748 if (amdgpu_sriov_vf(adev)) { 749 sdma_v6_0_enable(adev, false); 750 751 /* set RB registers */ 752 r = sdma_v6_0_gfx_resume(adev); 753 return r; 754 } 755 756 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 757 r = sdma_v6_0_load_microcode(adev); 758 if (r) 759 return r; 760 761 /* The value of regSDMA_F32_CNTL is invalid the moment after loading fw */ 762 if (amdgpu_emu_mode == 1) 763 msleep(1000); 764 } 765 766 /* unhalt the MEs */ 767 sdma_v6_0_enable(adev, true); 768 /* enable sdma ring preemption */ 769 sdma_v6_0_ctxempty_int_enable(adev, true); 770 771 /* start the gfx rings and rlc compute queues */ 772 r = sdma_v6_0_gfx_resume(adev); 773 if (r) 774 return r; 775 r = sdma_v6_0_rlc_resume(adev); 776 777 return r; 778 } 779 780 static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd, 781 struct amdgpu_mqd_prop *prop) 782 { 783 struct v11_sdma_mqd *m = mqd; 784 uint64_t wb_gpu_addr; 785 786 m->sdmax_rlcx_rb_cntl = 787 order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT | 788 1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | 789 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT | 790 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT; 791 792 m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8); 793 m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8); 794 795 wb_gpu_addr = prop->wptr_gpu_addr; 796 m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr); 797 m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr); 798 799 wb_gpu_addr = prop->rptr_gpu_addr; 800 m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr); 801 m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr); 802 803 m->sdmax_rlcx_ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 0, 804 regSDMA0_QUEUE0_IB_CNTL)); 805 806 m->sdmax_rlcx_doorbell_offset = 807 prop->doorbell_index << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; 808 809 m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); 810 811 m->sdmax_rlcx_skip_cntl = 0; 812 m->sdmax_rlcx_context_status = 0; 813 m->sdmax_rlcx_doorbell_log = 0; 814 815 m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT; 816 m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT; 817 818 return 0; 819 } 820 821 static void sdma_v6_0_set_mqd_funcs(struct amdgpu_device *adev) 822 { 823 adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v11_sdma_mqd); 824 adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v6_0_mqd_init; 825 } 826 827 /** 828 * sdma_v6_0_ring_test_ring - simple async dma engine test 829 * 830 * @ring: amdgpu_ring structure holding ring information 831 * 832 * Test the DMA engine by writing using it to write an 833 * value to memory. 834 * Returns 0 for success, error for failure. 835 */ 836 static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring) 837 { 838 struct amdgpu_device *adev = ring->adev; 839 unsigned i; 840 unsigned index; 841 int r; 842 u32 tmp; 843 u64 gpu_addr; 844 volatile uint32_t *cpu_ptr = NULL; 845 846 tmp = 0xCAFEDEAD; 847 848 if (ring->is_mes_queue) { 849 uint32_t offset = 0; 850 offset = amdgpu_mes_ctx_get_offs(ring, 851 AMDGPU_MES_CTX_PADDING_OFFS); 852 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 853 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 854 *cpu_ptr = tmp; 855 } else { 856 r = amdgpu_device_wb_get(adev, &index); 857 if (r) { 858 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); 859 return r; 860 } 861 862 gpu_addr = adev->wb.gpu_addr + (index * 4); 863 adev->wb.wb[index] = cpu_to_le32(tmp); 864 } 865 866 r = amdgpu_ring_alloc(ring, 5); 867 if (r) { 868 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 869 amdgpu_device_wb_free(adev, index); 870 return r; 871 } 872 873 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) | 874 SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); 875 amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); 876 amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); 877 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0)); 878 amdgpu_ring_write(ring, 0xDEADBEEF); 879 amdgpu_ring_commit(ring); 880 881 for (i = 0; i < adev->usec_timeout; i++) { 882 if (ring->is_mes_queue) 883 tmp = le32_to_cpu(*cpu_ptr); 884 else 885 tmp = le32_to_cpu(adev->wb.wb[index]); 886 if (tmp == 0xDEADBEEF) 887 break; 888 if (amdgpu_emu_mode == 1) 889 msleep(1); 890 else 891 udelay(1); 892 } 893 894 if (i >= adev->usec_timeout) 895 r = -ETIMEDOUT; 896 897 if (!ring->is_mes_queue) 898 amdgpu_device_wb_free(adev, index); 899 900 return r; 901 } 902 903 /* 904 * sdma_v6_0_ring_test_ib - test an IB on the DMA engine 905 * 906 * @ring: amdgpu_ring structure holding ring information 907 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT 908 * 909 * Test a simple IB in the DMA ring. 910 * Returns 0 on success, error on failure. 911 */ 912 static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 913 { 914 struct amdgpu_device *adev = ring->adev; 915 struct amdgpu_ib ib; 916 struct dma_fence *f = NULL; 917 unsigned index; 918 long r; 919 u32 tmp = 0; 920 u64 gpu_addr; 921 volatile uint32_t *cpu_ptr = NULL; 922 923 tmp = 0xCAFEDEAD; 924 memset(&ib, 0, sizeof(ib)); 925 926 if (ring->is_mes_queue) { 927 uint32_t offset = 0; 928 offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); 929 ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 930 ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 931 932 offset = amdgpu_mes_ctx_get_offs(ring, 933 AMDGPU_MES_CTX_PADDING_OFFS); 934 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 935 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 936 *cpu_ptr = tmp; 937 } else { 938 r = amdgpu_device_wb_get(adev, &index); 939 if (r) { 940 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); 941 return r; 942 } 943 944 gpu_addr = adev->wb.gpu_addr + (index * 4); 945 adev->wb.wb[index] = cpu_to_le32(tmp); 946 947 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); 948 if (r) { 949 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 950 goto err0; 951 } 952 } 953 954 ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) | 955 SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); 956 ib.ptr[1] = lower_32_bits(gpu_addr); 957 ib.ptr[2] = upper_32_bits(gpu_addr); 958 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0); 959 ib.ptr[4] = 0xDEADBEEF; 960 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); 961 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); 962 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); 963 ib.length_dw = 8; 964 965 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 966 if (r) 967 goto err1; 968 969 r = dma_fence_wait_timeout(f, false, timeout); 970 if (r == 0) { 971 DRM_ERROR("amdgpu: IB test timed out\n"); 972 r = -ETIMEDOUT; 973 goto err1; 974 } else if (r < 0) { 975 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); 976 goto err1; 977 } 978 979 if (ring->is_mes_queue) 980 tmp = le32_to_cpu(*cpu_ptr); 981 else 982 tmp = le32_to_cpu(adev->wb.wb[index]); 983 984 if (tmp == 0xDEADBEEF) 985 r = 0; 986 else 987 r = -EINVAL; 988 989 err1: 990 amdgpu_ib_free(adev, &ib, NULL); 991 dma_fence_put(f); 992 err0: 993 if (!ring->is_mes_queue) 994 amdgpu_device_wb_free(adev, index); 995 return r; 996 } 997 998 999 /** 1000 * sdma_v6_0_vm_copy_pte - update PTEs by copying them from the GART 1001 * 1002 * @ib: indirect buffer to fill with commands 1003 * @pe: addr of the page entry 1004 * @src: src addr to copy from 1005 * @count: number of page entries to update 1006 * 1007 * Update PTEs by copying them from the GART using sDMA. 1008 */ 1009 static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib, 1010 uint64_t pe, uint64_t src, 1011 unsigned count) 1012 { 1013 unsigned bytes = count * 8; 1014 1015 ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | 1016 SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); 1017 ib->ptr[ib->length_dw++] = bytes - 1; 1018 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ 1019 ib->ptr[ib->length_dw++] = lower_32_bits(src); 1020 ib->ptr[ib->length_dw++] = upper_32_bits(src); 1021 ib->ptr[ib->length_dw++] = lower_32_bits(pe); 1022 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 1023 1024 } 1025 1026 /** 1027 * sdma_v6_0_vm_write_pte - update PTEs by writing them manually 1028 * 1029 * @ib: indirect buffer to fill with commands 1030 * @pe: addr of the page entry 1031 * @value: dst addr to write into pe 1032 * @count: number of page entries to update 1033 * @incr: increase next addr by incr bytes 1034 * 1035 * Update PTEs by writing them manually using sDMA. 1036 */ 1037 static void sdma_v6_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, 1038 uint64_t value, unsigned count, 1039 uint32_t incr) 1040 { 1041 unsigned ndw = count * 2; 1042 1043 ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) | 1044 SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); 1045 ib->ptr[ib->length_dw++] = lower_32_bits(pe); 1046 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 1047 ib->ptr[ib->length_dw++] = ndw - 1; 1048 for (; ndw > 0; ndw -= 2) { 1049 ib->ptr[ib->length_dw++] = lower_32_bits(value); 1050 ib->ptr[ib->length_dw++] = upper_32_bits(value); 1051 value += incr; 1052 } 1053 } 1054 1055 /** 1056 * sdma_v6_0_vm_set_pte_pde - update the page tables using sDMA 1057 * 1058 * @ib: indirect buffer to fill with commands 1059 * @pe: addr of the page entry 1060 * @addr: dst addr to write into pe 1061 * @count: number of page entries to update 1062 * @incr: increase next addr by incr bytes 1063 * @flags: access flags 1064 * 1065 * Update the page tables using sDMA. 1066 */ 1067 static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib, 1068 uint64_t pe, 1069 uint64_t addr, unsigned count, 1070 uint32_t incr, uint64_t flags) 1071 { 1072 /* for physically contiguous pages (vram) */ 1073 ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_PTEPDE); 1074 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ 1075 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 1076 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */ 1077 ib->ptr[ib->length_dw++] = upper_32_bits(flags); 1078 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ 1079 ib->ptr[ib->length_dw++] = upper_32_bits(addr); 1080 ib->ptr[ib->length_dw++] = incr; /* increment size */ 1081 ib->ptr[ib->length_dw++] = 0; 1082 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */ 1083 } 1084 1085 /* 1086 * sdma_v6_0_ring_pad_ib - pad the IB 1087 * @ib: indirect buffer to fill with padding 1088 * @ring: amdgpu ring pointer 1089 * 1090 * Pad the IB with NOPs to a boundary multiple of 8. 1091 */ 1092 static void sdma_v6_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) 1093 { 1094 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); 1095 u32 pad_count; 1096 int i; 1097 1098 pad_count = (-ib->length_dw) & 0x7; 1099 for (i = 0; i < pad_count; i++) 1100 if (sdma && sdma->burst_nop && (i == 0)) 1101 ib->ptr[ib->length_dw++] = 1102 SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP) | 1103 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1); 1104 else 1105 ib->ptr[ib->length_dw++] = 1106 SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP); 1107 } 1108 1109 /** 1110 * sdma_v6_0_ring_emit_pipeline_sync - sync the pipeline 1111 * 1112 * @ring: amdgpu_ring pointer 1113 * 1114 * Make sure all previous operations are completed (CIK). 1115 */ 1116 static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 1117 { 1118 uint32_t seq = ring->fence_drv.sync_seq; 1119 uint64_t addr = ring->fence_drv.gpu_addr; 1120 1121 /* wait for idle */ 1122 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1123 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | 1124 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ 1125 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1)); 1126 amdgpu_ring_write(ring, addr & 0xfffffffc); 1127 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 1128 amdgpu_ring_write(ring, seq); /* reference */ 1129 amdgpu_ring_write(ring, 0xffffffff); /* mask */ 1130 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 1131 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ 1132 } 1133 1134 /* 1135 * sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA 1136 * 1137 * @ring: amdgpu_ring pointer 1138 * @vmid: vmid number to use 1139 * @pd_addr: address 1140 * 1141 * Update the page table base and flush the VM TLB 1142 * using sDMA. 1143 */ 1144 static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1145 unsigned vmid, uint64_t pd_addr) 1146 { 1147 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; 1148 uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); 1149 1150 /* Update the PD address for this VMID. */ 1151 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + 1152 (hub->ctx_addr_distance * vmid), 1153 lower_32_bits(pd_addr)); 1154 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + 1155 (hub->ctx_addr_distance * vmid), 1156 upper_32_bits(pd_addr)); 1157 1158 /* Trigger invalidation. */ 1159 amdgpu_ring_write(ring, 1160 SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1161 SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) | 1162 SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) | 1163 SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f)); 1164 amdgpu_ring_write(ring, req); 1165 amdgpu_ring_write(ring, 0xFFFFFFFF); 1166 amdgpu_ring_write(ring, 1167 SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) | 1168 SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F)); 1169 } 1170 1171 static void sdma_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, 1172 uint32_t reg, uint32_t val) 1173 { 1174 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_SRBM_WRITE) | 1175 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 1176 amdgpu_ring_write(ring, reg); 1177 amdgpu_ring_write(ring, val); 1178 } 1179 1180 static void sdma_v6_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 1181 uint32_t val, uint32_t mask) 1182 { 1183 amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1184 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | 1185 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */ 1186 amdgpu_ring_write(ring, reg << 2); 1187 amdgpu_ring_write(ring, 0); 1188 amdgpu_ring_write(ring, val); /* reference */ 1189 amdgpu_ring_write(ring, mask); /* mask */ 1190 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 1191 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); 1192 } 1193 1194 static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 1195 uint32_t reg0, uint32_t reg1, 1196 uint32_t ref, uint32_t mask) 1197 { 1198 amdgpu_ring_emit_wreg(ring, reg0, ref); 1199 /* wait for a cycle to reset vm_inv_eng*_ack */ 1200 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0); 1201 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); 1202 } 1203 1204 static struct amdgpu_sdma_ras sdma_v6_0_3_ras = { 1205 .ras_block = { 1206 .ras_late_init = amdgpu_ras_block_late_init, 1207 }, 1208 }; 1209 1210 static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev) 1211 { 1212 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1213 case IP_VERSION(6, 0, 3): 1214 adev->sdma.ras = &sdma_v6_0_3_ras; 1215 break; 1216 default: 1217 break; 1218 } 1219 } 1220 1221 static int sdma_v6_0_early_init(void *handle) 1222 { 1223 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1224 int r; 1225 1226 r = amdgpu_sdma_init_microcode(adev, 0, true); 1227 if (r) 1228 return r; 1229 1230 sdma_v6_0_set_ring_funcs(adev); 1231 sdma_v6_0_set_buffer_funcs(adev); 1232 sdma_v6_0_set_vm_pte_funcs(adev); 1233 sdma_v6_0_set_irq_funcs(adev); 1234 sdma_v6_0_set_mqd_funcs(adev); 1235 sdma_v6_0_set_ras_funcs(adev); 1236 1237 return 0; 1238 } 1239 1240 static int sdma_v6_0_sw_init(void *handle) 1241 { 1242 struct amdgpu_ring *ring; 1243 int r, i; 1244 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1245 1246 /* SDMA trap event */ 1247 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, 1248 GFX_11_0_0__SRCID__SDMA_TRAP, 1249 &adev->sdma.trap_irq); 1250 if (r) 1251 return r; 1252 1253 for (i = 0; i < adev->sdma.num_instances; i++) { 1254 ring = &adev->sdma.instance[i].ring; 1255 ring->ring_obj = NULL; 1256 ring->use_doorbell = true; 1257 ring->me = i; 1258 1259 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, 1260 ring->use_doorbell?"true":"false"); 1261 1262 ring->doorbell_index = 1263 (adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset 1264 1265 ring->vm_hub = AMDGPU_GFXHUB(0); 1266 sprintf(ring->name, "sdma%d", i); 1267 r = amdgpu_ring_init(adev, ring, 1024, 1268 &adev->sdma.trap_irq, 1269 AMDGPU_SDMA_IRQ_INSTANCE0 + i, 1270 AMDGPU_RING_PRIO_DEFAULT, NULL); 1271 if (r) 1272 return r; 1273 } 1274 1275 if (amdgpu_sdma_ras_sw_init(adev)) { 1276 dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); 1277 return -EINVAL; 1278 } 1279 1280 return r; 1281 } 1282 1283 static int sdma_v6_0_sw_fini(void *handle) 1284 { 1285 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1286 int i; 1287 1288 for (i = 0; i < adev->sdma.num_instances; i++) 1289 amdgpu_ring_fini(&adev->sdma.instance[i].ring); 1290 1291 amdgpu_sdma_destroy_inst_ctx(adev, true); 1292 1293 return 0; 1294 } 1295 1296 static int sdma_v6_0_hw_init(void *handle) 1297 { 1298 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1299 1300 return sdma_v6_0_start(adev); 1301 } 1302 1303 static int sdma_v6_0_hw_fini(void *handle) 1304 { 1305 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1306 1307 if (amdgpu_sriov_vf(adev)) 1308 return 0; 1309 1310 sdma_v6_0_ctxempty_int_enable(adev, false); 1311 sdma_v6_0_enable(adev, false); 1312 1313 return 0; 1314 } 1315 1316 static int sdma_v6_0_suspend(void *handle) 1317 { 1318 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1319 1320 return sdma_v6_0_hw_fini(adev); 1321 } 1322 1323 static int sdma_v6_0_resume(void *handle) 1324 { 1325 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1326 1327 return sdma_v6_0_hw_init(adev); 1328 } 1329 1330 static bool sdma_v6_0_is_idle(void *handle) 1331 { 1332 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1333 u32 i; 1334 1335 for (i = 0; i < adev->sdma.num_instances; i++) { 1336 u32 tmp = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_STATUS_REG)); 1337 1338 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) 1339 return false; 1340 } 1341 1342 return true; 1343 } 1344 1345 static int sdma_v6_0_wait_for_idle(void *handle) 1346 { 1347 unsigned i; 1348 u32 sdma0, sdma1; 1349 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1350 1351 for (i = 0; i < adev->usec_timeout; i++) { 1352 sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG)); 1353 sdma1 = RREG32(sdma_v6_0_get_reg_offset(adev, 1, regSDMA0_STATUS_REG)); 1354 1355 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK) 1356 return 0; 1357 udelay(1); 1358 } 1359 return -ETIMEDOUT; 1360 } 1361 1362 static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring) 1363 { 1364 int i, r = 0; 1365 struct amdgpu_device *adev = ring->adev; 1366 u32 index = 0; 1367 u64 sdma_gfx_preempt; 1368 1369 amdgpu_sdma_get_index_from_ring(ring, &index); 1370 sdma_gfx_preempt = 1371 sdma_v6_0_get_reg_offset(adev, index, regSDMA0_QUEUE0_PREEMPT); 1372 1373 /* assert preemption condition */ 1374 amdgpu_ring_set_preempt_cond_exec(ring, false); 1375 1376 /* emit the trailing fence */ 1377 ring->trail_seq += 1; 1378 amdgpu_ring_alloc(ring, 10); 1379 sdma_v6_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr, 1380 ring->trail_seq, 0); 1381 amdgpu_ring_commit(ring); 1382 1383 /* assert IB preemption */ 1384 WREG32(sdma_gfx_preempt, 1); 1385 1386 /* poll the trailing fence */ 1387 for (i = 0; i < adev->usec_timeout; i++) { 1388 if (ring->trail_seq == 1389 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 1390 break; 1391 udelay(1); 1392 } 1393 1394 if (i >= adev->usec_timeout) { 1395 r = -EINVAL; 1396 DRM_ERROR("ring %d failed to be preempted\n", ring->idx); 1397 } 1398 1399 /* deassert IB preemption */ 1400 WREG32(sdma_gfx_preempt, 0); 1401 1402 /* deassert the preemption condition */ 1403 amdgpu_ring_set_preempt_cond_exec(ring, true); 1404 return r; 1405 } 1406 1407 static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, 1408 struct amdgpu_irq_src *source, 1409 unsigned type, 1410 enum amdgpu_interrupt_state state) 1411 { 1412 u32 sdma_cntl; 1413 1414 u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL); 1415 1416 if (!amdgpu_sriov_vf(adev)) { 1417 sdma_cntl = RREG32(reg_offset); 1418 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1419 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 1420 WREG32(reg_offset, sdma_cntl); 1421 } 1422 1423 return 0; 1424 } 1425 1426 static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev, 1427 struct amdgpu_irq_src *source, 1428 struct amdgpu_iv_entry *entry) 1429 { 1430 int instances, queue; 1431 uint32_t mes_queue_id = entry->src_data[0]; 1432 1433 DRM_DEBUG("IH: SDMA trap\n"); 1434 1435 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 1436 struct amdgpu_mes_queue *queue; 1437 1438 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 1439 1440 spin_lock(&adev->mes.queue_id_lock); 1441 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 1442 if (queue) { 1443 DRM_DEBUG("process smda queue id = %d\n", mes_queue_id); 1444 amdgpu_fence_process(queue->ring); 1445 } 1446 spin_unlock(&adev->mes.queue_id_lock); 1447 return 0; 1448 } 1449 1450 queue = entry->ring_id & 0xf; 1451 instances = (entry->ring_id & 0xf0) >> 4; 1452 if (instances > 1) { 1453 DRM_ERROR("IH: wrong ring_ID detected, as wrong sdma instance\n"); 1454 return -EINVAL; 1455 } 1456 1457 switch (entry->client_id) { 1458 case SOC21_IH_CLIENTID_GFX: 1459 switch (queue) { 1460 case 0: 1461 amdgpu_fence_process(&adev->sdma.instance[instances].ring); 1462 break; 1463 default: 1464 break; 1465 } 1466 break; 1467 } 1468 return 0; 1469 } 1470 1471 static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev, 1472 struct amdgpu_irq_src *source, 1473 struct amdgpu_iv_entry *entry) 1474 { 1475 return 0; 1476 } 1477 1478 static int sdma_v6_0_set_clockgating_state(void *handle, 1479 enum amd_clockgating_state state) 1480 { 1481 return 0; 1482 } 1483 1484 static int sdma_v6_0_set_powergating_state(void *handle, 1485 enum amd_powergating_state state) 1486 { 1487 return 0; 1488 } 1489 1490 static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags) 1491 { 1492 } 1493 1494 const struct amd_ip_funcs sdma_v6_0_ip_funcs = { 1495 .name = "sdma_v6_0", 1496 .early_init = sdma_v6_0_early_init, 1497 .late_init = NULL, 1498 .sw_init = sdma_v6_0_sw_init, 1499 .sw_fini = sdma_v6_0_sw_fini, 1500 .hw_init = sdma_v6_0_hw_init, 1501 .hw_fini = sdma_v6_0_hw_fini, 1502 .suspend = sdma_v6_0_suspend, 1503 .resume = sdma_v6_0_resume, 1504 .is_idle = sdma_v6_0_is_idle, 1505 .wait_for_idle = sdma_v6_0_wait_for_idle, 1506 .soft_reset = sdma_v6_0_soft_reset, 1507 .check_soft_reset = sdma_v6_0_check_soft_reset, 1508 .set_clockgating_state = sdma_v6_0_set_clockgating_state, 1509 .set_powergating_state = sdma_v6_0_set_powergating_state, 1510 .get_clockgating_state = sdma_v6_0_get_clockgating_state, 1511 }; 1512 1513 static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = { 1514 .type = AMDGPU_RING_TYPE_SDMA, 1515 .align_mask = 0xf, 1516 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 1517 .support_64bit_ptrs = true, 1518 .secure_submission_supported = true, 1519 .get_rptr = sdma_v6_0_ring_get_rptr, 1520 .get_wptr = sdma_v6_0_ring_get_wptr, 1521 .set_wptr = sdma_v6_0_ring_set_wptr, 1522 .emit_frame_size = 1523 5 + /* sdma_v6_0_ring_init_cond_exec */ 1524 6 + /* sdma_v6_0_ring_emit_hdp_flush */ 1525 6 + /* sdma_v6_0_ring_emit_pipeline_sync */ 1526 /* sdma_v6_0_ring_emit_vm_flush */ 1527 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 1528 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 1529 10 + 10 + 10, /* sdma_v6_0_ring_emit_fence x3 for user fence, vm fence */ 1530 .emit_ib_size = 5 + 7 + 6, /* sdma_v6_0_ring_emit_ib */ 1531 .emit_ib = sdma_v6_0_ring_emit_ib, 1532 .emit_mem_sync = sdma_v6_0_ring_emit_mem_sync, 1533 .emit_fence = sdma_v6_0_ring_emit_fence, 1534 .emit_pipeline_sync = sdma_v6_0_ring_emit_pipeline_sync, 1535 .emit_vm_flush = sdma_v6_0_ring_emit_vm_flush, 1536 .emit_hdp_flush = sdma_v6_0_ring_emit_hdp_flush, 1537 .test_ring = sdma_v6_0_ring_test_ring, 1538 .test_ib = sdma_v6_0_ring_test_ib, 1539 .insert_nop = sdma_v6_0_ring_insert_nop, 1540 .pad_ib = sdma_v6_0_ring_pad_ib, 1541 .emit_wreg = sdma_v6_0_ring_emit_wreg, 1542 .emit_reg_wait = sdma_v6_0_ring_emit_reg_wait, 1543 .emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait, 1544 .init_cond_exec = sdma_v6_0_ring_init_cond_exec, 1545 .patch_cond_exec = sdma_v6_0_ring_patch_cond_exec, 1546 .preempt_ib = sdma_v6_0_ring_preempt_ib, 1547 }; 1548 1549 static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev) 1550 { 1551 int i; 1552 1553 for (i = 0; i < adev->sdma.num_instances; i++) { 1554 adev->sdma.instance[i].ring.funcs = &sdma_v6_0_ring_funcs; 1555 adev->sdma.instance[i].ring.me = i; 1556 } 1557 } 1558 1559 static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = { 1560 .set = sdma_v6_0_set_trap_irq_state, 1561 .process = sdma_v6_0_process_trap_irq, 1562 }; 1563 1564 static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = { 1565 .process = sdma_v6_0_process_illegal_inst_irq, 1566 }; 1567 1568 static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev) 1569 { 1570 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + 1571 adev->sdma.num_instances; 1572 adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs; 1573 adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs; 1574 } 1575 1576 /** 1577 * sdma_v6_0_emit_copy_buffer - copy buffer using the sDMA engine 1578 * 1579 * @ib: indirect buffer to fill with commands 1580 * @src_offset: src GPU address 1581 * @dst_offset: dst GPU address 1582 * @byte_count: number of bytes to xfer 1583 * @tmz: if a secure copy should be used 1584 * 1585 * Copy GPU buffers using the DMA engine. 1586 * Used by the amdgpu ttm implementation to move pages if 1587 * registered as the asic copy callback. 1588 */ 1589 static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib, 1590 uint64_t src_offset, 1591 uint64_t dst_offset, 1592 uint32_t byte_count, 1593 bool tmz) 1594 { 1595 ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | 1596 SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | 1597 SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); 1598 ib->ptr[ib->length_dw++] = byte_count - 1; 1599 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ 1600 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); 1601 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); 1602 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); 1603 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); 1604 } 1605 1606 /** 1607 * sdma_v6_0_emit_fill_buffer - fill buffer using the sDMA engine 1608 * 1609 * @ib: indirect buffer to fill 1610 * @src_data: value to write to buffer 1611 * @dst_offset: dst GPU address 1612 * @byte_count: number of bytes to xfer 1613 * 1614 * Fill GPU buffers using the DMA engine. 1615 */ 1616 static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib, 1617 uint32_t src_data, 1618 uint64_t dst_offset, 1619 uint32_t byte_count) 1620 { 1621 ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL); 1622 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); 1623 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); 1624 ib->ptr[ib->length_dw++] = src_data; 1625 ib->ptr[ib->length_dw++] = byte_count - 1; 1626 } 1627 1628 static const struct amdgpu_buffer_funcs sdma_v6_0_buffer_funcs = { 1629 .copy_max_bytes = 0x400000, 1630 .copy_num_dw = 7, 1631 .emit_copy_buffer = sdma_v6_0_emit_copy_buffer, 1632 1633 .fill_max_bytes = 0x400000, 1634 .fill_num_dw = 5, 1635 .emit_fill_buffer = sdma_v6_0_emit_fill_buffer, 1636 }; 1637 1638 static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev) 1639 { 1640 adev->mman.buffer_funcs = &sdma_v6_0_buffer_funcs; 1641 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; 1642 } 1643 1644 static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = { 1645 .copy_pte_num_dw = 7, 1646 .copy_pte = sdma_v6_0_vm_copy_pte, 1647 .write_pte = sdma_v6_0_vm_write_pte, 1648 .set_pte_pde = sdma_v6_0_vm_set_pte_pde, 1649 }; 1650 1651 static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev) 1652 { 1653 unsigned i; 1654 1655 adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs; 1656 for (i = 0; i < adev->sdma.num_instances; i++) { 1657 adev->vm_manager.vm_pte_scheds[i] = 1658 &adev->sdma.instance[i].ring.sched; 1659 } 1660 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; 1661 } 1662 1663 const struct amdgpu_ip_block_version sdma_v6_0_ip_block = { 1664 .type = AMD_IP_BLOCK_TYPE_SDMA, 1665 .major = 6, 1666 .minor = 0, 1667 .rev = 0, 1668 .funcs = &sdma_v6_0_ip_funcs, 1669 }; 1670