1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25 #include "amdgpu.h"
26 #include "amdgpu_atomfirmware.h"
27 #include "gmc_v10_0.h"
28 #include "umc_v8_7.h"
29
30 #include "athub/athub_2_0_0_sh_mask.h"
31 #include "athub/athub_2_0_0_offset.h"
32 #include "dcn/dcn_2_0_0_offset.h"
33 #include "dcn/dcn_2_0_0_sh_mask.h"
34 #include "oss/osssys_5_0_0_offset.h"
35 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
36 #include "navi10_enum.h"
37
38 #include "soc15.h"
39 #include "soc15d.h"
40 #include "soc15_common.h"
41
42 #include "nbio_v2_3.h"
43
44 #include "gfxhub_v2_0.h"
45 #include "gfxhub_v2_1.h"
46 #include "mmhub_v2_0.h"
47 #include "mmhub_v2_3.h"
48 #include "athub_v2_0.h"
49 #include "athub_v2_1.h"
50
51 #if 0
52 static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
53 {
54 /* TODO add golden setting for hdp */
55 };
56 #endif
57
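/**
 * gmc_v10_0_ecc_interrupt_state - configure the ECC interrupt state
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source this callback is registered for
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Nothing has to be programmed for the ECC interrupt on GMC v10, so the
 * callback simply returns success.
 */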
58 static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
59 struct amdgpu_irq_src *src,
60 unsigned type,
61 enum amdgpu_interrupt_state state)
62 {
63 return 0;
64 }
65
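/**
 * gmc_v10_0_vm_fault_interrupt_state - enable/disable VM fault reporting
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source this callback is registered for
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Programs the VM fault interrupt masks of both the MMHUB and the GFXHUB
 * according to the requested state.
 */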
66 static int
67 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
68 struct amdgpu_irq_src *src, unsigned type,
69 enum amdgpu_interrupt_state state)
70 {
71 switch (state) {
72 case AMDGPU_IRQ_STATE_DISABLE:
73 /* MM HUB */
74 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
75 /* GFX HUB */
76 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
77 break;
78 case AMDGPU_IRQ_STATE_ENABLE:
79 /* MM HUB */
80 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
81 /* GFX HUB */
82 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
83 break;
84 default:
85 break;
86 }
87
88 return 0;
89 }
90
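/**
 * gmc_v10_0_process_interrupt - handle a VM page fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source the fault was delivered on
 * @entry: decoded interrupt vector entry
 *
 * Reconstructs the faulting address from the IV entry, filters and retries
 * recoverable faults, and prints a rate-limited report including the
 * offending task and the L2 protection fault status.
 */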
91 static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
92 struct amdgpu_irq_src *source,
93 struct amdgpu_iv_entry *entry)
94 {
95 bool retry_fault = !!(entry->src_data[1] & 0x80);
96 struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
97 struct amdgpu_task_info task_info;
98 uint32_t status = 0;
99 u64 addr;
100
101 addr = (u64)entry->src_data[0] << 12;
102 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
103
104 if (retry_fault) {
105 /* Returning 1 here also prevents sending the IV to the KFD */
106
107 /* Process it only if it's the first fault for this address */
108 if (entry->ih != &adev->irq.ih_soft &&
109 amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
110 entry->timestamp))
111 return 1;
112
113 /* Delegate it to a different ring if the hardware hasn't
114 * already done it.
115 */
116 if (entry->ih == &adev->irq.ih) {
117 amdgpu_irq_delegate(adev, entry, 8);
118 return 1;
119 }
120
121 /* Try to handle the recoverable page faults by filling page
122 * tables
123 */
124 if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
125 return 1;
126 }
127
128 if (!amdgpu_sriov_vf(adev)) {
129 /*
130 * Issue a dummy read to wait for the status register to
131 * be updated to avoid reading an incorrect value due to
132 * the new fast GRBM interface.
133 */
134 if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
135 (adev->asic_type < CHIP_SIENNA_CICHLID))
136 RREG32(hub->vm_l2_pro_fault_status);
137
138 status = RREG32(hub->vm_l2_pro_fault_status);
139 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
140 }
141
142 if (!printk_ratelimit())
143 return 0;
144
145 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
146 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
147
148 dev_err(adev->dev,
149 "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
150 "for process %s pid %d thread %s pid %d)\n",
151 entry->vmid_src ? "mmhub" : "gfxhub",
152 entry->src_id, entry->ring_id, entry->vmid,
153 entry->pasid, task_info.process_name, task_info.tgid,
154 task_info.task_name, task_info.pid);
155 dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
156 addr, entry->client_id,
157 soc15_ih_clientid_name[entry->client_id]);
158
159 if (!amdgpu_sriov_vf(adev))
160 hub->vmhub_funcs->print_l2_protection_fault_status(adev,
161 status);
162
163 return 0;
164 }
165
166 static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
167 .set = gmc_v10_0_vm_fault_interrupt_state,
168 .process = gmc_v10_0_process_interrupt,
169 };
170
171 static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
172 .set = gmc_v10_0_ecc_interrupt_state,
173 .process = amdgpu_umc_process_ecc_irq,
174 };
175
176 static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
177 {
178 adev->gmc.vm_fault.num_types = 1;
179 adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
180
181 if (!amdgpu_sriov_vf(adev)) {
182 adev->gmc.ecc_irq.num_types = 1;
183 adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
184 }
185 }
186
187 /**
188 * gmc_v10_0_use_invalidate_semaphore - check whether to use the invalidation semaphore
189 *
190 * @adev: amdgpu_device pointer
191 * @vmhub: vmhub type
192 *
193 */
194 static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
195 uint32_t vmhub)
196 {
197 return ((vmhub == AMDGPU_MMHUB_0 ||
198 vmhub == AMDGPU_MMHUB_1) &&
199 (!amdgpu_sriov_vf(adev)));
200 }
201
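/**
 * gmc_v10_0_get_atc_vmid_pasid_mapping_info - look up the PASID of a VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: VMID whose mapping is queried
 * @p_pasid: returns the PASID currently mapped to @vmid
 *
 * Reads the ATC VMID-to-PASID mapping register and returns true if the
 * mapping is marked valid.
 */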
202 static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
203 struct amdgpu_device *adev,
204 uint8_t vmid, uint16_t *p_pasid)
205 {
206 uint32_t value;
207
208 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
209 + vmid);
210 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
211
212 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
213 }
214
215 /*
216 * GART
217 * VMID 0 is the physical GPU addresses as used by the kernel.
218 * VMIDs 1-15 are used for userspace clients and are handled
219 * by the amdgpu vm/hsa code.
220 */
221
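/**
 * gmc_v10_0_flush_vm_hub - flush the TLB of one hub through MMIO
 *
 * @adev: amdgpu_device pointer
 * @vmid: VMID whose translations are invalidated
 * @vmhub: which hub (GFXHUB/MMHUB) to flush
 * @flush_type: the flush type
 *
 * Issues the invalidation request on engine 17 by register writes and polls
 * for the acknowledge, applying the semaphore workaround where needed.
 */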
222 static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
223 unsigned int vmhub, uint32_t flush_type)
224 {
225 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
226 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
227 u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
228 u32 tmp;
229 /* Use register 17 for GART */
230 const unsigned eng = 17;
231 unsigned int i;
232
233 spin_lock(&adev->gmc.invalidate_lock);
234 /*
235 * The GPU may lose the GPUVM invalidate acknowledge state across a
236 * power-gating cycle. Acquire the semaphore before the invalidation and
237 * release it after the invalidation to avoid entering a power-gated
238 * state, as a workaround for this issue.
239 */
240
241 /* TODO: The semaphore still needs to be debugged before it can be used for GFXHUB as well. */
242 if (use_semaphore) {
243 for (i = 0; i < adev->usec_timeout; i++) {
244 /* a read return value of 1 means the semaphore was acquired */
245 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
246 hub->eng_distance * eng);
247 if (tmp & 0x1)
248 break;
249 udelay(1);
250 }
251
252 if (i >= adev->usec_timeout)
253 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
254 }
255
256 WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
257
258 /*
259 * Issue a dummy read to wait for the ACK register to be cleared
260 * to avoid a false ACK due to the new fast GRBM interface.
261 */
262 if ((vmhub == AMDGPU_GFXHUB_0) &&
263 (adev->asic_type < CHIP_SIENNA_CICHLID))
264 RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng);
265
266 /* Wait for the ACK with a delay. */
267 for (i = 0; i < adev->usec_timeout; i++) {
268 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
269 hub->eng_distance * eng);
270 tmp &= 1 << vmid;
271 if (tmp)
272 break;
273
274 udelay(1);
275 }
276
277 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
278 if (use_semaphore)
279 /*
280 * add semaphore release after invalidation,
281 * write with 0 means semaphore release
282 */
283 WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
284 hub->eng_distance * eng, 0);
285
286 spin_unlock(&adev->gmc.invalidate_lock);
287
288 if (i < adev->usec_timeout)
289 return;
290
291 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
292 }
293
294 /**
295 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
296 *
297 * @adev: amdgpu_device pointer
298 * @vmid: vm instance to flush
299 * @vmhub: vmhub type
300 * @flush_type: the flush type
301 *
302 * Flush the TLB for the requested page table.
303 */
304 static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
305 uint32_t vmhub, uint32_t flush_type)
306 {
307 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
308 struct dma_fence *fence;
309 struct amdgpu_job *job;
310
311 int r;
312
313 /* flush hdp cache */
314 adev->hdp.funcs->flush_hdp(adev, NULL);
315
316 /* During SRIOV runtime the driver shouldn't access registers through MMIO.
317 * Use the KIQ to do the VM invalidation instead.
318 */
319 if (adev->gfx.kiq.ring.sched.ready &&
320 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
321 down_read_trylock(&adev->reset_sem)) {
322 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
323 const unsigned eng = 17;
324 u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
325 u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
326 u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
327
328 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
329 1 << vmid);
330
331 up_read(&adev->reset_sem);
332 return;
333 }
334
335 mutex_lock(&adev->mman.gtt_window_lock);
336
337 if (vmhub == AMDGPU_MMHUB_0) {
338 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
339 mutex_unlock(&adev->mman.gtt_window_lock);
340 return;
341 }
342
343 BUG_ON(vmhub != AMDGPU_GFXHUB_0);
344
345 if (!adev->mman.buffer_funcs_enabled ||
346 !adev->ib_pool_ready ||
347 amdgpu_in_reset(adev) ||
348 ring->sched.ready == false) {
349 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
350 mutex_unlock(&adev->mman.gtt_window_lock);
351 return;
352 }
353
354 /* The SDMA on Navi has a bug which can theoretically result in memory
355 * corruption if an invalidation happens at the same time as a VA
356 * translation. Avoid this by doing the invalidation from the SDMA
357 * itself.
358 */
359 r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
360 &job);
361 if (r)
362 goto error_alloc;
363
364 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
365 job->vm_needs_flush = true;
366 job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
367 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
368 r = amdgpu_job_submit(job, &adev->mman.entity,
369 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
370 if (r)
371 goto error_submit;
372
373 mutex_unlock(&adev->mman.gtt_window_lock);
374
375 dma_fence_wait(fence, false);
376 dma_fence_put(fence);
377
378 return;
379
380 error_submit:
381 amdgpu_job_free(job);
382
383 error_alloc:
384 mutex_unlock(&adev->mman.gtt_window_lock);
385 DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
386 }
387
388 /**
389 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
390 *
391 * @adev: amdgpu_device pointer
392 * @pasid: pasid to be flushed
393 * @flush_type: the flush type
394 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
395 *
396 * Flush the TLB for the requested pasid.
397 */
398 static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
399 uint16_t pasid, uint32_t flush_type,
400 bool all_hub)
401 {
402 int vmid, i;
403 signed long r;
404 uint32_t seq;
405 uint16_t queried_pasid;
406 bool ret;
407 struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
408 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
409
410 if (amdgpu_emu_mode == 0 && ring->sched.ready) {
411 spin_lock(&adev->gfx.kiq.ring_lock);
412 /* 2 dwords flush + 8 dwords fence */
413 amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
414 kiq->pmf->kiq_invalidate_tlbs(ring,
415 pasid, flush_type, all_hub);
416 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
417 if (r) {
418 amdgpu_ring_undo(ring);
419 spin_unlock(&adev->gfx.kiq.ring_lock);
420 return -ETIME;
421 }
422
423 amdgpu_ring_commit(ring);
424 spin_unlock(&adev->gfx.kiq.ring_lock);
425 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
426 if (r < 1) {
427 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
428 return -ETIME;
429 }
430
431 return 0;
432 }
433
434 for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
435
436 ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
437 &queried_pasid);
438 if (ret && queried_pasid == pasid) {
439 if (all_hub) {
440 for (i = 0; i < adev->num_vmhubs; i++)
441 gmc_v10_0_flush_gpu_tlb(adev, vmid,
442 i, flush_type);
443 } else {
444 gmc_v10_0_flush_gpu_tlb(adev, vmid,
445 AMDGPU_GFXHUB_0, flush_type);
446 }
447 break;
448 }
449 }
450
451 return 0;
452 }
453
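/**
 * gmc_v10_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the flush on
 * @vmid: VMID whose translations are invalidated
 * @pd_addr: page directory base address to program for @vmid
 *
 * Emits the register writes that update the page directory base of @vmid
 * and trigger the invalidation, then returns @pd_addr to the caller.
 */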
454 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
455 unsigned vmid, uint64_t pd_addr)
456 {
457 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
458 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
459 uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
460 unsigned eng = ring->vm_inv_eng;
461
462 /*
463 * The GPU may lose the GPUVM invalidate acknowledge state across a
464 * power-gating cycle. Acquire the semaphore before the invalidation and
465 * release it after the invalidation to avoid entering a power-gated
466 * state, as a workaround for this issue.
467 */
468
469 /* TODO: The semaphore still needs to be debugged before it can be used for GFXHUB as well. */
470 if (use_semaphore)
471 /* a read return value of 1 means the semaphore was acquired */
472 amdgpu_ring_emit_reg_wait(ring,
473 hub->vm_inv_eng0_sem +
474 hub->eng_distance * eng, 0x1, 0x1);
475
476 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
477 (hub->ctx_addr_distance * vmid),
478 lower_32_bits(pd_addr));
479
480 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
481 (hub->ctx_addr_distance * vmid),
482 upper_32_bits(pd_addr));
483
484 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
485 hub->eng_distance * eng,
486 hub->vm_inv_eng0_ack +
487 hub->eng_distance * eng,
488 req, 1 << vmid);
489
490 /* TODO: The semaphore still needs to be debugged before it can be used for GFXHUB as well. */
491 if (use_semaphore)
492 /*
493 * add semaphore release after invalidation,
494 * write with 0 means semaphore release
495 */
496 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
497 hub->eng_distance * eng, 0);
498
499 return pd_addr;
500 }
501
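/**
 * gmc_v10_0_emit_pasid_mapping - update the VMID-to-PASID LUT from a ring
 *
 * @ring: ring to emit the register write on
 * @vmid: VMID to map
 * @pasid: PASID to associate with @vmid
 *
 * Writes the PASID into the interrupt handler's VMID lookup table of the
 * hub the ring belongs to.
 */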
502 static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
503 unsigned pasid)
504 {
505 struct amdgpu_device *adev = ring->adev;
506 uint32_t reg;
507
508 if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
509 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
510 else
511 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
512
513 amdgpu_ring_emit_wreg(ring, reg, pasid);
514 }
515
516 /*
517 * PTE format on NAVI 10:
518 * 63:59 reserved
519 * 58 reserved and for sienna_cichlid is used for MALL noalloc
520 * 57 reserved
521 * 56 F
522 * 55 L
523 * 54 reserved
524 * 53:52 SW
525 * 51 T
526 * 50:48 mtype
527 * 47:12 4k physical page base address
528 * 11:7 fragment
529 * 6 write
530 * 5 read
531 * 4 exe
532 * 3 Z
533 * 2 snooped
534 * 1 system
535 * 0 valid
536 *
537 * PDE format on NAVI 10:
538 * 63:59 block fragment size
539 * 58:55 reserved
540 * 54 P
541 * 53:48 reserved
542 * 47:6 physical base address of PD or PTE
543 * 5:3 reserved
544 * 2 C
545 * 1 system
546 * 0 valid
547 */
548
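/**
 * gmc_v10_0_map_mtype - translate VM memory type flags into PTE bits
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_MTYPE_* value selected by the caller
 *
 * Returns the matching AMDGPU_PTE_MTYPE_NV10 encoding, defaulting to
 * MTYPE_NC for unknown values.
 */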
549 static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
550 {
551 switch (flags) {
552 case AMDGPU_VM_MTYPE_DEFAULT:
553 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
554 case AMDGPU_VM_MTYPE_NC:
555 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
556 case AMDGPU_VM_MTYPE_WC:
557 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
558 case AMDGPU_VM_MTYPE_CC:
559 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
560 case AMDGPU_VM_MTYPE_UC:
561 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
562 default:
563 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
564 }
565 }
566
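/**
 * gmc_v10_0_get_vm_pde - adjust PDE address and flags for the hardware
 *
 * @adev: amdgpu_device pointer
 * @level: page directory level being filled
 * @addr: address to patch up
 * @flags: PDE flags to patch up
 *
 * Converts VRAM MC addresses into physical addresses and, when further
 * translation is enabled, sets the block fragment size for PDB1 entries
 * and the translate-further bit for PDB0 entries.
 */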
567 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
568 uint64_t *addr, uint64_t *flags)
569 {
570 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
571 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
572 BUG_ON(*addr & 0xFFFF00000000003FULL);
573
574 if (!adev->gmc.translate_further)
575 return;
576
577 if (level == AMDGPU_VM_PDB1) {
578 /* Set the block fragment size */
579 if (!(*flags & AMDGPU_PDE_PTE))
580 *flags |= AMDGPU_PDE_BFS(0x9);
581
582 } else if (level == AMDGPU_VM_PDB0) {
583 if (*flags & AMDGPU_PDE_PTE)
584 *flags &= ~AMDGPU_PDE_PTE;
585 else
586 *flags |= AMDGPU_PTE_TF;
587 }
588 }
589
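/**
 * gmc_v10_0_get_vm_pte - compute the final PTE flags for a mapping
 *
 * @adev: amdgpu_device pointer
 * @mapping: BO VA mapping being committed
 * @flags: PTE flags to patch up
 *
 * Takes the executable and MTYPE bits from the mapping and, for PRT
 * mappings, sets the PRT, SNOOPED, LOG and SYSTEM bits while clearing
 * VALID.
 */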
590 static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
591 struct amdgpu_bo_va_mapping *mapping,
592 uint64_t *flags)
593 {
594 *flags &= ~AMDGPU_PTE_EXECUTABLE;
595 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
596
597 *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
598 *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
599
600 if (mapping->flags & AMDGPU_PTE_PRT) {
601 *flags |= AMDGPU_PTE_PRT;
602 *flags |= AMDGPU_PTE_SNOOPED;
603 *flags |= AMDGPU_PTE_LOG;
604 *flags |= AMDGPU_PTE_SYSTEM;
605 *flags &= ~AMDGPU_PTE_VALID;
606 }
607 }
608
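/**
 * gmc_v10_0_get_vbios_fb_size - size of the VBIOS reserved framebuffer
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the amount of VRAM the VBIOS is still scanning out: a fixed VGA
 * allocation when VGA mode is enabled, otherwise viewport height times
 * surface pitch times 4 bytes per pixel.
 */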
609 static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
610 {
611 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
612 unsigned size;
613
614 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
615 size = AMDGPU_VBIOS_VGA_ALLOCATION;
616 } else {
617 u32 viewport;
618 u32 pitch;
619
620 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
621 pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
622 size = (REG_GET_FIELD(viewport,
623 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
624 REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
625 4);
626 }
627
628 return size;
629 }
630
631 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
632 .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
633 .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
634 .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
635 .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
636 .map_mtype = gmc_v10_0_map_mtype,
637 .get_vm_pde = gmc_v10_0_get_vm_pde,
638 .get_vm_pte = gmc_v10_0_get_vm_pte,
639 .get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
640 };
641
642 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
643 {
644 if (adev->gmc.gmc_funcs == NULL)
645 adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
646 }
647
648 static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
649 {
650 switch (adev->asic_type) {
651 case CHIP_SIENNA_CICHLID:
652 adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
653 adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
654 adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
655 adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
656 adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
657 adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
658 break;
659 default:
660 break;
661 }
662 }
663
664
665 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
666 {
667 switch (adev->asic_type) {
668 case CHIP_VANGOGH:
669 adev->mmhub.funcs = &mmhub_v2_3_funcs;
670 break;
671 default:
672 adev->mmhub.funcs = &mmhub_v2_0_funcs;
673 break;
674 }
675 }
676
677 static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
678 {
679 switch (adev->asic_type) {
680 case CHIP_SIENNA_CICHLID:
681 case CHIP_NAVY_FLOUNDER:
682 case CHIP_VANGOGH:
683 case CHIP_DIMGREY_CAVEFISH:
684 adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
685 break;
686 default:
687 adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
688 break;
689 }
690 }
691
692
693 static int gmc_v10_0_early_init(void *handle)
694 {
695 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
696
697 gmc_v10_0_set_mmhub_funcs(adev);
698 gmc_v10_0_set_gfxhub_funcs(adev);
699 gmc_v10_0_set_gmc_funcs(adev);
700 gmc_v10_0_set_irq_funcs(adev);
701 gmc_v10_0_set_umc_funcs(adev);
702
703 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
704 adev->gmc.shared_aperture_end =
705 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
706 adev->gmc.private_aperture_start = 0x1000000000000000ULL;
707 adev->gmc.private_aperture_end =
708 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
709
710 return 0;
711 }
712
713 static int gmc_v10_0_late_init(void *handle)
714 {
715 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
716 int r;
717
718 r = amdgpu_gmc_allocate_vm_inv_eng(adev);
719 if (r)
720 return r;
721
722 r = amdgpu_gmc_ras_late_init(adev);
723 if (r)
724 return r;
725
726 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
727 }
728
729 static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
730 struct amdgpu_gmc *mc)
731 {
732 u64 base = 0;
733
734 base = adev->gfxhub.funcs->get_fb_location(adev);
735
736 /* add the xgmi offset of the physical node */
737 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
738
739 amdgpu_gmc_vram_location(adev, &adev->gmc, base);
740 amdgpu_gmc_gart_location(adev, mc);
741 amdgpu_gmc_agp_location(adev, mc);
742
743 /* base offset of vram pages */
744 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
745
746 /* add the xgmi offset of the physical node */
747 adev->vm_manager.vram_base_offset +=
748 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
749 }
750
751 /**
752 * gmc_v10_0_mc_init - initialize the memory controller driver params
753 *
754 * @adev: amdgpu_device pointer
755 *
756 * Look up the amount of vram, vram width, and decide how to place
757 * vram and gart within the GPU's physical address space.
758 * Returns 0 for success.
759 */
760 static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
761 {
762 int r;
763
764 /* memsize reported by the nbio block is in MB */
765 adev->gmc.mc_vram_size =
766 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
767 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
768
769 if (!(adev->flags & AMD_IS_APU)) {
770 r = amdgpu_device_resize_fb_bar(adev);
771 if (r)
772 return r;
773 }
774 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
775 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
776
777 #ifdef CONFIG_X86_64
778 if (adev->flags & AMD_IS_APU) {
779 adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
780 adev->gmc.aper_size = adev->gmc.real_vram_size;
781 }
782 #endif
783
784 /* In case the PCI BAR is larger than the actual amount of vram */
785 adev->gmc.visible_vram_size = adev->gmc.aper_size;
786 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
787 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
788
789 /* set the gart size */
790 if (amdgpu_gart_size == -1) {
791 switch (adev->asic_type) {
792 case CHIP_NAVI10:
793 case CHIP_NAVI14:
794 case CHIP_NAVI12:
795 case CHIP_SIENNA_CICHLID:
796 case CHIP_NAVY_FLOUNDER:
797 case CHIP_VANGOGH:
798 case CHIP_DIMGREY_CAVEFISH:
799 default:
800 adev->gmc.gart_size = 512ULL << 20;
801 break;
802 }
803 } else
804 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
805
806 gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
807
808 return 0;
809 }
810
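/**
 * gmc_v10_0_gart_init - initialize the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Creates the common GART structure, sets the table size and the default
 * PTE flags (uncached, executable) and allocates the table in VRAM.
 */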
811 static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
812 {
813 int r;
814
815 if (adev->gart.bo) {
816 WARN(1, "NAVI10 PCIE GART already initialized\n");
817 return 0;
818 }
819
820 /* Initialize common gart structure */
821 r = amdgpu_gart_init(adev);
822 if (r)
823 return r;
824
825 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
826 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
827 AMDGPU_PTE_EXECUTABLE;
828
829 return amdgpu_gart_table_vram_alloc(adev);
830 }
831
832 static int gmc_v10_0_sw_init(void *handle)
833 {
834 int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
835 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
836
837 adev->gfxhub.funcs->init(adev);
838
839 adev->mmhub.funcs->init(adev);
840
841 spin_lock_init(&adev->gmc.invalidate_lock);
842
843 if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
844 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
845 adev->gmc.vram_width = 64;
846 } else if (amdgpu_emu_mode == 1) {
847 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
848 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
849 } else {
850 r = amdgpu_atomfirmware_get_vram_info(adev,
851 &vram_width, &vram_type, &vram_vendor);
852 adev->gmc.vram_width = vram_width;
853
854 adev->gmc.vram_type = vram_type;
855 adev->gmc.vram_vendor = vram_vendor;
856 }
857
858 switch (adev->asic_type) {
859 case CHIP_NAVI10:
860 case CHIP_NAVI14:
861 case CHIP_NAVI12:
862 case CHIP_SIENNA_CICHLID:
863 case CHIP_NAVY_FLOUNDER:
864 case CHIP_VANGOGH:
865 case CHIP_DIMGREY_CAVEFISH:
866 adev->num_vmhubs = 2;
867 /*
868 * To fulfill 4-level page table support,
869 * the VM size is 256TB (48 bit), the maximum for Navi10/Navi14/Navi12,
870 * with a block size of 512 (9 bit).
871 */
872 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
873 break;
874 default:
875 break;
876 }
877
878 /* This is the VMC page fault interrupt. */
879 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
880 VMC_1_0__SRCID__VM_FAULT,
881 &adev->gmc.vm_fault);
882
883 if (r)
884 return r;
885
886 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
887 UTCL2_1_0__SRCID__FAULT,
888 &adev->gmc.vm_fault);
889 if (r)
890 return r;
891
892 if (!amdgpu_sriov_vf(adev)) {
893 /* interrupt sent to DF. */
894 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
895 &adev->gmc.ecc_irq);
896 if (r)
897 return r;
898 }
899
900 /*
901 * Set the internal MC address mask. This is the max address of the GPU's
902 * internal address space.
903 */
904 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
905
906 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
907 if (r) {
908 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
909 return r;
910 }
911
912 if (adev->gmc.xgmi.supported) {
913 r = adev->gfxhub.funcs->get_xgmi_info(adev);
914 if (r)
915 return r;
916 }
917
918 r = gmc_v10_0_mc_init(adev);
919 if (r)
920 return r;
921
922 amdgpu_gmc_get_vbios_allocations(adev);
923
924 /* Memory manager */
925 r = amdgpu_bo_init(adev);
926 if (r)
927 return r;
928
929 r = gmc_v10_0_gart_init(adev);
930 if (r)
931 return r;
932
933 /*
934 * number of VMs
935 * VMID 0 is reserved for System
936 * amdgpu graphics/compute will use VMIDs 1-7
937 * amdkfd will use VMIDs 8-15
938 */
939 adev->vm_manager.first_kfd_vmid = 8;
940
941 amdgpu_vm_manager_init(adev);
942
943 return 0;
944 }
945
946 /**
947 * gmc_v10_0_gart_fini - vm fini callback
948 *
949 * @adev: amdgpu_device pointer
950 *
951 * Tears down the driver GART/VM setup.
952 */
953 static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
954 {
955 amdgpu_gart_table_vram_free(adev);
956 amdgpu_gart_fini(adev);
957 }
958
959 static int gmc_v10_0_sw_fini(void *handle)
960 {
961 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
962
963 amdgpu_vm_manager_fini(adev);
964 gmc_v10_0_gart_fini(adev);
965 amdgpu_gem_force_release(adev);
966 amdgpu_bo_fini(adev);
967
968 return 0;
969 }
970
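/**
 * gmc_v10_0_init_golden_registers - apply ASIC specific golden settings
 *
 * @adev: amdgpu_device pointer
 *
 * No GMC golden register settings are currently required for any of the
 * supported ASICs, so this is effectively a no-op.
 */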
971 static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
972 {
973 switch (adev->asic_type) {
974 case CHIP_NAVI10:
975 case CHIP_NAVI14:
976 case CHIP_NAVI12:
977 case CHIP_SIENNA_CICHLID:
978 case CHIP_NAVY_FLOUNDER:
979 case CHIP_VANGOGH:
980 case CHIP_DIMGREY_CAVEFISH:
981 break;
982 default:
983 break;
984 }
985 }
986
987 /**
988 * gmc_v10_0_gart_enable - gart enable
989 *
990 * @adev: amdgpu_device pointer
991 */
992 static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
993 {
994 int r;
995 bool value;
996
997 if (adev->gart.bo == NULL) {
998 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
999 return -EINVAL;
1000 }
1001
1002 r = amdgpu_gart_table_vram_pin(adev);
1003 if (r)
1004 return r;
1005
1006 r = adev->gfxhub.funcs->gart_enable(adev);
1007 if (r)
1008 return r;
1009
1010 r = adev->mmhub.funcs->gart_enable(adev);
1011 if (r)
1012 return r;
1013
1014 adev->hdp.funcs->init_registers(adev);
1015
1016 /* Flush HDP after it is initialized */
1017 adev->hdp.funcs->flush_hdp(adev, NULL);
1018
1019 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
1020 false : true;
1021
1022 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
1023 adev->mmhub.funcs->set_fault_enable_default(adev, value);
1024 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
1025 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
1026
1027 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1028 (unsigned)(adev->gmc.gart_size >> 20),
1029 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1030
1031 adev->gart.ready = true;
1032
1033 return 0;
1034 }
1035
1036 static int gmc_v10_0_hw_init(void *handle)
1037 {
1038 int r;
1039 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1040
1041 /* The sequence of these two function calls matters. */
1042 gmc_v10_0_init_golden_registers(adev);
1043
1044 r = gmc_v10_0_gart_enable(adev);
1045 if (r)
1046 return r;
1047
1048 if (adev->umc.funcs && adev->umc.funcs->init_registers)
1049 adev->umc.funcs->init_registers(adev);
1050
1051 return 0;
1052 }
1053
1054 /**
1055 * gmc_v10_0_gart_disable - gart disable
1056 *
1057 * @adev: amdgpu_device pointer
1058 *
1059 * This disables all VM page tables.
1060 */
1061 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
1062 {
1063 adev->gfxhub.funcs->gart_disable(adev);
1064 adev->mmhub.funcs->gart_disable(adev);
1065 amdgpu_gart_table_vram_unpin(adev);
1066 }
1067
1068 static int gmc_v10_0_hw_fini(void *handle)
1069 {
1070 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1071
1072 if (amdgpu_sriov_vf(adev)) {
1073 /* full access mode, so don't touch any GMC register */
1074 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1075 return 0;
1076 }
1077
1078 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1079 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1080 gmc_v10_0_gart_disable(adev);
1081
1082 return 0;
1083 }
1084
1085 static int gmc_v10_0_suspend(void *handle)
1086 {
1087 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1088
1089 gmc_v10_0_hw_fini(adev);
1090
1091 return 0;
1092 }
1093
1094 static int gmc_v10_0_resume(void *handle)
1095 {
1096 int r;
1097 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1098
1099 r = gmc_v10_0_hw_init(adev);
1100 if (r)
1101 return r;
1102
1103 amdgpu_vmid_reset_all(adev);
1104
1105 return 0;
1106 }
1107
1108 static bool gmc_v10_0_is_idle(void *handle)
1109 {
1110 /* MC is always ready in GMC v10. */
1111 return true;
1112 }
1113
1114 static int gmc_v10_0_wait_for_idle(void *handle)
1115 {
1116 /* There is no need to wait for MC idle in GMC v10. */
1117 return 0;
1118 }
1119
1120 static int gmc_v10_0_soft_reset(void *handle)
1121 {
1122 return 0;
1123 }
1124
1125 static int gmc_v10_0_set_clockgating_state(void *handle,
1126 enum amd_clockgating_state state)
1127 {
1128 int r;
1129 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1130
1131 r = adev->mmhub.funcs->set_clockgating(adev, state);
1132 if (r)
1133 return r;
1134
1135 if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
1136 adev->asic_type <= CHIP_DIMGREY_CAVEFISH)
1137 return athub_v2_1_set_clockgating(adev, state);
1138 else
1139 return athub_v2_0_set_clockgating(adev, state);
1140 }
1141
1142 static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
1143 {
1144 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1145
1146 adev->mmhub.funcs->get_clockgating(adev, flags);
1147
1148 if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
1149 adev->asic_type <= CHIP_DIMGREY_CAVEFISH)
1150 athub_v2_1_get_clockgating(adev, flags);
1151 else
1152 athub_v2_0_get_clockgating(adev, flags);
1153 }
1154
1155 static int gmc_v10_0_set_powergating_state(void *handle,
1156 enum amd_powergating_state state)
1157 {
1158 return 0;
1159 }
1160
1161 const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
1162 .name = "gmc_v10_0",
1163 .early_init = gmc_v10_0_early_init,
1164 .late_init = gmc_v10_0_late_init,
1165 .sw_init = gmc_v10_0_sw_init,
1166 .sw_fini = gmc_v10_0_sw_fini,
1167 .hw_init = gmc_v10_0_hw_init,
1168 .hw_fini = gmc_v10_0_hw_fini,
1169 .suspend = gmc_v10_0_suspend,
1170 .resume = gmc_v10_0_resume,
1171 .is_idle = gmc_v10_0_is_idle,
1172 .wait_for_idle = gmc_v10_0_wait_for_idle,
1173 .soft_reset = gmc_v10_0_soft_reset,
1174 .set_clockgating_state = gmc_v10_0_set_clockgating_state,
1175 .set_powergating_state = gmc_v10_0_set_powergating_state,
1176 .get_clockgating_state = gmc_v10_0_get_clockgating_state,
1177 };
1178
1179 const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
1180 {
1181 .type = AMD_IP_BLOCK_TYPE_GMC,
1182 .major = 10,
1183 .minor = 0,
1184 .rev = 0,
1185 .funcs = &gmc_v10_0_ip_funcs,
1186 };
1187