1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #ifndef __AMDGPU_H__
29 #define __AMDGPU_H__
30
31 #include <linux/atomic.h>
32 #include <linux/wait.h>
33 #include <linux/list.h>
34 #include <linux/kref.h>
35 #include <linux/rbtree.h>
36 #include <linux/hashtable.h>
37 #include <linux/dma-fence.h>
38
39 #include <asm/cpufeature.h>
40
41 #include <drm/ttm/ttm_bo_api.h>
42 #include <drm/ttm/ttm_bo_driver.h>
43 #include <drm/ttm/ttm_placement.h>
44 #include <drm/ttm/ttm_module.h>
45 #include <drm/ttm/ttm_execbuf_util.h>
46
47 #include <drm/drmP.h>
48 #include <drm/drm_gem.h>
49 #include <drm/amdgpu_drm.h>
50 #include <drm/gpu_scheduler.h>
51
52 #include <kgd_kfd_interface.h>
53 #include "dm_pp_interface.h"
54 #include "kgd_pp_interface.h"
55
56 #include "amd_shared.h"
57 #include "amdgpu_mode.h"
58 #include "amdgpu_ih.h"
59 #include "amdgpu_irq.h"
60 #include "amdgpu_ucode.h"
61 #include "amdgpu_ttm.h"
62 #include "amdgpu_psp.h"
63 #include "amdgpu_gds.h"
64 #include "amdgpu_sync.h"
65 #include "amdgpu_ring.h"
66 #include "amdgpu_vm.h"
67 #include "amdgpu_dpm.h"
68 #include "amdgpu_acp.h"
69 #include "amdgpu_uvd.h"
70 #include "amdgpu_vce.h"
71 #include "amdgpu_vcn.h"
72 #include "amdgpu_mn.h"
73 #include "amdgpu_gmc.h"
74 #include "amdgpu_dm.h"
75 #include "amdgpu_virt.h"
76 #include "amdgpu_gart.h"
77 #include "amdgpu_debugfs.h"
78 #include "amdgpu_job.h"
79 #include "amdgpu_bo_list.h"
80
81 #include <contrib/dev/acpica/source/include/acpi.h>
82 #include <dev/acpica/acpivar.h>
83
84 /*
85 * Module parameters.
86 */
87 extern int amdgpu_modeset;
88 extern int amdgpu_vram_limit;
89 extern int amdgpu_vis_vram_limit;
90 extern int amdgpu_gart_size;
91 extern int amdgpu_gtt_size;
92 extern int amdgpu_moverate;
93 extern int amdgpu_benchmarking;
94 extern int amdgpu_testing;
95 extern int amdgpu_audio;
96 extern int amdgpu_disp_priority;
97 extern int amdgpu_hw_i2c;
98 extern int amdgpu_pcie_gen2;
99 extern int amdgpu_msi;
100 extern int amdgpu_lockup_timeout;
101 extern int amdgpu_dpm;
102 extern int amdgpu_fw_load_type;
103 extern int amdgpu_aspm;
104 extern int amdgpu_runtime_pm;
105 extern uint amdgpu_ip_block_mask;
106 extern int amdgpu_bapm;
107 extern int amdgpu_deep_color;
108 extern int amdgpu_vm_size;
109 extern int amdgpu_vm_block_size;
110 extern int amdgpu_vm_fragment_size;
111 extern int amdgpu_vm_fault_stop;
112 extern int amdgpu_vm_debug;
113 extern int amdgpu_vm_update_mode;
114 extern int amdgpu_dc;
115 extern int amdgpu_sched_jobs;
116 extern int amdgpu_sched_hw_submission;
117 extern uint amdgpu_pcie_gen_cap;
118 extern uint amdgpu_pcie_lane_cap;
119 extern uint amdgpu_cg_mask;
120 extern uint amdgpu_pg_mask;
121 extern uint amdgpu_sdma_phase_quantum;
122 extern char *amdgpu_disable_cu;
123 extern char *amdgpu_virtual_display;
124 extern uint amdgpu_pp_feature_mask;
125 extern int amdgpu_vram_page_split;
126 extern int amdgpu_ngg;
127 extern int amdgpu_prim_buf_per_se;
128 extern int amdgpu_pos_buf_per_se;
129 extern int amdgpu_cntl_sb_buf_per_se;
130 extern int amdgpu_param_buf_per_se;
131 extern int amdgpu_job_hang_limit;
132 extern int amdgpu_lbpw;
133 extern int amdgpu_compute_multipipe;
134 extern int amdgpu_gpu_recovery;
135 extern int amdgpu_emu_mode;
136 extern uint amdgpu_smu_memory_pool_size;
137
138 #ifdef CONFIG_DRM_AMDGPU_SI
139 extern int amdgpu_si_support;
140 #endif
141 #ifdef CONFIG_DRM_AMDGPU_CIK
142 extern int amdgpu_cik_support;
143 #endif
144
145 #define AMDGPU_SG_THRESHOLD (256*1024*1024)
146 #define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
147 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
148 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
149 #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
150 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */
151 #define AMDGPU_IB_POOL_SIZE 16
152 #define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
153 #define AMDGPUFB_CONN_LIMIT 4
154 #define AMDGPU_BIOS_NUM_SCRATCH 16
155
156 /* max number of IP instances */
157 #define AMDGPU_MAX_SDMA_INSTANCES 2
158
159 /* hard reset data */
160 #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
161
162 /* reset flags */
163 #define AMDGPU_RESET_GFX (1 << 0)
164 #define AMDGPU_RESET_COMPUTE (1 << 1)
165 #define AMDGPU_RESET_DMA (1 << 2)
166 #define AMDGPU_RESET_CP (1 << 3)
167 #define AMDGPU_RESET_GRBM (1 << 4)
168 #define AMDGPU_RESET_DMA1 (1 << 5)
169 #define AMDGPU_RESET_RLC (1 << 6)
170 #define AMDGPU_RESET_SEM (1 << 7)
171 #define AMDGPU_RESET_IH (1 << 8)
172 #define AMDGPU_RESET_VMC (1 << 9)
173 #define AMDGPU_RESET_MC (1 << 10)
174 #define AMDGPU_RESET_DISPLAY (1 << 11)
175 #define AMDGPU_RESET_UVD (1 << 12)
176 #define AMDGPU_RESET_VCE (1 << 13)
177 #define AMDGPU_RESET_VCE1 (1 << 14)
178
179 /* GFX current status */
180 #define AMDGPU_GFX_NORMAL_MODE 0x00000000L
181 #define AMDGPU_GFX_SAFE_MODE 0x00000001L
182 #define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
183 #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
184 #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
185
186 /* max cursor sizes (in pixels) */
187 #define CIK_CURSOR_WIDTH 128
188 #define CIK_CURSOR_HEIGHT 128
189
190 struct amdgpu_device;
191 struct amdgpu_ib;
192 struct amdgpu_cs_parser;
193 struct amdgpu_job;
194 struct amdgpu_irq_src;
195 struct amdgpu_fpriv;
196 struct amdgpu_bo_va_mapping;
197 struct amdgpu_atif;
198
199 enum amdgpu_cp_irq {
200 AMDGPU_CP_IRQ_GFX_EOP = 0,
201 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
202 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
203 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
204 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
205 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
206 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
207 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
208 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
209
210 AMDGPU_CP_IRQ_LAST
211 };
212
213 enum amdgpu_sdma_irq {
214 AMDGPU_SDMA_IRQ_TRAP0 = 0,
215 AMDGPU_SDMA_IRQ_TRAP1,
216
217 AMDGPU_SDMA_IRQ_LAST
218 };
219
220 enum amdgpu_thermal_irq {
221 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
222 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
223
224 AMDGPU_THERMAL_IRQ_LAST
225 };
226
227 enum amdgpu_kiq_irq {
228 AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
229 AMDGPU_CP_KIQ_IRQ_LAST
230 };
231
232 int amdgpu_device_ip_set_clockgating_state(void *dev,
233 enum amd_ip_block_type block_type,
234 enum amd_clockgating_state state);
235 int amdgpu_device_ip_set_powergating_state(void *dev,
236 enum amd_ip_block_type block_type,
237 enum amd_powergating_state state);
238 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
239 u32 *flags);
240 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
241 enum amd_ip_block_type block_type);
242 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
243 enum amd_ip_block_type block_type);
244
245 #define AMDGPU_MAX_IP_NUM 16
246
247 struct amdgpu_ip_block_status {
248 bool valid;
249 bool sw;
250 bool hw;
251 bool late_initialized;
252 bool hang;
253 };
254
255 struct amdgpu_ip_block_version {
256 const enum amd_ip_block_type type;
257 const u32 major;
258 const u32 minor;
259 const u32 rev;
260 const struct amd_ip_funcs *funcs;
261 };
262
263 struct amdgpu_ip_block {
264 struct amdgpu_ip_block_status status;
265 const struct amdgpu_ip_block_version *version;
266 };
267
268 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
269 enum amd_ip_block_type type,
270 u32 major, u32 minor);
271
272 struct amdgpu_ip_block *
273 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
274 enum amd_ip_block_type type);
275
276 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
277 const struct amdgpu_ip_block_version *ip_block_version);
278
279 /* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
280 struct amdgpu_buffer_funcs {
281 /* maximum bytes in a single operation */
282 uint32_t copy_max_bytes;
283
284 /* number of dw to reserve per operation */
285 unsigned copy_num_dw;
286
287 /* used for buffer migration */
288 void (*emit_copy_buffer)(struct amdgpu_ib *ib,
289 /* src addr in bytes */
290 uint64_t src_offset,
291 /* dst addr in bytes */
292 uint64_t dst_offset,
293 /* number of bytes to transfer */
294 uint32_t byte_count);
295
296 /* maximum bytes in a single operation */
297 uint32_t fill_max_bytes;
298
299 /* number of dw to reserve per operation */
300 unsigned fill_num_dw;
301
302 /* used for buffer clearing */
303 void (*emit_fill_buffer)(struct amdgpu_ib *ib,
304 /* value to write to memory */
305 uint32_t src_data,
306 /* dst addr in bytes */
307 uint64_t dst_offset,
308 /* number of bytes to fill */
309 uint32_t byte_count);
310 };
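
/*
 * Illustrative sketch only (amdgpu_example_emit_copy is not a real driver
 * symbol): shows how a caller is expected to split a large copy into
 * copy_max_bytes-sized chunks through the callbacks above; the real chunked
 * copy path lives in amdgpu_ttm.c.
 */
static inline void
amdgpu_example_emit_copy(const struct amdgpu_buffer_funcs *funcs,
			 struct amdgpu_ib *ib, uint64_t src_offset,
			 uint64_t dst_offset, uint64_t byte_count)
{
	while (byte_count) {
		uint32_t chunk = byte_count > funcs->copy_max_bytes ?
				 funcs->copy_max_bytes : (uint32_t)byte_count;

		/* each emit consumes up to copy_num_dw dwords in the IB */
		funcs->emit_copy_buffer(ib, src_offset, dst_offset, chunk);
		src_offset += chunk;
		dst_offset += chunk;
		byte_count -= chunk;
	}
}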
311
312 /* provided by hw blocks that can write ptes, e.g., sdma */
313 struct amdgpu_vm_pte_funcs {
314 /* number of dw to reserve per operation */
315 unsigned copy_pte_num_dw;
316
317 /* copy pte entries from GART */
318 void (*copy_pte)(struct amdgpu_ib *ib,
319 uint64_t pe, uint64_t src,
320 unsigned count);
321
322 /* write pte one entry at a time with addr mapping */
323 void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
324 uint64_t value, unsigned count,
325 uint32_t incr);
326 /* for linear pte/pde updates without addr mapping */
327 void (*set_pte_pde)(struct amdgpu_ib *ib,
328 uint64_t pe,
329 uint64_t addr, unsigned count,
330 uint32_t incr, uint64_t flags);
331 };
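
/*
 * Illustrative sketch only: for the linear update case, set_pte_pde() above
 * is expected to produce, for i in [0, count):
 *
 *	entry(pe + i * entry_size) = (addr + i * incr) | flags
 *
 * where entry_size is the per-ASIC page table entry size (typically 8 bytes),
 * while write_pte() takes already-translated values and advances the value
 * by incr for each entry.
 */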
332
333 /* provided by the ih block */
334 struct amdgpu_ih_funcs {
335 /* ring read/write ptr handling, called from interrupt context */
336 u32 (*get_wptr)(struct amdgpu_device *adev);
337 bool (*prescreen_iv)(struct amdgpu_device *adev);
338 void (*decode_iv)(struct amdgpu_device *adev,
339 struct amdgpu_iv_entry *entry);
340 void (*set_rptr)(struct amdgpu_device *adev);
341 };
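
/*
 * Illustrative sketch of how the IH ring is drained through these callbacks
 * (the actual loop lives in amdgpu_ih.c); read-pointer bookkeeping and entry
 * dispatch are omitted here:
 *
 *	wptr = adev->irq.ih_funcs->get_wptr(adev);
 *	while (rptr != wptr) {
 *		if (adev->irq.ih_funcs->prescreen_iv(adev)) {
 *			adev->irq.ih_funcs->decode_iv(adev, &entry);
 *			... dispatch entry to the matching amdgpu_irq_src ...
 *		}
 *	}
 *	adev->irq.ih_funcs->set_rptr(adev);
 */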
342
343 /*
344 * BIOS.
345 */
346 bool amdgpu_get_bios(struct amdgpu_device *adev);
347 bool amdgpu_read_bios(struct amdgpu_device *adev);
348
349 /*
350 * Clocks
351 */
352
353 #define AMDGPU_MAX_PPLL 3
354
355 struct amdgpu_clock {
356 struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
357 struct amdgpu_pll spll;
358 struct amdgpu_pll mpll;
359 /* 10 kHz units */
360 uint32_t default_mclk;
361 uint32_t default_sclk;
362 uint32_t default_dispclk;
363 uint32_t current_dispclk;
364 uint32_t dp_extclk;
365 uint32_t max_pixel_clock;
366 };
367
368 /*
369 * GEM.
370 */
371
372 #define AMDGPU_GEM_DOMAIN_MAX 0x3
373 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
374
375 void amdgpu_gem_object_free(struct drm_gem_object *obj);
376 int amdgpu_gem_object_open(struct drm_gem_object *obj,
377 struct drm_file *file_priv);
378 void amdgpu_gem_object_close(struct drm_gem_object *obj,
379 struct drm_file *file_priv);
380 unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
381 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
382 struct drm_gem_object *
383 amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
384 struct dma_buf_attachment *attach,
385 struct sg_table *sg);
386 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
387 struct drm_gem_object *gobj,
388 int flags);
389 struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
390 struct dma_buf *dma_buf);
391 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
392 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
393 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
394 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
395
396 /* sub-allocation manager; it has to be protected by another lock.
397 * By design this is a helper for other parts of the driver,
398 * such as the indirect buffer or semaphore, which both have their
399 * own locking.
400 *
401 * The principle is simple: we keep a list of sub-allocations in offset
402 * order (first entry has offset == 0, last entry has the highest
403 * offset).
404 *
405 * When allocating a new object we first check if there is room at
406 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
407 * alloc_size. If so we allocate the new object there.
408 *
409 * When there is not enough room at the end, we wait on each
410 * sub-object in turn until we reach object_offset + object_size >=
411 * alloc_size; that object then becomes the sub-object we return.
412 *
413 * Alignment can't be bigger than page size.
414 *
415 * Holes are not considered for allocation to keep things simple.
416 * The assumption is that there won't be holes (all objects use the same
417 * alignment). See the illustrative sketch after struct amdgpu_sa_bo below.
418 */
419
420 #define AMDGPU_SA_NUM_FENCE_LISTS 32
421
422 struct amdgpu_sa_manager {
423 wait_queue_head_t wq;
424 struct amdgpu_bo *bo;
425 struct list_head *hole;
426 struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
427 struct list_head olist;
428 unsigned size;
429 uint64_t gpu_addr;
430 void *cpu_ptr;
431 uint32_t domain;
432 uint32_t align;
433 };
434
435 /* sub-allocation buffer */
436 struct amdgpu_sa_bo {
437 struct list_head olist;
438 struct list_head flist;
439 struct amdgpu_sa_manager *manager;
440 unsigned soffset;
441 unsigned eoffset;
442 struct dma_fence *fence;
443 };
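
/*
 * Minimal sketch of the end-of-buffer check described above struct
 * amdgpu_sa_manager; amdgpu_sa_example_fits_at_end is a made-up name (the
 * real allocator lives in amdgpu_sa.c) and alignment handling is ignored.
 */
static inline bool
amdgpu_sa_example_fits_at_end(struct amdgpu_sa_manager *sa_manager,
			      unsigned alloc_size)
{
	struct amdgpu_sa_bo *last;

	if (list_empty(&sa_manager->olist))
		return alloc_size <= sa_manager->size;

	/* olist is kept in offset order, so the last entry ends the highest */
	last = list_last_entry(&sa_manager->olist, struct amdgpu_sa_bo, olist);
	return sa_manager->size - last->eoffset >= alloc_size;
}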
444
445 /*
446 * GEM objects.
447 */
448 void amdgpu_gem_force_release(struct amdgpu_device *adev);
449 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
450 int alignment, u32 initial_domain,
451 u64 flags, enum ttm_bo_type type,
452 struct reservation_object *resv,
453 struct drm_gem_object **obj);
454
455 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
456 struct drm_device *dev,
457 struct drm_mode_create_dumb *args);
458 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
459 struct drm_device *dev,
460 uint32_t handle, uint64_t *offset_p);
461 int amdgpu_fence_slab_init(void);
462 void amdgpu_fence_slab_fini(void);
463
464 /*
465 * GPU doorbell structures, functions & helpers
466 */
467 typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
468 {
469 AMDGPU_DOORBELL_KIQ = 0x000,
470 AMDGPU_DOORBELL_HIQ = 0x001,
471 AMDGPU_DOORBELL_DIQ = 0x002,
472 AMDGPU_DOORBELL_MEC_RING0 = 0x010,
473 AMDGPU_DOORBELL_MEC_RING1 = 0x011,
474 AMDGPU_DOORBELL_MEC_RING2 = 0x012,
475 AMDGPU_DOORBELL_MEC_RING3 = 0x013,
476 AMDGPU_DOORBELL_MEC_RING4 = 0x014,
477 AMDGPU_DOORBELL_MEC_RING5 = 0x015,
478 AMDGPU_DOORBELL_MEC_RING6 = 0x016,
479 AMDGPU_DOORBELL_MEC_RING7 = 0x017,
480 AMDGPU_DOORBELL_GFX_RING0 = 0x020,
481 AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
482 AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
483 AMDGPU_DOORBELL_IH = 0x1E8,
484 AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
485 AMDGPU_DOORBELL_INVALID = 0xFFFF
486 } AMDGPU_DOORBELL_ASSIGNMENT;
487
488 struct amdgpu_doorbell {
489 /* doorbell mmio */
490 resource_size_t base;
491 resource_size_t size;
492 u32 __iomem *ptr;
493 u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
494 };
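
/*
 * Note: the AMDGPU_DOORBELL_* values above are 32-bit doorbell slot indices
 * into the doorbell BAR and are used with the RDOORBELL32()/WDOORBELL32()
 * helpers defined further down, while the 64-bit assignments below are QWORD
 * indices used with RDOORBELL64()/WDOORBELL64().
 */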
495
496 /*
497 * 64-bit doorbells; offsets are in QWORDs and occupy 2KB of doorbell space
498 */
499 typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
500 {
501 /*
502 * All compute-related doorbells - kiq, hiq, diq, traditional compute queues, user queues - should be
503 * located in a contiguous range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
504 * Compute-related doorbells are allocated from 0x00 to 0x8a.
505 */
506
507
508 /* kernel scheduling */
509 AMDGPU_DOORBELL64_KIQ = 0x00,
510
511 /* HSA interface queue and debug queue */
512 AMDGPU_DOORBELL64_HIQ = 0x01,
513 AMDGPU_DOORBELL64_DIQ = 0x02,
514
515 /* Compute engines */
516 AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
517 AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
518 AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
519 AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
520 AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
521 AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
522 AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
523 AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
524
525 /* User queue doorbell range (128 doorbells) */
526 AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
527 AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
528
529 /* Graphics engine */
530 AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
531
532 /*
533 * Other graphics doorbells can be allocated here: from 0x8c to 0xef.
534 * Graphics voltage island aperture 1:
535 * the default non-graphics QWORD index range is 0xF0 - 0xFF inclusive.
536 */
537
538 /* sDMA engines */
539 AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
540 AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
541 AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
542 AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
543
544 /* Interrupt handler */
545 AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
546 AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
547 AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
548
549 /* VCN engines use 32-bit doorbells */
550 AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
551 AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
552 AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
553 AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
554
555 /* overlap the doorbell assignment with VCN as they are mutually exclusive;
556 * a VCE engine doorbell is 32 bits wide and two VCE rings share one QWORD
557 */
558 AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
559 AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
560 AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
561 AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
562
563 AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
564 AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
565 AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
566 AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
567
568 AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
569 AMDGPU_DOORBELL64_INVALID = 0xFFFF
570 } AMDGPU_DOORBELL64_ASSIGNMENT;
571
572 /*
573 * IRQS.
574 */
575
576 struct amdgpu_flip_work {
577 struct delayed_work flip_work;
578 struct work_struct unpin_work;
579 struct amdgpu_device *adev;
580 int crtc_id;
581 u32 target_vblank;
582 uint64_t base;
583 struct drm_pending_vblank_event *event;
584 struct amdgpu_bo *old_abo;
585 struct dma_fence *excl;
586 unsigned shared_count;
587 struct dma_fence **shared;
588 struct dma_fence_cb cb;
589 bool async;
590 };
591
592
593 /*
594 * CP & rings.
595 */
596
597 struct amdgpu_ib {
598 struct amdgpu_sa_bo *sa_bo;
599 uint32_t length_dw;
600 uint64_t gpu_addr;
601 uint32_t *ptr;
602 uint32_t flags;
603 };
604
605 extern const struct drm_sched_backend_ops amdgpu_sched_ops;
606
607 /*
608 * Queue manager
609 */
610 struct amdgpu_queue_mapper {
611 int hw_ip;
612 struct lock lock;
613 /* protected by lock */
614 struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
615 };
616
617 struct amdgpu_queue_mgr {
618 struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
619 };
620
621 int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
622 struct amdgpu_queue_mgr *mgr);
623 int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
624 struct amdgpu_queue_mgr *mgr);
625 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
626 struct amdgpu_queue_mgr *mgr,
627 u32 hw_ip, u32 instance, u32 ring,
628 struct amdgpu_ring **out_ring);
629
630 /*
631 * context related structures
632 */
633
634 struct amdgpu_ctx_ring {
635 uint64_t sequence;
636 struct dma_fence **fences;
637 struct drm_sched_entity entity;
638 };
639
640 struct amdgpu_ctx {
641 struct kref refcount;
642 struct amdgpu_device *adev;
643 struct amdgpu_queue_mgr queue_mgr;
644 unsigned reset_counter;
645 unsigned reset_counter_query;
646 uint32_t vram_lost_counter;
647 struct spinlock ring_lock;
648 struct dma_fence **fences;
649 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
650 bool preamble_presented;
651 enum drm_sched_priority init_priority;
652 enum drm_sched_priority override_priority;
653 struct lock lock;
654 atomic_t guilty;
655 };
656
657 struct amdgpu_ctx_mgr {
658 struct amdgpu_device *adev;
659 struct lock lock;
660 /* protected by lock */
661 struct idr ctx_handles;
662 };
663
664 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
665 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
666
667 int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
668 struct dma_fence *fence, uint64_t *seq);
669 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
670 struct amdgpu_ring *ring, uint64_t seq);
671 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
672 enum drm_sched_priority priority);
673
674 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
675 struct drm_file *filp);
676
677 int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
678
679 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
680 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
681 void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
682 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
683
684
685 /*
686 * file private structure
687 */
688
689 struct amdgpu_fpriv {
690 struct amdgpu_vm vm;
691 struct amdgpu_bo_va *prt_va;
692 struct amdgpu_bo_va *csa_va;
693 struct lock bo_list_lock;
694 struct idr bo_list_handles;
695 struct amdgpu_ctx_mgr ctx_mgr;
696 };
697
698 /*
699 * GFX stuff
700 */
701 #include "clearstate_defs.h"
702
703 struct amdgpu_rlc_funcs {
704 void (*enter_safe_mode)(struct amdgpu_device *adev);
705 void (*exit_safe_mode)(struct amdgpu_device *adev);
706 };
707
708 struct amdgpu_rlc {
709 /* for power gating */
710 struct amdgpu_bo *save_restore_obj;
711 uint64_t save_restore_gpu_addr;
712 volatile uint32_t *sr_ptr;
713 const u32 *reg_list;
714 u32 reg_list_size;
715 /* for clear state */
716 struct amdgpu_bo *clear_state_obj;
717 uint64_t clear_state_gpu_addr;
718 volatile uint32_t *cs_ptr;
719 const struct cs_section_def *cs_data;
720 u32 clear_state_size;
721 /* for cp tables */
722 struct amdgpu_bo *cp_table_obj;
723 uint64_t cp_table_gpu_addr;
724 volatile uint32_t *cp_table_ptr;
725 u32 cp_table_size;
726
727 /* safe mode for updating CG/PG state */
728 bool in_safe_mode;
729 const struct amdgpu_rlc_funcs *funcs;
730
731 /* for firmware data */
732 u32 save_and_restore_offset;
733 u32 clear_state_descriptor_offset;
734 u32 avail_scratch_ram_locations;
735 u32 reg_restore_list_size;
736 u32 reg_list_format_start;
737 u32 reg_list_format_separate_start;
738 u32 starting_offsets_start;
739 u32 reg_list_format_size_bytes;
740 u32 reg_list_size_bytes;
741 u32 reg_list_format_direct_reg_list_length;
742 u32 save_restore_list_cntl_size_bytes;
743 u32 save_restore_list_gpm_size_bytes;
744 u32 save_restore_list_srm_size_bytes;
745
746 u32 *register_list_format;
747 u32 *register_restore;
748 u8 *save_restore_list_cntl;
749 u8 *save_restore_list_gpm;
750 u8 *save_restore_list_srm;
751
752 bool is_rlc_v2_1;
753 };
754
755 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
756
757 struct amdgpu_mec {
758 struct amdgpu_bo *hpd_eop_obj;
759 u64 hpd_eop_gpu_addr;
760 struct amdgpu_bo *mec_fw_obj;
761 u64 mec_fw_gpu_addr;
762 u32 num_mec;
763 u32 num_pipe_per_mec;
764 u32 num_queue_per_pipe;
765 void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
766
767 /* These are the resources for which amdgpu takes ownership */
768 DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
769 };
770
771 struct amdgpu_kiq {
772 u64 eop_gpu_addr;
773 struct amdgpu_bo *eop_obj;
774 spinlock_t ring_lock;
775 struct amdgpu_ring ring;
776 struct amdgpu_irq_src irq;
777 };
778
779 /*
780 * GPU scratch register structures, functions & helpers
781 */
782 struct amdgpu_scratch {
783 unsigned num_reg;
784 uint32_t reg_base;
785 uint32_t free_mask;
786 };
787
788 /*
789 * GFX configurations
790 */
791 #define AMDGPU_GFX_MAX_SE 4
792 #define AMDGPU_GFX_MAX_SH_PER_SE 2
793
794 struct amdgpu_rb_config {
795 uint32_t rb_backend_disable;
796 uint32_t user_rb_backend_disable;
797 uint32_t raster_config;
798 uint32_t raster_config_1;
799 };
800
801 struct gb_addr_config {
802 uint16_t pipe_interleave_size;
803 uint8_t num_pipes;
804 uint8_t max_compress_frags;
805 uint8_t num_banks;
806 uint8_t num_se;
807 uint8_t num_rb_per_se;
808 };
809
810 struct amdgpu_gfx_config {
811 unsigned max_shader_engines;
812 unsigned max_tile_pipes;
813 unsigned max_cu_per_sh;
814 unsigned max_sh_per_se;
815 unsigned max_backends_per_se;
816 unsigned max_texture_channel_caches;
817 unsigned max_gprs;
818 unsigned max_gs_threads;
819 unsigned max_hw_contexts;
820 unsigned sc_prim_fifo_size_frontend;
821 unsigned sc_prim_fifo_size_backend;
822 unsigned sc_hiz_tile_fifo_size;
823 unsigned sc_earlyz_tile_fifo_size;
824
825 unsigned num_tile_pipes;
826 unsigned backend_enable_mask;
827 unsigned mem_max_burst_length_bytes;
828 unsigned mem_row_size_in_kb;
829 unsigned shader_engine_tile_size;
830 unsigned num_gpus;
831 unsigned multi_gpu_tile_size;
832 unsigned mc_arb_ramcfg;
833 unsigned gb_addr_config;
834 unsigned num_rbs;
835 unsigned gs_vgt_table_depth;
836 unsigned gs_prim_buffer_depth;
837
838 uint32_t tile_mode_array[32];
839 uint32_t macrotile_mode_array[16];
840
841 struct gb_addr_config gb_addr_config_fields;
842 struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
843
844 /* gfx configuration features */
845 uint32_t double_offchip_lds_buf;
846 /* cached value of DB_DEBUG2 */
847 uint32_t db_debug2;
848 };
849
850 struct amdgpu_cu_info {
851 uint32_t simd_per_cu;
852 uint32_t max_waves_per_simd;
853 uint32_t wave_front_size;
854 uint32_t max_scratch_slots_per_cu;
855 uint32_t lds_size;
856
857 /* total active CU number */
858 uint32_t number;
859 uint32_t ao_cu_mask;
860 uint32_t ao_cu_bitmap[4][4];
861 uint32_t bitmap[4][4];
862 };
863
864 struct amdgpu_gfx_funcs {
865 /* get the gpu clock counter */
866 uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
867 void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
868 void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
869 void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
870 void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
871 void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
872 };
873
874 struct amdgpu_ngg_buf {
875 struct amdgpu_bo *bo;
876 uint64_t gpu_addr;
877 uint32_t size;
878 uint32_t bo_size;
879 };
880
881 enum {
882 NGG_PRIM = 0,
883 NGG_POS,
884 NGG_CNTL,
885 NGG_PARAM,
886 NGG_BUF_MAX
887 };
888
889 struct amdgpu_ngg {
890 struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
891 uint32_t gds_reserve_addr;
892 uint32_t gds_reserve_size;
893 bool init;
894 };
895
896 struct sq_work {
897 struct work_struct work;
898 unsigned ih_data;
899 };
900
901 struct amdgpu_gfx {
902 struct lock gpu_clock_mutex;
903 struct amdgpu_gfx_config config;
904 struct amdgpu_rlc rlc;
905 struct amdgpu_mec mec;
906 struct amdgpu_kiq kiq;
907 struct amdgpu_scratch scratch;
908 const struct firmware *me_fw; /* ME firmware */
909 uint32_t me_fw_version;
910 const struct firmware *pfp_fw; /* PFP firmware */
911 uint32_t pfp_fw_version;
912 const struct firmware *ce_fw; /* CE firmware */
913 uint32_t ce_fw_version;
914 const struct firmware *rlc_fw; /* RLC firmware */
915 uint32_t rlc_fw_version;
916 const struct firmware *mec_fw; /* MEC firmware */
917 uint32_t mec_fw_version;
918 const struct firmware *mec2_fw; /* MEC2 firmware */
919 uint32_t mec2_fw_version;
920 uint32_t me_feature_version;
921 uint32_t ce_feature_version;
922 uint32_t pfp_feature_version;
923 uint32_t rlc_feature_version;
924 uint32_t rlc_srlc_fw_version;
925 uint32_t rlc_srlc_feature_version;
926 uint32_t rlc_srlg_fw_version;
927 uint32_t rlc_srlg_feature_version;
928 uint32_t rlc_srls_fw_version;
929 uint32_t rlc_srls_feature_version;
930 uint32_t mec_feature_version;
931 uint32_t mec2_feature_version;
932 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
933 unsigned num_gfx_rings;
934 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
935 unsigned num_compute_rings;
936 struct amdgpu_irq_src eop_irq;
937 struct amdgpu_irq_src priv_reg_irq;
938 struct amdgpu_irq_src priv_inst_irq;
939 struct amdgpu_irq_src cp_ecc_error_irq;
940 struct amdgpu_irq_src sq_irq;
941 struct sq_work sq_work;
942
943 /* gfx status */
944 uint32_t gfx_current_status;
945 /* ce ram size */
946 unsigned ce_ram_size;
947 struct amdgpu_cu_info cu_info;
948 const struct amdgpu_gfx_funcs *funcs;
949
950 /* reset mask */
951 uint32_t grbm_soft_reset;
952 uint32_t srbm_soft_reset;
953 /* s3/s4 mask */
954 bool in_suspend;
955 /* NGG */
956 struct amdgpu_ngg ngg;
957
958 /* pipe reservation */
959 struct lock pipe_reserve_mutex;
960 DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
961 };
962
963 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
964 unsigned size, struct amdgpu_ib *ib);
965 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
966 struct dma_fence *f);
967 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
968 struct amdgpu_ib *ibs, struct amdgpu_job *job,
969 struct dma_fence **f);
970 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
971 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
972 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
973
974 /*
975 * CS.
976 */
977 struct amdgpu_cs_chunk {
978 uint32_t chunk_id;
979 uint32_t length_dw;
980 void *kdata;
981 };
982
983 struct amdgpu_cs_parser {
984 struct amdgpu_device *adev;
985 struct drm_file *filp;
986 struct amdgpu_ctx *ctx;
987
988 /* chunks */
989 unsigned nchunks;
990 struct amdgpu_cs_chunk *chunks;
991
992 /* scheduler job object */
993 struct amdgpu_job *job;
994 struct amdgpu_ring *ring;
995
996 /* buffer objects */
997 struct ww_acquire_ctx ticket;
998 struct amdgpu_bo_list *bo_list;
999 struct amdgpu_mn *mn;
1000 struct amdgpu_bo_list_entry vm_pd;
1001 struct list_head validated;
1002 struct dma_fence *fence;
1003 uint64_t bytes_moved_threshold;
1004 uint64_t bytes_moved_vis_threshold;
1005 uint64_t bytes_moved;
1006 uint64_t bytes_moved_vis;
1007 struct amdgpu_bo_list_entry *evictable;
1008
1009 /* user fence */
1010 struct amdgpu_bo_list_entry uf_entry;
1011
1012 unsigned num_post_dep_syncobjs;
1013 struct drm_syncobj **post_dep_syncobjs;
1014 };
1015
1016 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
1017 uint32_t ib_idx, int idx)
1018 {
1019 return p->job->ibs[ib_idx].ptr[idx];
1020 }
1021
1022 static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
1023 uint32_t ib_idx, int idx,
1024 uint32_t value)
1025 {
1026 p->job->ibs[ib_idx].ptr[idx] = value;
1027 }
1028
1029 /*
1030 * Writeback
1031 */
1032 #define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */
1033
1034 struct amdgpu_wb {
1035 struct amdgpu_bo *wb_obj;
1036 volatile uint32_t *wb;
1037 uint64_t gpu_addr;
1038 u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
1039 unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
1040 };
1041
1042 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
1043 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
1044
1045 /*
1046 * SDMA
1047 */
1048 struct amdgpu_sdma_instance {
1049 /* SDMA firmware */
1050 const struct firmware *fw;
1051 uint32_t fw_version;
1052 uint32_t feature_version;
1053
1054 struct amdgpu_ring ring;
1055 bool burst_nop;
1056 };
1057
1058 struct amdgpu_sdma {
1059 struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
1060 #ifdef CONFIG_DRM_AMDGPU_SI
1061 // SI DMA has a different trap irq number for the second engine
1062 struct amdgpu_irq_src trap_irq_1;
1063 #endif
1064 struct amdgpu_irq_src trap_irq;
1065 struct amdgpu_irq_src illegal_inst_irq;
1066 int num_instances;
1067 uint32_t srbm_soft_reset;
1068 };
1069
1070 /*
1071 * Firmware
1072 */
1073 enum amdgpu_firmware_load_type {
1074 AMDGPU_FW_LOAD_DIRECT = 0,
1075 AMDGPU_FW_LOAD_SMU,
1076 AMDGPU_FW_LOAD_PSP,
1077 };
1078
1079 struct amdgpu_firmware {
1080 struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
1081 enum amdgpu_firmware_load_type load_type;
1082 struct amdgpu_bo *fw_buf;
1083 unsigned int fw_size;
1084 unsigned int max_ucodes;
1085 /* from vega10 onwards, firmware is loaded by the psp instead of the smu */
1086 const struct amdgpu_psp_funcs *funcs;
1087 struct amdgpu_bo *rbuf;
1088 struct lock mutex;
1089
1090 /* gpu info firmware data pointer */
1091 const struct firmware *gpu_info_fw;
1092
1093 void *fw_buf_ptr;
1094 uint64_t fw_buf_mc;
1095 };
1096
1097 /*
1098 * Benchmarking
1099 */
1100 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
1101
1102
1103 /*
1104 * Testing
1105 */
1106 void amdgpu_test_moves(struct amdgpu_device *adev);
1107
1108
1109 /*
1110 * amdgpu smumgr functions
1111 */
1112 struct amdgpu_smumgr_funcs {
1113 int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
1114 int (*request_smu_load_fw)(struct amdgpu_device *adev);
1115 int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
1116 };
1117
1118 /*
1119 * amdgpu smumgr
1120 */
1121 struct amdgpu_smumgr {
1122 struct amdgpu_bo *toc_buf;
1123 struct amdgpu_bo *smu_buf;
1124 /* asic priv smu data */
1125 void *priv;
1126 spinlock_t smu_lock;
1127 /* smumgr functions */
1128 const struct amdgpu_smumgr_funcs *smumgr_funcs;
1129 /* ucode loading complete flag */
1130 uint32_t fw_flags;
1131 };
1132
1133 /*
1134 * ASIC specific register table accessible by UMD
1135 */
1136 struct amdgpu_allowed_register_entry {
1137 uint32_t reg_offset;
1138 bool grbm_indexed;
1139 };
1140
1141 /*
1142 * ASIC specific functions.
1143 */
1144 struct amdgpu_asic_funcs {
1145 bool (*read_disabled_bios)(struct amdgpu_device *adev);
1146 bool (*read_bios_from_rom)(struct amdgpu_device *adev,
1147 u8 *bios, u32 length_bytes);
1148 int (*read_register)(struct amdgpu_device *adev, u32 se_num,
1149 u32 sh_num, u32 reg_offset, u32 *value);
1150 void (*set_vga_state)(struct amdgpu_device *adev, bool state);
1151 int (*reset)(struct amdgpu_device *adev);
1152 /* get the reference clock */
1153 u32 (*get_xclk)(struct amdgpu_device *adev);
1154 /* MM block clocks */
1155 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1156 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
1157 /* static power management */
1158 int (*get_pcie_lanes)(struct amdgpu_device *adev);
1159 void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
1160 /* get config memsize register */
1161 u32 (*get_config_memsize)(struct amdgpu_device *adev);
1162 /* flush hdp write queue */
1163 void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
1164 /* invalidate hdp read cache */
1165 void (*invalidate_hdp)(struct amdgpu_device *adev,
1166 struct amdgpu_ring *ring);
1167 /* check if the asic needs a full reset or if a soft reset will work */
1168 bool (*need_full_reset)(struct amdgpu_device *adev);
1169 };
1170
1171 /*
1172 * IOCTL.
1173 */
1174 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
1175 struct drm_file *filp);
1176 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
1177 struct drm_file *filp);
1178
1179 int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
1180 struct drm_file *filp);
1181 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
1182 struct drm_file *filp);
1183 int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
1184 struct drm_file *filp);
1185 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1186 struct drm_file *filp);
1187 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
1188 struct drm_file *filp);
1189 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
1190 struct drm_file *filp);
1191 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1192 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1193 struct drm_file *filp);
1194 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1195 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1196 struct drm_file *filp);
1197
1198 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
1199 struct drm_file *filp);
1200
1201 /* VRAM scratch page for HDP bug, default vram page */
1202 struct amdgpu_vram_scratch {
1203 struct amdgpu_bo *robj;
1204 volatile uint32_t *ptr;
1205 u64 gpu_addr;
1206 };
1207
1208 /*
1209 * ACPI
1210 */
1211 struct amdgpu_atcs_functions {
1212 bool get_ext_state;
1213 bool pcie_perf_req;
1214 bool pcie_dev_rdy;
1215 bool pcie_bus_width;
1216 };
1217
1218 struct amdgpu_atcs {
1219 struct amdgpu_atcs_functions functions;
1220 };
1221
1222 /*
1223 * Firmware VRAM reservation
1224 */
1225 struct amdgpu_fw_vram_usage {
1226 u64 start_offset;
1227 u64 size;
1228 struct amdgpu_bo *reserved_bo;
1229 void *va;
1230 };
1231
1232 /*
1233 * CGS
1234 */
1235 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
1236 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
1237
1238 /*
1239 * Core structure, functions and helpers.
1240 */
1241 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
1242 typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1243
1244 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1245 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1246
1247
1248 /*
1249 * amdgpu nbio functions
1250 *
1251 */
1252 struct nbio_hdp_flush_reg {
1253 u32 ref_and_mask_cp0;
1254 u32 ref_and_mask_cp1;
1255 u32 ref_and_mask_cp2;
1256 u32 ref_and_mask_cp3;
1257 u32 ref_and_mask_cp4;
1258 u32 ref_and_mask_cp5;
1259 u32 ref_and_mask_cp6;
1260 u32 ref_and_mask_cp7;
1261 u32 ref_and_mask_cp8;
1262 u32 ref_and_mask_cp9;
1263 u32 ref_and_mask_sdma0;
1264 u32 ref_and_mask_sdma1;
1265 };
1266
1267 struct amdgpu_nbio_funcs {
1268 const struct nbio_hdp_flush_reg *hdp_flush_reg;
1269 u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
1270 u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
1271 u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
1272 u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
1273 u32 (*get_rev_id)(struct amdgpu_device *adev);
1274 void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
1275 void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
1276 u32 (*get_memsize)(struct amdgpu_device *adev);
1277 void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
1278 bool use_doorbell, int doorbell_index);
1279 void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
1280 bool enable);
1281 void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
1282 bool enable);
1283 void (*ih_doorbell_range)(struct amdgpu_device *adev,
1284 bool use_doorbell, int doorbell_index);
1285 void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
1286 bool enable);
1287 void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
1288 bool enable);
1289 void (*get_clockgating_state)(struct amdgpu_device *adev,
1290 u32 *flags);
1291 void (*ih_control)(struct amdgpu_device *adev);
1292 void (*init_registers)(struct amdgpu_device *adev);
1293 void (*detect_hw_virt)(struct amdgpu_device *adev);
1294 };
1295
1296 struct amdgpu_df_funcs {
1297 void (*init)(struct amdgpu_device *adev);
1298 void (*enable_broadcast_mode)(struct amdgpu_device *adev,
1299 bool enable);
1300 u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
1301 u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
1302 void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
1303 bool enable);
1304 void (*get_clockgating_state)(struct amdgpu_device *adev,
1305 u32 *flags);
1306 void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
1307 bool enable);
1308 };
1309 /* Define the HW IP blocks that will be used in the driver; add more if necessary */
1310 enum amd_hw_ip_block_type {
1311 GC_HWIP = 1,
1312 HDP_HWIP,
1313 SDMA0_HWIP,
1314 SDMA1_HWIP,
1315 MMHUB_HWIP,
1316 ATHUB_HWIP,
1317 NBIO_HWIP,
1318 MP0_HWIP,
1319 MP1_HWIP,
1320 UVD_HWIP,
1321 VCN_HWIP = UVD_HWIP,
1322 VCE_HWIP,
1323 DF_HWIP,
1324 DCE_HWIP,
1325 OSSSYS_HWIP,
1326 SMUIO_HWIP,
1327 PWR_HWIP,
1328 NBIF_HWIP,
1329 THM_HWIP,
1330 CLK_HWIP,
1331 MAX_HWIP
1332 };
1333
1334 #define HWIP_MAX_INSTANCE 6
1335
1336 struct amd_powerplay {
1337 void *pp_handle;
1338 const struct amd_pm_funcs *pp_funcs;
1339 uint32_t pp_feature;
1340 };
1341
1342 #define AMDGPU_RESET_MAGIC_NUM 64
1343 struct amdgpu_device {
1344 struct device *dev;
1345 struct drm_device *ddev;
1346 struct pci_dev *pdev;
1347
1348 #ifdef CONFIG_DRM_AMD_ACP
1349 struct amdgpu_acp acp;
1350 #endif
1351
1352 /* ASIC */
1353 enum amd_asic_type asic_type;
1354 uint32_t family;
1355 uint32_t rev_id;
1356 uint32_t external_rev_id;
1357 unsigned long flags;
1358 int usec_timeout;
1359 const struct amdgpu_asic_funcs *asic_funcs;
1360 bool shutdown;
1361 bool need_dma32;
1362 bool need_swiotlb;
1363 bool accel_working;
1364 struct work_struct reset_work;
1365 struct notifier_block acpi_nb;
1366 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
1367 struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
1368 unsigned debugfs_count;
1369 #if defined(CONFIG_DEBUG_FS)
1370 struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
1371 #endif
1372 struct amdgpu_atif *atif;
1373 struct amdgpu_atcs atcs;
1374 struct lock srbm_mutex;
1375 /* GRBM index mutex. Protects concurrent access to GRBM index */
1376 struct lock grbm_idx_mutex;
1377 #if 0
1378 struct dev_pm_domain vga_pm_domain;
1379 #endif
1380 bool have_disp_power_ref;
1381
1382 /* BIOS */
1383 bool is_atom_fw;
1384 uint8_t *bios;
1385 uint32_t bios_size;
1386 struct amdgpu_bo *stolen_vga_memory;
1387 uint32_t bios_scratch_reg_offset;
1388 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
1389
1390 /* Register/doorbell mmio */
1391 resource_size_t rmmio_base;
1392 resource_size_t rmmio_size;
1393 #if 0
1394 void __iomem *rmmio;
1395 #endif
1396 #ifdef __DragonFly__
1397 int rmmio_rid;
1398 struct resource *rmmio;
1399 #endif
1400 /* protects concurrent MM_INDEX/DATA based register access */
1401 spinlock_t mmio_idx_lock;
1402 /* protects concurrent SMC based register access */
1403 spinlock_t smc_idx_lock;
1404 amdgpu_rreg_t smc_rreg;
1405 amdgpu_wreg_t smc_wreg;
1406 /* protects concurrent PCIE register access */
1407 spinlock_t pcie_idx_lock;
1408 amdgpu_rreg_t pcie_rreg;
1409 amdgpu_wreg_t pcie_wreg;
1410 amdgpu_rreg_t pciep_rreg;
1411 amdgpu_wreg_t pciep_wreg;
1412 /* protects concurrent UVD register access */
1413 spinlock_t uvd_ctx_idx_lock;
1414 amdgpu_rreg_t uvd_ctx_rreg;
1415 amdgpu_wreg_t uvd_ctx_wreg;
1416 /* protects concurrent DIDT register access */
1417 spinlock_t didt_idx_lock;
1418 amdgpu_rreg_t didt_rreg;
1419 amdgpu_wreg_t didt_wreg;
1420 /* protects concurrent gc_cac register access */
1421 spinlock_t gc_cac_idx_lock;
1422 amdgpu_rreg_t gc_cac_rreg;
1423 amdgpu_wreg_t gc_cac_wreg;
1424 /* protects concurrent se_cac register access */
1425 spinlock_t se_cac_idx_lock;
1426 amdgpu_rreg_t se_cac_rreg;
1427 amdgpu_wreg_t se_cac_wreg;
1428 /* protects concurrent ENDPOINT (audio) register access */
1429 spinlock_t audio_endpt_idx_lock;
1430 amdgpu_block_rreg_t audio_endpt_rreg;
1431 amdgpu_block_wreg_t audio_endpt_wreg;
1432 #if 0
1433 void __iomem *rio_mem;
1434 #endif
1435 #ifdef __DragonFly__
1436 int rio_rid;
1437 struct resource *rio_mem;
1438 #endif
1439 resource_size_t rio_mem_size;
1440 struct amdgpu_doorbell doorbell;
1441
1442 /* clock/pll info */
1443 struct amdgpu_clock clock;
1444
1445 /* MC */
1446 struct amdgpu_gmc gmc;
1447 struct amdgpu_gart gart;
1448 dma_addr_t dummy_page_addr;
1449 struct amdgpu_vm_manager vm_manager;
1450 struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
1451
1452 /* memory management */
1453 struct amdgpu_mman mman;
1454 struct amdgpu_vram_scratch vram_scratch;
1455 struct amdgpu_wb wb;
1456 atomic64_t num_bytes_moved;
1457 atomic64_t num_evictions;
1458 atomic64_t num_vram_cpu_page_faults;
1459 atomic_t gpu_reset_counter;
1460 atomic_t vram_lost_counter;
1461
1462 /* data for buffer migration throttling */
1463 struct {
1464 struct spinlock lock;
1465 s64 last_update_us;
1466 s64 accum_us; /* accumulated microseconds */
1467 s64 accum_us_vis; /* for visible VRAM */
1468 u32 log2_max_MBps;
1469 } mm_stats;
1470
1471 /* display */
1472 bool enable_virtual_display;
1473 struct amdgpu_mode_info mode_info;
1474 /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
1475 struct work_struct hotplug_work;
1476 struct amdgpu_irq_src crtc_irq;
1477 struct amdgpu_irq_src pageflip_irq;
1478 struct amdgpu_irq_src hpd_irq;
1479
1480 /* rings */
1481 u64 fence_context;
1482 unsigned num_rings;
1483 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
1484 bool ib_pool_ready;
1485 struct amdgpu_sa_manager ring_tmp_bo;
1486
1487 /* interrupts */
1488 struct amdgpu_irq irq;
1489
1490 /* powerplay */
1491 struct amd_powerplay powerplay;
1492 bool pp_force_state_enabled;
1493
1494 /* dpm */
1495 struct amdgpu_pm pm;
1496 u32 cg_flags;
1497 u32 pg_flags;
1498
1499 /* amdgpu smumgr */
1500 struct amdgpu_smumgr smu;
1501
1502 /* gfx */
1503 struct amdgpu_gfx gfx;
1504
1505 /* sdma */
1506 struct amdgpu_sdma sdma;
1507
1508 /* uvd */
1509 struct amdgpu_uvd uvd;
1510
1511 /* vce */
1512 struct amdgpu_vce vce;
1513
1514 /* vcn */
1515 struct amdgpu_vcn vcn;
1516
1517 /* firmwares */
1518 struct amdgpu_firmware firmware;
1519
1520 /* PSP */
1521 struct psp_context psp;
1522
1523 /* GDS */
1524 struct amdgpu_gds gds;
1525
1526 /* display related functionality */
1527 struct amdgpu_display_manager dm;
1528
1529 struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
1530 int num_ip_blocks;
1531 struct lock mn_lock;
1532 DECLARE_HASHTABLE(mn_hash, 7);
1533
1534 /* tracking pinned memory */
1535 atomic64_t vram_pin_size;
1536 atomic64_t visible_pin_size;
1537 atomic64_t gart_pin_size;
1538
1539 /* amdkfd interface */
1540 struct kfd_dev *kfd;
1541
1542 /* soc15 register offset based on ip, instance and segment */
1543 uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
1544
1545 const struct amdgpu_nbio_funcs *nbio_funcs;
1546 const struct amdgpu_df_funcs *df_funcs;
1547
1548 /* delayed work_func for deferring clockgating during resume */
1549 struct delayed_work late_init_work;
1550
1551 struct amdgpu_virt virt;
1552 /* firmware VRAM reservation */
1553 struct amdgpu_fw_vram_usage fw_vram_usage;
1554
1555 /* link all shadow bo */
1556 struct list_head shadow_list;
1557 struct lock shadow_list_lock;
1558 /* keep an lru list of rings by HW IP */
1559 struct list_head ring_lru_list;
1560 struct spinlock ring_lru_list_lock;
1561
1562 /* record whether a hw reset has been performed */
1563 bool has_hw_reset;
1564 u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
1565
1566 /* record last mm index being written through WREG32 */
1567 unsigned long last_mm_index;
1568 bool in_gpu_reset;
1569 struct lock lock_reset;
1570 #ifdef __DragonFly__
1571 struct {
1572 ACPI_HANDLE handle;
1573 ACPI_NOTIFY_HANDLER notifier_call;
1574 } acpi;
1575 bool fictitious_range_registered;
1576 #endif
1577 };
1578
1579 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
1580 {
1581 return container_of(bdev, struct amdgpu_device, mman.bdev);
1582 }
1583
1584 int amdgpu_device_init(struct amdgpu_device *adev,
1585 struct drm_device *ddev,
1586 struct pci_dev *pdev,
1587 uint32_t flags);
1588 void amdgpu_device_fini(struct amdgpu_device *adev);
1589 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
1590
1591 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
1592 uint32_t acc_flags);
1593 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
1594 uint32_t acc_flags);
1595 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
1596 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
1597
1598 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
1599 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
1600
1601 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
1602 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
1603 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
1604 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
1605
1606 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
1607 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
1608
1609 int emu_soc_asic_init(struct amdgpu_device *adev);
1610
1611 /*
1612 * Register read & write functions.
1613 */
1614
1615 #define AMDGPU_REGS_IDX (1<<0)
1616 #define AMDGPU_REGS_NO_KIQ (1<<1)
1617
1618 #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
1619 #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
1620
1621 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
1622 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
1623
1624 #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
1625 #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
1626 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
1627 #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
1628 #define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
1629 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1630 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1631 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
1632 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
1633 #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
1634 #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
1635 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
1636 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
1637 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
1638 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
1639 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
1640 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
1641 #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
1642 #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
1643 #define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
1644 #define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
1645 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
1646 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
1647 #define WREG32_P(reg, val, mask) \
1648 do { \
1649 uint32_t tmp_ = RREG32(reg); \
1650 tmp_ &= (mask); \
1651 tmp_ |= ((val) & ~(mask)); \
1652 WREG32(reg, tmp_); \
1653 } while (0)
1654 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1655 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
1656 #define WREG32_PLL_P(reg, val, mask) \
1657 do { \
1658 uint32_t tmp_ = RREG32_PLL(reg); \
1659 tmp_ &= (mask); \
1660 tmp_ |= ((val) & ~(mask)); \
1661 WREG32_PLL(reg, tmp_); \
1662 } while (0)
1663 #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
1664 #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
1665 #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
1666
1667 #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
1668 #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
1669 #define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
1670 #define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
1671
1672 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
1673 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
1674
1675 #define REG_SET_FIELD(orig_val, reg, field, field_val) \
1676 (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
1677 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
1678
1679 #define REG_GET_FIELD(value, reg, field) \
1680 (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
1681
1682 #define WREG32_FIELD(reg, field, val) \
1683 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1684
1685 #define WREG32_FIELD_OFFSET(reg, offset, field, val) \
1686 WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
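
/*
 * Illustrative usage sketch (FOO_CNTL and ENABLE are placeholder names, not
 * real register definitions): a read-modify-write through the field helpers
 * looks like
 *
 *	u32 tmp = RREG32(mmFOO_CNTL);
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, ENABLE, 1);
 *	WREG32(mmFOO_CNTL, tmp);
 *
 * which is essentially what WREG32_FIELD(FOO_CNTL, ENABLE, 1) does in one step.
 */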
1687
1688 /*
1689 * BIOS helpers.
1690 */
1691 #define RBIOS8(i) (adev->bios[i])
1692 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1693 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
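
/*
 * Illustrative sketch: these helpers read little-endian values out of the raw
 * BIOS image, e.g. RBIOS16(off) combines adev->bios[off] and adev->bios[off + 1]
 * into a 16-bit value.
 */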
1694
1695 static inline struct amdgpu_sdma_instance *
1696 amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
1697 {
1698 struct amdgpu_device *adev = ring->adev;
1699 int i;
1700
1701 for (i = 0; i < adev->sdma.num_instances; i++)
1702 if (&adev->sdma.instance[i].ring == ring)
1703 break;
1704
1705 if (i < AMDGPU_MAX_SDMA_INSTANCES)
1706 return &adev->sdma.instance[i];
1707 else
1708 return NULL;
1709 }
1710
1711 /*
1712 * ASICs macro.
1713 */
1714 #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
1715 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
1716 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
1717 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
1718 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
1719 #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
1720 #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
1721 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
1722 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
1723 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
1724 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
1725 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
1726 #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
1727 #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
1728 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
1729 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
1730 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
1731 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
1732 #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
1733 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
1734 #define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
1735 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
1736 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
1737 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
1738 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
1739 #define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
1740 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
1741 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
1742 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
1743 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
1744 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
1745 #define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
1746 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
1747 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
1748 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
1749 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
1750 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
1751 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
1752 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
1753 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
1754 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
1755 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
1756 #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
1757 #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
1758 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
1759 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
1760 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
1761 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
1762 #define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
1763 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
1764 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
1765 #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
1766 #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
1767 #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
1768 #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
1769 #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
1770 #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
1771 #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
1772 #define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
1773 #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
1774 #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
1775 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
1776 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
1777 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
1778 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
1779 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
1780 #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
1781 #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
1782 #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
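/*
 * All of the wrappers above simply dispatch through the per-IP function
 * pointer tables (asic_funcs, gmc_funcs, ring->funcs, ih_funcs, ...), so
 * for example amdgpu_ring_get_rptr(r) expands to (r)->funcs->get_rptr((r)).
 * This keeps common code ASIC-agnostic while each hardware generation
 * installs its own implementations in the corresponding funcs table.
 */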
1783
1784 /* Common functions */
1785 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
1786 			      struct amdgpu_job *job, bool force);
1787 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
1788 bool amdgpu_device_need_post(struct amdgpu_device *adev);
1789 void amdgpu_display_update_priority(struct amdgpu_device *adev);
1790
1791 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
1792 u64 num_vis_bytes);
1793 void amdgpu_device_vram_location(struct amdgpu_device *adev,
1794 struct amdgpu_gmc *mc, u64 base);
1795 void amdgpu_device_gart_location(struct amdgpu_device *adev,
1796 struct amdgpu_gmc *mc);
1797 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
1798 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1799 const u32 *registers,
1800 const u32 array_size);
1801
1802 bool amdgpu_device_is_px(struct drm_device *dev);
1803 /* atpx handler */
1804 #if defined(CONFIG_VGA_SWITCHEROO)
1805 void amdgpu_register_atpx_handler(void);
1806 void amdgpu_unregister_atpx_handler(void);
1807 bool amdgpu_has_atpx_dgpu_power_cntl(void);
1808 bool amdgpu_is_atpx_hybrid(void);
1809 bool amdgpu_atpx_dgpu_req_power_for_displays(void);
1810 bool amdgpu_has_atpx(void);
1811 #else
1812 static inline void amdgpu_register_atpx_handler(void) {}
1813 static inline void amdgpu_unregister_atpx_handler(void) {}
1814 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1815 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
1816 static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
1817 static inline bool amdgpu_has_atpx(void) { return false; }
1818 #endif
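/*
 * When CONFIG_VGA_SWITCHEROO is not set, the inline stubs above compile to
 * no-ops (or return false), so callers never need their own #ifdef guards.
 */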
1819
1820 #if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
1821 void *amdgpu_atpx_get_dhandle(void);
1822 #else
1823 static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
1824 #endif
1825
1826 /*
1827 * KMS
1828 */
1829 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
1830 extern const int amdgpu_max_kms_ioctl;
1831
1832 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
1833 void amdgpu_driver_unload_kms(struct drm_device *dev);
1834 void amdgpu_driver_lastclose_kms(struct drm_device *dev);
1835 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
1836 void amdgpu_driver_postclose_kms(struct drm_device *dev,
1837 struct drm_file *file_priv);
1838 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
1839 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
1840 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
1841 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
1842 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
1843 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
1844 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
1845 unsigned long arg);
1846
1847 /*
1848 * functions used by amdgpu_encoder.c
1849 */
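/*
 * struct amdgpu_afmt_acr holds the HDMI Audio Clock Regeneration (ACR)
 * N/CTS pairs for the 32 kHz, 44.1 kHz and 48 kHz audio sample rates at a
 * given pixel clock; amdgpu_afmt_acr() below returns the set for the
 * supplied clock. Roughly, per the HDMI ACR relation,
 * 128 * sample_rate = clock * N / CTS.
 */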
1850 struct amdgpu_afmt_acr {
1851 u32 clock;
1852
1853 int n_32khz;
1854 int cts_32khz;
1855
1856 int n_44_1khz;
1857 int cts_44_1khz;
1858
1859 int n_48khz;
1860 int cts_48khz;
1861
1862 };
1863
1864 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
1865
1866 /* amdgpu_acpi.c */
1867 #if defined(CONFIG_ACPI)
1868 int amdgpu_acpi_init(struct amdgpu_device *adev);
1869 void amdgpu_acpi_fini(struct amdgpu_device *adev);
1870 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
1871 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
1872 u8 perf_req, bool advertise);
1873 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
1874 #else
1875 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
1876 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
1877 #endif
1878
1879 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1880 uint64_t addr, struct amdgpu_bo **bo,
1881 struct amdgpu_bo_va_mapping **mapping);
1882
1883 #if defined(CONFIG_DRM_AMD_DC)
1884 int amdgpu_dm_display_resume(struct amdgpu_device *adev);
1885 #else
1886 static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
1887 #endif
1888
1889 #include "amdgpu_object.h"
1890 #endif
1891