/*	$NetBSD: amdgpu.h,v 1.1.1.1 2018/08/27 01:34:43 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"

#include "gpu_scheduler.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern int amdgpu_hard_reset;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_enable_semaphores;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE			16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
#define AMDGPUFB_CONN_LIMIT			4
#define AMDGPU_BIOS_NUM_SCRATCH			8

/* max number of rings */
#define AMDGPU_MAX_RINGS			16
#define AMDGPU_MAX_GFX_RINGS			1
#define AMDGPU_MAX_COMPUTE_RINGS		8
#define AMDGPU_MAX_VCE_RINGS			2

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES		2

/* number of hw syncs before falling back on blocking */
#define AMDGPU_NUM_SYNCS			4

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA                  0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX			(1 << 0)
#define AMDGPU_RESET_COMPUTE			(1 << 1)
#define AMDGPU_RESET_DMA			(1 << 2)
#define AMDGPU_RESET_CP				(1 << 3)
#define AMDGPU_RESET_GRBM			(1 << 4)
#define AMDGPU_RESET_DMA1			(1 << 5)
#define AMDGPU_RESET_RLC			(1 << 6)
#define AMDGPU_RESET_SEM			(1 << 7)
#define AMDGPU_RESET_IH				(1 << 8)
#define AMDGPU_RESET_VMC			(1 << 9)
#define AMDGPU_RESET_MC				(1 << 10)
#define AMDGPU_RESET_DISPLAY			(1 << 11)
#define AMDGPU_RESET_UVD			(1 << 12)
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)

/* CG block flags */
#define AMDGPU_CG_BLOCK_GFX			(1 << 0)
#define AMDGPU_CG_BLOCK_MC			(1 << 1)
#define AMDGPU_CG_BLOCK_SDMA			(1 << 2)
#define AMDGPU_CG_BLOCK_UVD			(1 << 3)
#define AMDGPU_CG_BLOCK_VCE			(1 << 4)
#define AMDGPU_CG_BLOCK_HDP			(1 << 5)
#define AMDGPU_CG_BLOCK_BIF			(1 << 6)

/* CG flags */
#define AMDGPU_CG_SUPPORT_GFX_MGCG		(1 << 0)
#define AMDGPU_CG_SUPPORT_GFX_MGLS		(1 << 1)
#define AMDGPU_CG_SUPPORT_GFX_CGCG		(1 << 2)
#define AMDGPU_CG_SUPPORT_GFX_CGLS		(1 << 3)
#define AMDGPU_CG_SUPPORT_GFX_CGTS		(1 << 4)
#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
#define AMDGPU_CG_SUPPORT_GFX_CP_LS		(1 << 6)
#define AMDGPU_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
#define AMDGPU_CG_SUPPORT_MC_LS			(1 << 8)
#define AMDGPU_CG_SUPPORT_MC_MGCG		(1 << 9)
#define AMDGPU_CG_SUPPORT_SDMA_LS		(1 << 10)
#define AMDGPU_CG_SUPPORT_SDMA_MGCG		(1 << 11)
#define AMDGPU_CG_SUPPORT_BIF_LS		(1 << 12)
#define AMDGPU_CG_SUPPORT_UVD_MGCG		(1 << 13)
#define AMDGPU_CG_SUPPORT_VCE_MGCG		(1 << 14)
#define AMDGPU_CG_SUPPORT_HDP_LS		(1 << 15)
#define AMDGPU_CG_SUPPORT_HDP_MGCG		(1 << 16)

/* PG flags */
#define AMDGPU_PG_SUPPORT_GFX_PG		(1 << 0)
#define AMDGPU_PG_SUPPORT_GFX_SMG		(1 << 1)
#define AMDGPU_PG_SUPPORT_GFX_DMG		(1 << 2)
#define AMDGPU_PG_SUPPORT_UVD			(1 << 3)
#define AMDGPU_PG_SUPPORT_VCE			(1 << 4)
#define AMDGPU_PG_SUPPORT_CP			(1 << 5)
#define AMDGPU_PG_SUPPORT_GDS			(1 << 6)
#define AMDGPU_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
#define AMDGPU_PG_SUPPORT_SDMA			(1 << 8)
#define AMDGPU_PG_SUPPORT_ACP			(1 << 9)
#define AMDGPU_PG_SUPPORT_SAMU			(1 << 10)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
#define AMDGPU_GFX_SAFE_MODE			0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE		0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_fence;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data, e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
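
/*
 * Illustrative sketch, not part of the driver API: a caller such as
 * amdgpu_copy_buffer() (declared below) is expected to split a large
 * copy into chunks of at most copy_max_bytes, reserving copy_num_dw
 * ring dwords per chunk.  A hypothetical chunk loop honoring that
 * contract:
 *
 *	while (byte_count) {
 *		uint32_t cur = min(byte_count, funcs->copy_max_bytes);
 *
 *		funcs->emit_copy_buffer(ib, src, dst, cur);
 *		src += cur;
 *		dst += cur;
 *		byte_count -= cur;
 *	}
 */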

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ib *ib);
};
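
/*
 * Illustrative example, inferred from the signatures above rather than
 * a normative spec: a call like
 *
 *	write_pte(ib, pe, addr, 4, incr, flags);
 *
 * emits commands that store four PTEs at GPU address pe, mapping
 * addr, addr + incr, addr + 2 * incr and addr + 3 * incr, each entry
 * tagged with the given access flags (e.g. AMDGPU_PTE_VALID, defined
 * later in this header).
 */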

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	bool (*emit_semaphore)(struct amdgpu_ring *ring,
			       struct amdgpu_semaphore *semaphore,
			       bool emit_wait);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
	atomic64_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	wait_queue_head_t		fence_queue;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
	uint64_t			seq;

	/* filp or special value for fence creator */
	void				*owner;

	wait_queue_t			fence_wake;
};

struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo 	*bo;
	/* write-back address offset to bo start */
	uint32_t                offset;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);

/*
 * TTM.
 */
struct amdgpu_mman {
	struct ttm_bo_global_ref        bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry			*vram;
	struct dentry			*gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs	*buffer_funcs;
	struct amdgpu_ring			*buffer_funcs_ring;
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo		*robj;
	struct ttm_validate_buffer	tv;
	struct amdgpu_bo_va		*bo_va;
	unsigned			prefered_domains;
	unsigned			allowed_domains;
	uint32_t			priority;
};

struct amdgpu_bo_va_mapping {
	struct list_head		list;
	struct interval_tree_node	it;
	uint64_t			offset;
	uint32_t			flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	struct mutex		        mutex;
	/* protected by bo being reserved */
	struct list_head		bo_list;
	struct fence		        *last_pt_update;
	unsigned			ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head		vm_status;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;
};

#define AMDGPU_GEM_DOMAIN_MAX		0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				initial_domain;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	void				*kptr;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct amdgpu_device		*adev;
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	pid_t				pid;
	struct amdgpu_mn		*mn;
	struct list_head		mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
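
/*
 * Illustrative usage of the macro above: given a struct drm_gem_object
 * embedded in an amdgpu_bo as gem_base, recover the containing bo:
 *
 *	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 *
 * This is plain container_of() pointer arithmetic, so it is only valid
 * for GEM objects that were actually allocated as part of an amdgpu_bo.
 */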

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
				struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like the
 * indirect buffer or semaphore code, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size.  If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects on the
 * same alignment).
 */
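/* Worked example (hypothetical numbers, following the scheme above):
 * with a 64 KB manager whose last sub-allocation ends at offset 56 KB,
 * the room at the end is 64 KB - 56 KB = 8 KB.  An 8 KB request fits
 * and is placed at offset 56 KB; a 12 KB request does not, so the
 * allocator instead waits on the fences of the oldest sub-allocations
 * (starting from offset 0) until object_offset + object_size >= 12 KB,
 * and hands back that recycled range.
 */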
struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_MAX_RINGS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct fence		        *fence;
};

/*
 * GEM objects.
 */
struct amdgpu_gem {
	struct mutex		mutex;
	struct list_head	objects;
};

int amdgpu_gem_init(struct amdgpu_device *adev);
void amdgpu_gem_fini(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
				int alignment, u32 initial_domain,
				u64 flags, bool kernel,
				struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Semaphores.
 */
struct amdgpu_semaphore {
	struct amdgpu_sa_bo	*sa_bo;
	signed			waiters;
	uint64_t		gpu_addr;
};

int amdgpu_semaphore_create(struct amdgpu_device *adev,
			    struct amdgpu_semaphore **semaphore);
bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
				  struct amdgpu_semaphore *semaphore);
bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
				struct amdgpu_semaphore *semaphore);
void amdgpu_semaphore_free(struct amdgpu_device *adev,
			   struct amdgpu_semaphore **semaphore,
			   struct fence *fence);

/*
 * Synchronization
 */
struct amdgpu_sync {
	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
	struct fence		*sync_to[AMDGPU_MAX_RINGS];
	DECLARE_HASHTABLE(fences, 4);
	struct fence	        *last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *fence);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
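/* Example of the alignment arithmetic (illustrative):
 * AMDGPU_GPU_PAGE_ALIGN(6000) = (6000 + 4095) & ~4095 = 8192, i.e.
 * sizes round up to the next 4 KB GPU page boundary, while values that
 * are already page-aligned are left unchanged.
 */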

struct amdgpu_gart {
	dma_addr_t			table_addr;
	struct amdgpu_bo		*robj;
	void				*ptr;
	unsigned			num_gpu_pages;
	unsigned			num_cpu_pages;
	unsigned			table_size;
	struct page			**pages;
	dma_addr_t			*pages_addr;
	bool				ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB of VRAM we need to lie
	 * about the vram size near the mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	u64                     gtt_base_align;
	u64                     mc_mask;
	const struct firmware   *fw;	/* MC firmware */
	uint32_t                fw_version;
	struct amdgpu_irq_src	vm_fault;
	uint32_t		vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ                     = 0x000,
	AMDGPU_DOORBELL_HIQ                     = 0x001,
	AMDGPU_DOORBELL_DIQ                     = 0x002,
	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
	AMDGPU_DOORBELL_IH                      = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t		base;
	resource_size_t		size;
	u32 __iomem		*ptr;
	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				phys_addr_t *aperture_base,
				size_t *aperture_size,
				size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct work_struct		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_rbo;
	struct fence			*excl;
	unsigned			shared_count;
	struct fence			**shared;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo		*sa_bo;
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
	struct amdgpu_ring		*ring;
	struct amdgpu_fence		*fence;
	struct amdgpu_user_fence        *user;
	struct amdgpu_vm		*vm;
	struct amdgpu_ctx		*ctx;
	struct amdgpu_sync		sync;
	uint32_t			gds_base, gds_size;
	uint32_t			gws_base, gws_size;
	uint32_t			oa_base, oa_size;
	uint32_t			flags;
	/* resulting sequence number */
	uint64_t			sequence;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

extern struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **fence);

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct amd_gpu_scheduler 	sched;

	spinlock_t              fence_lock;
	struct mutex		*ring_lock;
	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			next_rptr_gpu_addr;
	volatile u32		*next_rptr_cpu_addr;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	unsigned		ring_free_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	bool			ready;
	u32			nop;
	u32			idx;
	u64			last_semaphore_signal_addr;
	u64			last_semaphore_wait_addr;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	u32			doorbell_index;
	bool			use_doorbell;
	unsigned		wptr_offs;
	unsigned		next_rptr_offs;
	unsigned		fence_offs;
	struct amdgpu_ctx	*current_ctx;
	enum amdgpu_ring_type	type;
	char			name[16];
	bool                    is_pte_ring;
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
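/* Example (illustrative; amdgpu_vm_block_size is a module parameter,
 * declared above): with a block size of 9, each page table holds
 * 1 << 9 = 512 entries, which at 8 bytes per PTE would be exactly one
 * 4 KB page of PTEs.
 */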

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
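/* Illustrative note on the encoding above (derived from the defines,
 * not a hardware spec): the fragment field starts at bit 7 of the PTE
 * and holds the log2 of the fragment size in 4 KB pages.  So
 * AMDGPU_PTE_FRAG_64KB = (4 << 7) encodes
 * 2^AMDGPU_LOG2_PAGES_PER_FRAG = 16 contiguous 4 KB pages, i.e. a
 * 64 KB fragment, while AMDGPU_PTE_FRAG_4KB encodes 2^0 = 1 page.
 */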

/* How to program VM fault handling */
917*d350ecf5Sriastradh #define AMDGPU_VM_FAULT_STOP_NEVER	0
918*d350ecf5Sriastradh #define AMDGPU_VM_FAULT_STOP_FIRST	1
919*d350ecf5Sriastradh #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
920*d350ecf5Sriastradh 
921*d350ecf5Sriastradh struct amdgpu_vm_pt {
922*d350ecf5Sriastradh 	struct amdgpu_bo	*bo;
923*d350ecf5Sriastradh 	uint64_t		addr;
924*d350ecf5Sriastradh };
925*d350ecf5Sriastradh 
926*d350ecf5Sriastradh struct amdgpu_vm_id {
927*d350ecf5Sriastradh 	unsigned		id;
928*d350ecf5Sriastradh 	uint64_t		pd_gpu_addr;
929*d350ecf5Sriastradh 	/* last flushed PD/PT update */
930*d350ecf5Sriastradh 	struct fence	        *flushed_updates;
931*d350ecf5Sriastradh };
932*d350ecf5Sriastradh 
933*d350ecf5Sriastradh struct amdgpu_vm {
934*d350ecf5Sriastradh 	struct rb_root		va;
935*d350ecf5Sriastradh 
936*d350ecf5Sriastradh 	/* protecting invalidated */
937*d350ecf5Sriastradh 	spinlock_t		status_lock;
938*d350ecf5Sriastradh 
939*d350ecf5Sriastradh 	/* BOs moved, but not yet updated in the PT */
940*d350ecf5Sriastradh 	struct list_head	invalidated;
941*d350ecf5Sriastradh 
942*d350ecf5Sriastradh 	/* BOs cleared in the PT because of a move */
943*d350ecf5Sriastradh 	struct list_head	cleared;
944*d350ecf5Sriastradh 
945*d350ecf5Sriastradh 	/* BO mappings freed, but not yet updated in the PT */
946*d350ecf5Sriastradh 	struct list_head	freed;
947*d350ecf5Sriastradh 
948*d350ecf5Sriastradh 	/* contains the page directory */
949*d350ecf5Sriastradh 	struct amdgpu_bo	*page_directory;
950*d350ecf5Sriastradh 	unsigned		max_pde_used;
951*d350ecf5Sriastradh 	struct fence		*page_directory_fence;
952*d350ecf5Sriastradh 
953*d350ecf5Sriastradh 	/* array of page tables, one for each page directory entry */
954*d350ecf5Sriastradh 	struct amdgpu_vm_pt	*page_tables;
955*d350ecf5Sriastradh 
956*d350ecf5Sriastradh 	/* for id and flush management per ring */
957*d350ecf5Sriastradh 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
958*d350ecf5Sriastradh 	/* for interval tree */
959*d350ecf5Sriastradh 	spinlock_t		it_lock;
960*d350ecf5Sriastradh 	/* protecting freed */
961*d350ecf5Sriastradh 	spinlock_t		freed_lock;
962*d350ecf5Sriastradh };
963*d350ecf5Sriastradh 
964*d350ecf5Sriastradh struct amdgpu_vm_manager {
965*d350ecf5Sriastradh 	struct {
966*d350ecf5Sriastradh 		struct fence	*active;
967*d350ecf5Sriastradh 		atomic_long_t	owner;
968*d350ecf5Sriastradh 	} ids[AMDGPU_NUM_VM];
969*d350ecf5Sriastradh 
970*d350ecf5Sriastradh 	uint32_t				max_pfn;
971*d350ecf5Sriastradh 	/* number of VMIDs */
972*d350ecf5Sriastradh 	unsigned				nvm;
973*d350ecf5Sriastradh 	/* vram base address for page table entry  */
974*d350ecf5Sriastradh 	u64					vram_base_offset;
975*d350ecf5Sriastradh 	/* is vm enabled? */
976*d350ecf5Sriastradh 	bool					enabled;
977*d350ecf5Sriastradh 	/* vm pte handling */
978*d350ecf5Sriastradh 	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
979*d350ecf5Sriastradh 	struct amdgpu_ring                      *vm_pte_funcs_ring;
980*d350ecf5Sriastradh };
981*d350ecf5Sriastradh 
982*d350ecf5Sriastradh void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
983*d350ecf5Sriastradh int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
984*d350ecf5Sriastradh void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
985*d350ecf5Sriastradh struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
986*d350ecf5Sriastradh 					       struct amdgpu_vm *vm,
987*d350ecf5Sriastradh 					       struct list_head *head);
988*d350ecf5Sriastradh int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
989*d350ecf5Sriastradh 		      struct amdgpu_sync *sync);
990*d350ecf5Sriastradh void amdgpu_vm_flush(struct amdgpu_ring *ring,
991*d350ecf5Sriastradh 		     struct amdgpu_vm *vm,
992*d350ecf5Sriastradh 		     struct fence *updates);
993*d350ecf5Sriastradh void amdgpu_vm_fence(struct amdgpu_device *adev,
994*d350ecf5Sriastradh 		     struct amdgpu_vm *vm,
995*d350ecf5Sriastradh 		     struct fence *fence);
996*d350ecf5Sriastradh uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
997*d350ecf5Sriastradh int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
998*d350ecf5Sriastradh 				    struct amdgpu_vm *vm);
999*d350ecf5Sriastradh int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1000*d350ecf5Sriastradh 			  struct amdgpu_vm *vm);
1001*d350ecf5Sriastradh int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1002*d350ecf5Sriastradh 			     struct amdgpu_sync *sync);
1003*d350ecf5Sriastradh int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1004*d350ecf5Sriastradh 			struct amdgpu_bo_va *bo_va,
1005*d350ecf5Sriastradh 			struct ttm_mem_reg *mem);
1006*d350ecf5Sriastradh void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1007*d350ecf5Sriastradh 			     struct amdgpu_bo *bo);
1008*d350ecf5Sriastradh struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1009*d350ecf5Sriastradh 				       struct amdgpu_bo *bo);
1010*d350ecf5Sriastradh struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1011*d350ecf5Sriastradh 				      struct amdgpu_vm *vm,
1012*d350ecf5Sriastradh 				      struct amdgpu_bo *bo);
1013*d350ecf5Sriastradh int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1014*d350ecf5Sriastradh 		     struct amdgpu_bo_va *bo_va,
1015*d350ecf5Sriastradh 		     uint64_t addr, uint64_t offset,
1016*d350ecf5Sriastradh 		     uint64_t size, uint32_t flags);
1017*d350ecf5Sriastradh int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1018*d350ecf5Sriastradh 		       struct amdgpu_bo_va *bo_va,
1019*d350ecf5Sriastradh 		       uint64_t addr);
1020*d350ecf5Sriastradh void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1021*d350ecf5Sriastradh 		      struct amdgpu_bo_va *bo_va);
1022*d350ecf5Sriastradh int amdgpu_vm_free_job(struct amdgpu_job *job);
1023*d350ecf5Sriastradh 
1024*d350ecf5Sriastradh /*
1025*d350ecf5Sriastradh  * context related structures
1026*d350ecf5Sriastradh  */
1027*d350ecf5Sriastradh 
1028*d350ecf5Sriastradh #define AMDGPU_CTX_MAX_CS_PENDING	16
1029*d350ecf5Sriastradh 
1030*d350ecf5Sriastradh struct amdgpu_ctx_ring {
1031*d350ecf5Sriastradh 	uint64_t		sequence;
1032*d350ecf5Sriastradh 	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
1033*d350ecf5Sriastradh 	struct amd_sched_entity	entity;
1034*d350ecf5Sriastradh };
1035*d350ecf5Sriastradh 
1036*d350ecf5Sriastradh struct amdgpu_ctx {
1037*d350ecf5Sriastradh 	struct kref		refcount;
1038*d350ecf5Sriastradh 	struct amdgpu_device    *adev;
1039*d350ecf5Sriastradh 	unsigned		reset_counter;
1040*d350ecf5Sriastradh 	spinlock_t		ring_lock;
1041*d350ecf5Sriastradh 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
1042*d350ecf5Sriastradh };
1043*d350ecf5Sriastradh 
1044*d350ecf5Sriastradh struct amdgpu_ctx_mgr {
1045*d350ecf5Sriastradh 	struct amdgpu_device	*adev;
1046*d350ecf5Sriastradh 	struct mutex		lock;
1047*d350ecf5Sriastradh 	/* protected by lock */
1048*d350ecf5Sriastradh 	struct idr		ctx_handles;
1049*d350ecf5Sriastradh };
1050*d350ecf5Sriastradh 
1051*d350ecf5Sriastradh int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
1052*d350ecf5Sriastradh 		    struct amdgpu_ctx *ctx);
1053*d350ecf5Sriastradh void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
1054*d350ecf5Sriastradh 
1055*d350ecf5Sriastradh struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
1056*d350ecf5Sriastradh int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
1057*d350ecf5Sriastradh 
1058*d350ecf5Sriastradh uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
1059*d350ecf5Sriastradh 			      struct fence *fence);
1060*d350ecf5Sriastradh struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
1061*d350ecf5Sriastradh 				   struct amdgpu_ring *ring, uint64_t seq);
1062*d350ecf5Sriastradh 
1063*d350ecf5Sriastradh int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
1064*d350ecf5Sriastradh 		     struct drm_file *filp);
1065*d350ecf5Sriastradh 
1066*d350ecf5Sriastradh void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
1067*d350ecf5Sriastradh void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
1068*d350ecf5Sriastradh 
1069*d350ecf5Sriastradh /*
1070*d350ecf5Sriastradh  * file private structure
1071*d350ecf5Sriastradh  */
1072*d350ecf5Sriastradh 
1073*d350ecf5Sriastradh struct amdgpu_fpriv {
1074*d350ecf5Sriastradh 	struct amdgpu_vm	vm;
1075*d350ecf5Sriastradh 	struct mutex		bo_list_lock;
1076*d350ecf5Sriastradh 	struct idr		bo_list_handles;
1077*d350ecf5Sriastradh 	struct amdgpu_ctx_mgr	ctx_mgr;
1078*d350ecf5Sriastradh };
1079*d350ecf5Sriastradh 
1080*d350ecf5Sriastradh /*
1081*d350ecf5Sriastradh  * residency list
1082*d350ecf5Sriastradh  */
1083*d350ecf5Sriastradh 
1084*d350ecf5Sriastradh struct amdgpu_bo_list {
1085*d350ecf5Sriastradh 	struct mutex lock;
1086*d350ecf5Sriastradh 	struct amdgpu_bo *gds_obj;
1087*d350ecf5Sriastradh 	struct amdgpu_bo *gws_obj;
1088*d350ecf5Sriastradh 	struct amdgpu_bo *oa_obj;
1089*d350ecf5Sriastradh 	bool has_userptr;
1090*d350ecf5Sriastradh 	unsigned num_entries;
1091*d350ecf5Sriastradh 	struct amdgpu_bo_list_entry *array;
1092*d350ecf5Sriastradh };
1093*d350ecf5Sriastradh 
1094*d350ecf5Sriastradh struct amdgpu_bo_list *
1095*d350ecf5Sriastradh amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
1096*d350ecf5Sriastradh void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
1097*d350ecf5Sriastradh void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
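
/*
 * Usage sketch (editorial illustration): resolving a residency-list
 * handle from file-private state and dropping it afterwards.  "fpriv"
 * and "handle" are assumed to come from the ioctl caller.
 *
 *	struct amdgpu_bo_list *list = amdgpu_bo_list_get(fpriv, handle);
 *	if (list) {
 *		... walk list->array[0 .. list->num_entries - 1] ...
 *		amdgpu_bo_list_put(list);
 *	}
 */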
1098*d350ecf5Sriastradh 
1099*d350ecf5Sriastradh /*
1100*d350ecf5Sriastradh  * GFX (graphics engine)
1101*d350ecf5Sriastradh  */
1102*d350ecf5Sriastradh #include "clearstate_defs.h"
1103*d350ecf5Sriastradh 
1104*d350ecf5Sriastradh struct amdgpu_rlc {
1105*d350ecf5Sriastradh 	/* for power gating */
1106*d350ecf5Sriastradh 	struct amdgpu_bo	*save_restore_obj;
1107*d350ecf5Sriastradh 	uint64_t		save_restore_gpu_addr;
1108*d350ecf5Sriastradh 	volatile uint32_t	*sr_ptr;
1109*d350ecf5Sriastradh 	const u32               *reg_list;
1110*d350ecf5Sriastradh 	u32                     reg_list_size;
1111*d350ecf5Sriastradh 	/* for clear state */
1112*d350ecf5Sriastradh 	struct amdgpu_bo	*clear_state_obj;
1113*d350ecf5Sriastradh 	uint64_t		clear_state_gpu_addr;
1114*d350ecf5Sriastradh 	volatile uint32_t	*cs_ptr;
1115*d350ecf5Sriastradh 	const struct cs_section_def   *cs_data;
1116*d350ecf5Sriastradh 	u32                     clear_state_size;
1117*d350ecf5Sriastradh 	/* for cp tables */
1118*d350ecf5Sriastradh 	struct amdgpu_bo	*cp_table_obj;
1119*d350ecf5Sriastradh 	uint64_t		cp_table_gpu_addr;
1120*d350ecf5Sriastradh 	volatile uint32_t	*cp_table_ptr;
1121*d350ecf5Sriastradh 	u32                     cp_table_size;
1122*d350ecf5Sriastradh };
1123*d350ecf5Sriastradh 
1124*d350ecf5Sriastradh struct amdgpu_mec {
1125*d350ecf5Sriastradh 	struct amdgpu_bo	*hpd_eop_obj;
1126*d350ecf5Sriastradh 	u64			hpd_eop_gpu_addr;
1127*d350ecf5Sriastradh 	u32			num_pipe;
1128*d350ecf5Sriastradh 	u32			num_mec;
1129*d350ecf5Sriastradh 	u32			num_queue;
1130*d350ecf5Sriastradh };
1131*d350ecf5Sriastradh 
1132*d350ecf5Sriastradh /*
1133*d350ecf5Sriastradh  * GPU scratch registers structures, functions & helpers
1134*d350ecf5Sriastradh  */
1135*d350ecf5Sriastradh struct amdgpu_scratch {
1136*d350ecf5Sriastradh 	unsigned		num_reg;
1137*d350ecf5Sriastradh 	uint32_t                reg_base;
1138*d350ecf5Sriastradh 	bool			free[32];
1139*d350ecf5Sriastradh 	uint32_t		reg[32];
1140*d350ecf5Sriastradh };
1141*d350ecf5Sriastradh 
1142*d350ecf5Sriastradh /*
1143*d350ecf5Sriastradh  * GFX configurations
1144*d350ecf5Sriastradh  */
1145*d350ecf5Sriastradh struct amdgpu_gca_config {
1146*d350ecf5Sriastradh 	unsigned max_shader_engines;
1147*d350ecf5Sriastradh 	unsigned max_tile_pipes;
1148*d350ecf5Sriastradh 	unsigned max_cu_per_sh;
1149*d350ecf5Sriastradh 	unsigned max_sh_per_se;
1150*d350ecf5Sriastradh 	unsigned max_backends_per_se;
1151*d350ecf5Sriastradh 	unsigned max_texture_channel_caches;
1152*d350ecf5Sriastradh 	unsigned max_gprs;
1153*d350ecf5Sriastradh 	unsigned max_gs_threads;
1154*d350ecf5Sriastradh 	unsigned max_hw_contexts;
1155*d350ecf5Sriastradh 	unsigned sc_prim_fifo_size_frontend;
1156*d350ecf5Sriastradh 	unsigned sc_prim_fifo_size_backend;
1157*d350ecf5Sriastradh 	unsigned sc_hiz_tile_fifo_size;
1158*d350ecf5Sriastradh 	unsigned sc_earlyz_tile_fifo_size;
1159*d350ecf5Sriastradh 
1160*d350ecf5Sriastradh 	unsigned num_tile_pipes;
1161*d350ecf5Sriastradh 	unsigned backend_enable_mask;
1162*d350ecf5Sriastradh 	unsigned mem_max_burst_length_bytes;
1163*d350ecf5Sriastradh 	unsigned mem_row_size_in_kb;
1164*d350ecf5Sriastradh 	unsigned shader_engine_tile_size;
1165*d350ecf5Sriastradh 	unsigned num_gpus;
1166*d350ecf5Sriastradh 	unsigned multi_gpu_tile_size;
1167*d350ecf5Sriastradh 	unsigned mc_arb_ramcfg;
1168*d350ecf5Sriastradh 	unsigned gb_addr_config;
1169*d350ecf5Sriastradh 
1170*d350ecf5Sriastradh 	uint32_t tile_mode_array[32];
1171*d350ecf5Sriastradh 	uint32_t macrotile_mode_array[16];
1172*d350ecf5Sriastradh };
1173*d350ecf5Sriastradh 
1174*d350ecf5Sriastradh struct amdgpu_gfx {
1175*d350ecf5Sriastradh 	struct mutex			gpu_clock_mutex;
1176*d350ecf5Sriastradh 	struct amdgpu_gca_config	config;
1177*d350ecf5Sriastradh 	struct amdgpu_rlc		rlc;
1178*d350ecf5Sriastradh 	struct amdgpu_mec		mec;
1179*d350ecf5Sriastradh 	struct amdgpu_scratch		scratch;
1180*d350ecf5Sriastradh 	const struct firmware		*me_fw;	/* ME firmware */
1181*d350ecf5Sriastradh 	uint32_t			me_fw_version;
1182*d350ecf5Sriastradh 	const struct firmware		*pfp_fw; /* PFP firmware */
1183*d350ecf5Sriastradh 	uint32_t			pfp_fw_version;
1184*d350ecf5Sriastradh 	const struct firmware		*ce_fw;	/* CE firmware */
1185*d350ecf5Sriastradh 	uint32_t			ce_fw_version;
1186*d350ecf5Sriastradh 	const struct firmware		*rlc_fw; /* RLC firmware */
1187*d350ecf5Sriastradh 	uint32_t			rlc_fw_version;
1188*d350ecf5Sriastradh 	const struct firmware		*mec_fw; /* MEC firmware */
1189*d350ecf5Sriastradh 	uint32_t			mec_fw_version;
1190*d350ecf5Sriastradh 	const struct firmware		*mec2_fw; /* MEC2 firmware */
1191*d350ecf5Sriastradh 	uint32_t			mec2_fw_version;
1192*d350ecf5Sriastradh 	uint32_t			me_feature_version;
1193*d350ecf5Sriastradh 	uint32_t			ce_feature_version;
1194*d350ecf5Sriastradh 	uint32_t			pfp_feature_version;
1195*d350ecf5Sriastradh 	uint32_t			rlc_feature_version;
1196*d350ecf5Sriastradh 	uint32_t			mec_feature_version;
1197*d350ecf5Sriastradh 	uint32_t			mec2_feature_version;
1198*d350ecf5Sriastradh 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
1199*d350ecf5Sriastradh 	unsigned			num_gfx_rings;
1200*d350ecf5Sriastradh 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
1201*d350ecf5Sriastradh 	unsigned			num_compute_rings;
1202*d350ecf5Sriastradh 	struct amdgpu_irq_src		eop_irq;
1203*d350ecf5Sriastradh 	struct amdgpu_irq_src		priv_reg_irq;
1204*d350ecf5Sriastradh 	struct amdgpu_irq_src		priv_inst_irq;
1205*d350ecf5Sriastradh 	/* gfx status */
1206*d350ecf5Sriastradh 	uint32_t gfx_current_status;
1207*d350ecf5Sriastradh 	/* CE RAM size */
1208*d350ecf5Sriastradh 	unsigned ce_ram_size;
1209*d350ecf5Sriastradh };
1210*d350ecf5Sriastradh 
1211*d350ecf5Sriastradh int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
1212*d350ecf5Sriastradh 		  unsigned size, struct amdgpu_ib *ib);
1213*d350ecf5Sriastradh void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
1214*d350ecf5Sriastradh int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
1215*d350ecf5Sriastradh 		       struct amdgpu_ib *ib, void *owner);
1216*d350ecf5Sriastradh int amdgpu_ib_pool_init(struct amdgpu_device *adev);
1217*d350ecf5Sriastradh void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
1218*d350ecf5Sriastradh int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
1219*d350ecf5Sriastradh /* Ring access between alloc/lock and commit/undo cannot sleep */
1220*d350ecf5Sriastradh void amdgpu_ring_free_size(struct amdgpu_ring *ring);
1221*d350ecf5Sriastradh int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
1222*d350ecf5Sriastradh int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
1223*d350ecf5Sriastradh void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
1224*d350ecf5Sriastradh void amdgpu_ring_commit(struct amdgpu_ring *ring);
1225*d350ecf5Sriastradh void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
1226*d350ecf5Sriastradh void amdgpu_ring_undo(struct amdgpu_ring *ring);
1227*d350ecf5Sriastradh void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
1228*d350ecf5Sriastradh unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
1229*d350ecf5Sriastradh 			    uint32_t **data);
1230*d350ecf5Sriastradh int amdgpu_ring_restore(struct amdgpu_ring *ring,
1231*d350ecf5Sriastradh 			unsigned size, uint32_t *data);
1232*d350ecf5Sriastradh int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1233*d350ecf5Sriastradh 		     unsigned ring_size, u32 nop, u32 align_mask,
1234*d350ecf5Sriastradh 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
1235*d350ecf5Sriastradh 		     enum amdgpu_ring_type ring_type);
1236*d350ecf5Sriastradh void amdgpu_ring_fini(struct amdgpu_ring *ring);
1237*d350ecf5Sriastradh struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
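
/*
 * Usage sketch (editorial illustration): reserving ring space, emitting
 * dwords and committing.  The packet contents and error handling are
 * elided; "ring" is assumed to be an initialized amdgpu_ring.
 *
 *	if (amdgpu_ring_lock(ring, 4) == 0) {
 *		amdgpu_ring_write(ring, ...);	(header and payload dwords)
 *		amdgpu_ring_unlock_commit(ring);
 *	}
 */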
1238*d350ecf5Sriastradh 
1239*d350ecf5Sriastradh /*
1240*d350ecf5Sriastradh  * CS.
1241*d350ecf5Sriastradh  */
1242*d350ecf5Sriastradh struct amdgpu_cs_chunk {
1243*d350ecf5Sriastradh 	uint32_t		chunk_id;
1244*d350ecf5Sriastradh 	uint32_t		length_dw;
1245*d350ecf5Sriastradh 	uint32_t		*kdata;
1246*d350ecf5Sriastradh 	void __user		*user_ptr;
1247*d350ecf5Sriastradh };
1248*d350ecf5Sriastradh 
1249*d350ecf5Sriastradh struct amdgpu_cs_parser {
1250*d350ecf5Sriastradh 	struct amdgpu_device	*adev;
1251*d350ecf5Sriastradh 	struct drm_file		*filp;
1252*d350ecf5Sriastradh 	struct amdgpu_ctx	*ctx;
1253*d350ecf5Sriastradh 	struct amdgpu_bo_list *bo_list;
1254*d350ecf5Sriastradh 	/* chunks */
1255*d350ecf5Sriastradh 	unsigned		nchunks;
1256*d350ecf5Sriastradh 	struct amdgpu_cs_chunk	*chunks;
1257*d350ecf5Sriastradh 	/* relocations */
1258*d350ecf5Sriastradh 	struct amdgpu_bo_list_entry	*vm_bos;
1259*d350ecf5Sriastradh 	struct list_head	validated;
1260*d350ecf5Sriastradh 	struct fence		*fence;
1261*d350ecf5Sriastradh 
1262*d350ecf5Sriastradh 	struct amdgpu_ib	*ibs;
1263*d350ecf5Sriastradh 	uint32_t		num_ibs;
1264*d350ecf5Sriastradh 
1265*d350ecf5Sriastradh 	struct ww_acquire_ctx	ticket;
1266*d350ecf5Sriastradh 
1267*d350ecf5Sriastradh 	/* user fence */
1268*d350ecf5Sriastradh 	struct amdgpu_user_fence	uf;
1269*d350ecf5Sriastradh 	struct amdgpu_bo_list_entry	uf_entry;
1270*d350ecf5Sriastradh };
1271*d350ecf5Sriastradh 
1272*d350ecf5Sriastradh struct amdgpu_job {
1273*d350ecf5Sriastradh 	struct amd_sched_job    base;
1274*d350ecf5Sriastradh 	struct amdgpu_device	*adev;
1275*d350ecf5Sriastradh 	struct amdgpu_ib	*ibs;
1276*d350ecf5Sriastradh 	uint32_t		num_ibs;
1277*d350ecf5Sriastradh 	void			*owner;
1278*d350ecf5Sriastradh 	struct amdgpu_user_fence uf;
1279*d350ecf5Sriastradh 	int (*free_job)(struct amdgpu_job *job);
1280*d350ecf5Sriastradh };
1281*d350ecf5Sriastradh #define to_amdgpu_job(sched_job)		\
1282*d350ecf5Sriastradh 		container_of((sched_job), struct amdgpu_job, base)
1283*d350ecf5Sriastradh 
1284*d350ecf5Sriastradh static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
1285*d350ecf5Sriastradh {
1286*d350ecf5Sriastradh 	return p->ibs[ib_idx].ptr[idx];
1287*d350ecf5Sriastradh }
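
/*
 * Example (editorial): fetching dword "idx" of the first IB of a parser
 * is simply
 *
 *	u32 dw = amdgpu_get_ib_value(p, 0, idx);
 *
 * No bounds checking is done; callers must stay within the IB's
 * length_dw.
 */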
1288*d350ecf5Sriastradh 
1289*d350ecf5Sriastradh /*
1290*d350ecf5Sriastradh  * Writeback
1291*d350ecf5Sriastradh  */
1292*d350ecf5Sriastradh #define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */
1293*d350ecf5Sriastradh 
1294*d350ecf5Sriastradh struct amdgpu_wb {
1295*d350ecf5Sriastradh 	struct amdgpu_bo	*wb_obj;
1296*d350ecf5Sriastradh 	volatile uint32_t	*wb;
1297*d350ecf5Sriastradh 	uint64_t		gpu_addr;
1298*d350ecf5Sriastradh 	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
1299*d350ecf5Sriastradh 	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
1300*d350ecf5Sriastradh };
1301*d350ecf5Sriastradh 
1302*d350ecf5Sriastradh int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
1303*d350ecf5Sriastradh void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
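
/*
 * Usage sketch (editorial illustration): allocating a writeback slot,
 * deriving its GPU address and releasing it.  The 4-byte stride matches
 * the uint32_t entries of the writeback page above.
 *
 *	u32 wb;
 *
 *	if (amdgpu_wb_get(adev, &wb) == 0) {
 *		uint64_t wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		uint32_t val = adev->wb.wb[wb];
 *
 *		amdgpu_wb_free(adev, wb);
 *	}
 */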
1304*d350ecf5Sriastradh 
1305*d350ecf5Sriastradh /**
1306*d350ecf5Sriastradh  * struct amdgpu_pm - power management data
1307*d350ecf5Sriastradh  * It keeps track of the various data needed to make power management decisions.
1308*d350ecf5Sriastradh  */
1309*d350ecf5Sriastradh 
1310*d350ecf5Sriastradh enum amdgpu_pm_state_type {
1311*d350ecf5Sriastradh 	/* not used for dpm */
1312*d350ecf5Sriastradh 	POWER_STATE_TYPE_DEFAULT,
1313*d350ecf5Sriastradh 	POWER_STATE_TYPE_POWERSAVE,
1314*d350ecf5Sriastradh 	/* user selectable states */
1315*d350ecf5Sriastradh 	POWER_STATE_TYPE_BATTERY,
1316*d350ecf5Sriastradh 	POWER_STATE_TYPE_BALANCED,
1317*d350ecf5Sriastradh 	POWER_STATE_TYPE_PERFORMANCE,
1318*d350ecf5Sriastradh 	/* internal states */
1319*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_UVD,
1320*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_UVD_SD,
1321*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_UVD_HD,
1322*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1323*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1324*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_BOOT,
1325*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_THERMAL,
1326*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_ACPI,
1327*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_ULV,
1328*d350ecf5Sriastradh 	POWER_STATE_TYPE_INTERNAL_3DPERF,
1329*d350ecf5Sriastradh };
1330*d350ecf5Sriastradh 
1331*d350ecf5Sriastradh enum amdgpu_int_thermal_type {
1332*d350ecf5Sriastradh 	THERMAL_TYPE_NONE,
1333*d350ecf5Sriastradh 	THERMAL_TYPE_EXTERNAL,
1334*d350ecf5Sriastradh 	THERMAL_TYPE_EXTERNAL_GPIO,
1335*d350ecf5Sriastradh 	THERMAL_TYPE_RV6XX,
1336*d350ecf5Sriastradh 	THERMAL_TYPE_RV770,
1337*d350ecf5Sriastradh 	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
1338*d350ecf5Sriastradh 	THERMAL_TYPE_EVERGREEN,
1339*d350ecf5Sriastradh 	THERMAL_TYPE_SUMO,
1340*d350ecf5Sriastradh 	THERMAL_TYPE_NI,
1341*d350ecf5Sriastradh 	THERMAL_TYPE_SI,
1342*d350ecf5Sriastradh 	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1343*d350ecf5Sriastradh 	THERMAL_TYPE_CI,
1344*d350ecf5Sriastradh 	THERMAL_TYPE_KV,
1345*d350ecf5Sriastradh };
1346*d350ecf5Sriastradh 
1347*d350ecf5Sriastradh enum amdgpu_dpm_auto_throttle_src {
1348*d350ecf5Sriastradh 	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
1349*d350ecf5Sriastradh 	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
1350*d350ecf5Sriastradh };
1351*d350ecf5Sriastradh 
1352*d350ecf5Sriastradh enum amdgpu_dpm_event_src {
1353*d350ecf5Sriastradh 	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
1354*d350ecf5Sriastradh 	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
1355*d350ecf5Sriastradh 	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
1356*d350ecf5Sriastradh 	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
1357*d350ecf5Sriastradh 	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
1358*d350ecf5Sriastradh };
1359*d350ecf5Sriastradh 
1360*d350ecf5Sriastradh #define AMDGPU_MAX_VCE_LEVELS 6
1361*d350ecf5Sriastradh 
1362*d350ecf5Sriastradh enum amdgpu_vce_level {
1363*d350ecf5Sriastradh 	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
1364*d350ecf5Sriastradh 	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
1365*d350ecf5Sriastradh 	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
1366*d350ecf5Sriastradh 	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
1367*d350ecf5Sriastradh 	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
1368*d350ecf5Sriastradh 	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
1369*d350ecf5Sriastradh };
1370*d350ecf5Sriastradh 
1371*d350ecf5Sriastradh struct amdgpu_ps {
1372*d350ecf5Sriastradh 	u32 caps; /* vbios flags */
1373*d350ecf5Sriastradh 	u32 class; /* vbios flags */
1374*d350ecf5Sriastradh 	u32 class2; /* vbios flags */
1375*d350ecf5Sriastradh 	/* UVD clocks */
1376*d350ecf5Sriastradh 	u32 vclk;
1377*d350ecf5Sriastradh 	u32 dclk;
1378*d350ecf5Sriastradh 	/* VCE clocks */
1379*d350ecf5Sriastradh 	u32 evclk;
1380*d350ecf5Sriastradh 	u32 ecclk;
1381*d350ecf5Sriastradh 	bool vce_active;
1382*d350ecf5Sriastradh 	enum amdgpu_vce_level vce_level;
1383*d350ecf5Sriastradh 	/* asic priv */
1384*d350ecf5Sriastradh 	void *ps_priv;
1385*d350ecf5Sriastradh };
1386*d350ecf5Sriastradh 
1387*d350ecf5Sriastradh struct amdgpu_dpm_thermal {
1388*d350ecf5Sriastradh 	/* thermal interrupt work */
1389*d350ecf5Sriastradh 	struct work_struct work;
1390*d350ecf5Sriastradh 	/* low temperature threshold */
1391*d350ecf5Sriastradh 	int                min_temp;
1392*d350ecf5Sriastradh 	/* high temperature threshold */
1393*d350ecf5Sriastradh 	int                max_temp;
1394*d350ecf5Sriastradh 	/* whether the last interrupt was low to high or high to low */
1395*d350ecf5Sriastradh 	bool               high_to_low;
1396*d350ecf5Sriastradh 	/* interrupt source */
1397*d350ecf5Sriastradh 	struct amdgpu_irq_src	irq;
1398*d350ecf5Sriastradh };
1399*d350ecf5Sriastradh 
1400*d350ecf5Sriastradh enum amdgpu_clk_action {
1402*d350ecf5Sriastradh 	AMDGPU_SCLK_UP = 1,
1403*d350ecf5Sriastradh 	AMDGPU_SCLK_DOWN
1404*d350ecf5Sriastradh };
1405*d350ecf5Sriastradh 
1406*d350ecf5Sriastradh struct amdgpu_blacklist_clocks {
1408*d350ecf5Sriastradh 	u32 sclk;
1409*d350ecf5Sriastradh 	u32 mclk;
1410*d350ecf5Sriastradh 	enum amdgpu_clk_action action;
1411*d350ecf5Sriastradh };
1412*d350ecf5Sriastradh 
1413*d350ecf5Sriastradh struct amdgpu_clock_and_voltage_limits {
1414*d350ecf5Sriastradh 	u32 sclk;
1415*d350ecf5Sriastradh 	u32 mclk;
1416*d350ecf5Sriastradh 	u16 vddc;
1417*d350ecf5Sriastradh 	u16 vddci;
1418*d350ecf5Sriastradh };
1419*d350ecf5Sriastradh 
1420*d350ecf5Sriastradh struct amdgpu_clock_array {
1421*d350ecf5Sriastradh 	u32 count;
1422*d350ecf5Sriastradh 	u32 *values;
1423*d350ecf5Sriastradh };
1424*d350ecf5Sriastradh 
1425*d350ecf5Sriastradh struct amdgpu_clock_voltage_dependency_entry {
1426*d350ecf5Sriastradh 	u32 clk;
1427*d350ecf5Sriastradh 	u16 v;
1428*d350ecf5Sriastradh };
1429*d350ecf5Sriastradh 
1430*d350ecf5Sriastradh struct amdgpu_clock_voltage_dependency_table {
1431*d350ecf5Sriastradh 	u32 count;
1432*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_entry *entries;
1433*d350ecf5Sriastradh };
1434*d350ecf5Sriastradh 
1435*d350ecf5Sriastradh union amdgpu_cac_leakage_entry {
1436*d350ecf5Sriastradh 	struct {
1437*d350ecf5Sriastradh 		u16 vddc;
1438*d350ecf5Sriastradh 		u32 leakage;
1439*d350ecf5Sriastradh 	};
1440*d350ecf5Sriastradh 	struct {
1441*d350ecf5Sriastradh 		u16 vddc1;
1442*d350ecf5Sriastradh 		u16 vddc2;
1443*d350ecf5Sriastradh 		u16 vddc3;
1444*d350ecf5Sriastradh 	};
1445*d350ecf5Sriastradh };
1446*d350ecf5Sriastradh 
1447*d350ecf5Sriastradh struct amdgpu_cac_leakage_table {
1448*d350ecf5Sriastradh 	u32 count;
1449*d350ecf5Sriastradh 	union amdgpu_cac_leakage_entry *entries;
1450*d350ecf5Sriastradh };
1451*d350ecf5Sriastradh 
1452*d350ecf5Sriastradh struct amdgpu_phase_shedding_limits_entry {
1453*d350ecf5Sriastradh 	u16 voltage;
1454*d350ecf5Sriastradh 	u32 sclk;
1455*d350ecf5Sriastradh 	u32 mclk;
1456*d350ecf5Sriastradh };
1457*d350ecf5Sriastradh 
1458*d350ecf5Sriastradh struct amdgpu_phase_shedding_limits_table {
1459*d350ecf5Sriastradh 	u32 count;
1460*d350ecf5Sriastradh 	struct amdgpu_phase_shedding_limits_entry *entries;
1461*d350ecf5Sriastradh };
1462*d350ecf5Sriastradh 
1463*d350ecf5Sriastradh struct amdgpu_uvd_clock_voltage_dependency_entry {
1464*d350ecf5Sriastradh 	u32 vclk;
1465*d350ecf5Sriastradh 	u32 dclk;
1466*d350ecf5Sriastradh 	u16 v;
1467*d350ecf5Sriastradh };
1468*d350ecf5Sriastradh 
1469*d350ecf5Sriastradh struct amdgpu_uvd_clock_voltage_dependency_table {
1470*d350ecf5Sriastradh 	u8 count;
1471*d350ecf5Sriastradh 	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
1472*d350ecf5Sriastradh };
1473*d350ecf5Sriastradh 
1474*d350ecf5Sriastradh struct amdgpu_vce_clock_voltage_dependency_entry {
1475*d350ecf5Sriastradh 	u32 ecclk;
1476*d350ecf5Sriastradh 	u32 evclk;
1477*d350ecf5Sriastradh 	u16 v;
1478*d350ecf5Sriastradh };
1479*d350ecf5Sriastradh 
1480*d350ecf5Sriastradh struct amdgpu_vce_clock_voltage_dependency_table {
1481*d350ecf5Sriastradh 	u8 count;
1482*d350ecf5Sriastradh 	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
1483*d350ecf5Sriastradh };
1484*d350ecf5Sriastradh 
1485*d350ecf5Sriastradh struct amdgpu_ppm_table {
1486*d350ecf5Sriastradh 	u8 ppm_design;
1487*d350ecf5Sriastradh 	u16 cpu_core_number;
1488*d350ecf5Sriastradh 	u32 platform_tdp;
1489*d350ecf5Sriastradh 	u32 small_ac_platform_tdp;
1490*d350ecf5Sriastradh 	u32 platform_tdc;
1491*d350ecf5Sriastradh 	u32 small_ac_platform_tdc;
1492*d350ecf5Sriastradh 	u32 apu_tdp;
1493*d350ecf5Sriastradh 	u32 dgpu_tdp;
1494*d350ecf5Sriastradh 	u32 dgpu_ulv_power;
1495*d350ecf5Sriastradh 	u32 tj_max;
1496*d350ecf5Sriastradh };
1497*d350ecf5Sriastradh 
1498*d350ecf5Sriastradh struct amdgpu_cac_tdp_table {
1499*d350ecf5Sriastradh 	u16 tdp;
1500*d350ecf5Sriastradh 	u16 configurable_tdp;
1501*d350ecf5Sriastradh 	u16 tdc;
1502*d350ecf5Sriastradh 	u16 battery_power_limit;
1503*d350ecf5Sriastradh 	u16 small_power_limit;
1504*d350ecf5Sriastradh 	u16 low_cac_leakage;
1505*d350ecf5Sriastradh 	u16 high_cac_leakage;
1506*d350ecf5Sriastradh 	u16 maximum_power_delivery_limit;
1507*d350ecf5Sriastradh };
1508*d350ecf5Sriastradh 
1509*d350ecf5Sriastradh struct amdgpu_dpm_dynamic_state {
1510*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
1511*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
1512*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
1513*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
1514*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1515*d350ecf5Sriastradh 	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
1516*d350ecf5Sriastradh 	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
1517*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
1518*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
1519*d350ecf5Sriastradh 	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
1520*d350ecf5Sriastradh 	struct amdgpu_clock_array valid_sclk_values;
1521*d350ecf5Sriastradh 	struct amdgpu_clock_array valid_mclk_values;
1522*d350ecf5Sriastradh 	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
1523*d350ecf5Sriastradh 	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
1524*d350ecf5Sriastradh 	u32 mclk_sclk_ratio;
1525*d350ecf5Sriastradh 	u32 sclk_mclk_delta;
1526*d350ecf5Sriastradh 	u16 vddc_vddci_delta;
1527*d350ecf5Sriastradh 	u16 min_vddc_for_pcie_gen2;
1528*d350ecf5Sriastradh 	struct amdgpu_cac_leakage_table cac_leakage_table;
1529*d350ecf5Sriastradh 	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
1530*d350ecf5Sriastradh 	struct amdgpu_ppm_table *ppm_table;
1531*d350ecf5Sriastradh 	struct amdgpu_cac_tdp_table *cac_tdp_table;
1532*d350ecf5Sriastradh };
1533*d350ecf5Sriastradh 
1534*d350ecf5Sriastradh struct amdgpu_dpm_fan {
1535*d350ecf5Sriastradh 	u16 t_min;
1536*d350ecf5Sriastradh 	u16 t_med;
1537*d350ecf5Sriastradh 	u16 t_high;
1538*d350ecf5Sriastradh 	u16 pwm_min;
1539*d350ecf5Sriastradh 	u16 pwm_med;
1540*d350ecf5Sriastradh 	u16 pwm_high;
1541*d350ecf5Sriastradh 	u8 t_hyst;
1542*d350ecf5Sriastradh 	u32 cycle_delay;
1543*d350ecf5Sriastradh 	u16 t_max;
1544*d350ecf5Sriastradh 	u8 control_mode;
1545*d350ecf5Sriastradh 	u16 default_max_fan_pwm;
1546*d350ecf5Sriastradh 	u16 default_fan_output_sensitivity;
1547*d350ecf5Sriastradh 	u16 fan_output_sensitivity;
1548*d350ecf5Sriastradh 	bool ucode_fan_control;
1549*d350ecf5Sriastradh };
1550*d350ecf5Sriastradh 
1551*d350ecf5Sriastradh enum amdgpu_pcie_gen {
1552*d350ecf5Sriastradh 	AMDGPU_PCIE_GEN1 = 0,
1553*d350ecf5Sriastradh 	AMDGPU_PCIE_GEN2 = 1,
1554*d350ecf5Sriastradh 	AMDGPU_PCIE_GEN3 = 2,
1555*d350ecf5Sriastradh 	AMDGPU_PCIE_GEN_INVALID = 0xffff
1556*d350ecf5Sriastradh };
1557*d350ecf5Sriastradh 
1558*d350ecf5Sriastradh enum amdgpu_dpm_forced_level {
1559*d350ecf5Sriastradh 	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
1560*d350ecf5Sriastradh 	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
1561*d350ecf5Sriastradh 	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
1562*d350ecf5Sriastradh };
1563*d350ecf5Sriastradh 
1564*d350ecf5Sriastradh struct amdgpu_vce_state {
1565*d350ecf5Sriastradh 	/* vce clocks */
1566*d350ecf5Sriastradh 	u32 evclk;
1567*d350ecf5Sriastradh 	u32 ecclk;
1568*d350ecf5Sriastradh 	/* gpu clocks */
1569*d350ecf5Sriastradh 	u32 sclk;
1570*d350ecf5Sriastradh 	u32 mclk;
1571*d350ecf5Sriastradh 	u8 clk_idx;
1572*d350ecf5Sriastradh 	u8 pstate;
1573*d350ecf5Sriastradh };
1574*d350ecf5Sriastradh 
1575*d350ecf5Sriastradh struct amdgpu_dpm_funcs {
1576*d350ecf5Sriastradh 	int (*get_temperature)(struct amdgpu_device *adev);
1577*d350ecf5Sriastradh 	int (*pre_set_power_state)(struct amdgpu_device *adev);
1578*d350ecf5Sriastradh 	int (*set_power_state)(struct amdgpu_device *adev);
1579*d350ecf5Sriastradh 	void (*post_set_power_state)(struct amdgpu_device *adev);
1580*d350ecf5Sriastradh 	void (*display_configuration_changed)(struct amdgpu_device *adev);
1581*d350ecf5Sriastradh 	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
1582*d350ecf5Sriastradh 	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
1583*d350ecf5Sriastradh 	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
1584*d350ecf5Sriastradh 	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
1585*d350ecf5Sriastradh 	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
1586*d350ecf5Sriastradh 	bool (*vblank_too_short)(struct amdgpu_device *adev);
1587*d350ecf5Sriastradh 	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
1588*d350ecf5Sriastradh 	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
1589*d350ecf5Sriastradh 	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
1590*d350ecf5Sriastradh 	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
1591*d350ecf5Sriastradh 	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
1592*d350ecf5Sriastradh 	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
1593*d350ecf5Sriastradh 	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
1594*d350ecf5Sriastradh };
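
/*
 * Calls into this table go through adev->pm.funcs (editorial sketch;
 * callers are assumed to hold adev->pm.mutex where required):
 *
 *	if (adev->pm.funcs->get_temperature)
 *		temp = adev->pm.funcs->get_temperature(adev);
 */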
1595*d350ecf5Sriastradh 
1596*d350ecf5Sriastradh struct amdgpu_dpm {
1597*d350ecf5Sriastradh 	struct amdgpu_ps        *ps;
1598*d350ecf5Sriastradh 	/* number of valid power states */
1599*d350ecf5Sriastradh 	int                     num_ps;
1600*d350ecf5Sriastradh 	/* current power state that is active */
1601*d350ecf5Sriastradh 	struct amdgpu_ps        *current_ps;
1602*d350ecf5Sriastradh 	/* requested power state */
1603*d350ecf5Sriastradh 	struct amdgpu_ps        *requested_ps;
1604*d350ecf5Sriastradh 	/* boot up power state */
1605*d350ecf5Sriastradh 	struct amdgpu_ps        *boot_ps;
1606*d350ecf5Sriastradh 	/* default uvd power state */
1607*d350ecf5Sriastradh 	struct amdgpu_ps        *uvd_ps;
1608*d350ecf5Sriastradh 	/* vce requirements */
1609*d350ecf5Sriastradh 	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
1610*d350ecf5Sriastradh 	enum amdgpu_vce_level vce_level;
1611*d350ecf5Sriastradh 	enum amdgpu_pm_state_type state;
1612*d350ecf5Sriastradh 	enum amdgpu_pm_state_type user_state;
1613*d350ecf5Sriastradh 	u32                     platform_caps;
1614*d350ecf5Sriastradh 	u32                     voltage_response_time;
1615*d350ecf5Sriastradh 	u32                     backbias_response_time;
1616*d350ecf5Sriastradh 	void                    *priv;
1617*d350ecf5Sriastradh 	u32			new_active_crtcs;
1618*d350ecf5Sriastradh 	int			new_active_crtc_count;
1619*d350ecf5Sriastradh 	u32			current_active_crtcs;
1620*d350ecf5Sriastradh 	int			current_active_crtc_count;
1621*d350ecf5Sriastradh 	struct amdgpu_dpm_dynamic_state dyn_state;
1622*d350ecf5Sriastradh 	struct amdgpu_dpm_fan fan;
1623*d350ecf5Sriastradh 	u32 tdp_limit;
1624*d350ecf5Sriastradh 	u32 near_tdp_limit;
1625*d350ecf5Sriastradh 	u32 near_tdp_limit_adjusted;
1626*d350ecf5Sriastradh 	u32 sq_ramping_threshold;
1627*d350ecf5Sriastradh 	u32 cac_leakage;
1628*d350ecf5Sriastradh 	u16 tdp_od_limit;
1629*d350ecf5Sriastradh 	u32 tdp_adjustment;
1630*d350ecf5Sriastradh 	u16 load_line_slope;
1631*d350ecf5Sriastradh 	bool power_control;
1632*d350ecf5Sriastradh 	bool ac_power;
1633*d350ecf5Sriastradh 	/* special states active */
1634*d350ecf5Sriastradh 	bool                    thermal_active;
1635*d350ecf5Sriastradh 	bool                    uvd_active;
1636*d350ecf5Sriastradh 	bool                    vce_active;
1637*d350ecf5Sriastradh 	/* thermal handling */
1638*d350ecf5Sriastradh 	struct amdgpu_dpm_thermal thermal;
1639*d350ecf5Sriastradh 	/* forced levels */
1640*d350ecf5Sriastradh 	enum amdgpu_dpm_forced_level forced_level;
1641*d350ecf5Sriastradh };
1642*d350ecf5Sriastradh 
1643*d350ecf5Sriastradh struct amdgpu_pm {
1644*d350ecf5Sriastradh 	struct mutex		mutex;
1645*d350ecf5Sriastradh 	u32                     current_sclk;
1646*d350ecf5Sriastradh 	u32                     current_mclk;
1647*d350ecf5Sriastradh 	u32                     default_sclk;
1648*d350ecf5Sriastradh 	u32                     default_mclk;
1649*d350ecf5Sriastradh 	struct amdgpu_i2c_chan *i2c_bus;
1650*d350ecf5Sriastradh 	/* internal thermal controller on rv6xx+ */
1651*d350ecf5Sriastradh 	enum amdgpu_int_thermal_type int_thermal_type;
1652*d350ecf5Sriastradh 	struct device	        *int_hwmon_dev;
1653*d350ecf5Sriastradh 	/* fan control parameters */
1654*d350ecf5Sriastradh 	bool                    no_fan;
1655*d350ecf5Sriastradh 	u8                      fan_pulses_per_revolution;
1656*d350ecf5Sriastradh 	u8                      fan_min_rpm;
1657*d350ecf5Sriastradh 	u8                      fan_max_rpm;
1658*d350ecf5Sriastradh 	/* dpm */
1659*d350ecf5Sriastradh 	bool                    dpm_enabled;
1660*d350ecf5Sriastradh 	bool                    sysfs_initialized;
1661*d350ecf5Sriastradh 	struct amdgpu_dpm       dpm;
1662*d350ecf5Sriastradh 	const struct firmware	*fw;	/* SMC firmware */
1663*d350ecf5Sriastradh 	uint32_t                fw_version;
1664*d350ecf5Sriastradh 	const struct amdgpu_dpm_funcs *funcs;
1665*d350ecf5Sriastradh };
1666*d350ecf5Sriastradh 
1667*d350ecf5Sriastradh /*
1668*d350ecf5Sriastradh  * UVD
1669*d350ecf5Sriastradh  */
1670*d350ecf5Sriastradh #define AMDGPU_MAX_UVD_HANDLES	10
1671*d350ecf5Sriastradh #define AMDGPU_UVD_STACK_SIZE	(1024*1024)
1672*d350ecf5Sriastradh #define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
1673*d350ecf5Sriastradh #define AMDGPU_UVD_FIRMWARE_OFFSET 256
1674*d350ecf5Sriastradh 
1675*d350ecf5Sriastradh struct amdgpu_uvd {
1676*d350ecf5Sriastradh 	struct amdgpu_bo	*vcpu_bo;
1677*d350ecf5Sriastradh 	void			*cpu_addr;
1678*d350ecf5Sriastradh 	uint64_t		gpu_addr;
1679*d350ecf5Sriastradh 	unsigned		fw_version;
1680*d350ecf5Sriastradh 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
1681*d350ecf5Sriastradh 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
1682*d350ecf5Sriastradh 	struct delayed_work	idle_work;
1683*d350ecf5Sriastradh 	const struct firmware	*fw;	/* UVD firmware */
1684*d350ecf5Sriastradh 	struct amdgpu_ring	ring;
1685*d350ecf5Sriastradh 	struct amdgpu_irq_src	irq;
1686*d350ecf5Sriastradh 	bool			address_64_bit;
1687*d350ecf5Sriastradh };
1688*d350ecf5Sriastradh 
1689*d350ecf5Sriastradh /*
1690*d350ecf5Sriastradh  * VCE
1691*d350ecf5Sriastradh  */
1692*d350ecf5Sriastradh #define AMDGPU_MAX_VCE_HANDLES	16
1693*d350ecf5Sriastradh #define AMDGPU_VCE_FIRMWARE_OFFSET 256
1694*d350ecf5Sriastradh 
1695*d350ecf5Sriastradh #define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
1696*d350ecf5Sriastradh #define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
1697*d350ecf5Sriastradh 
1698*d350ecf5Sriastradh struct amdgpu_vce {
1699*d350ecf5Sriastradh 	struct amdgpu_bo	*vcpu_bo;
1700*d350ecf5Sriastradh 	uint64_t		gpu_addr;
1701*d350ecf5Sriastradh 	unsigned		fw_version;
1702*d350ecf5Sriastradh 	unsigned		fb_version;
1703*d350ecf5Sriastradh 	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
1704*d350ecf5Sriastradh 	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
1705*d350ecf5Sriastradh 	uint32_t		img_size[AMDGPU_MAX_VCE_HANDLES];
1706*d350ecf5Sriastradh 	struct delayed_work	idle_work;
1707*d350ecf5Sriastradh 	const struct firmware	*fw;	/* VCE firmware */
1708*d350ecf5Sriastradh 	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
1709*d350ecf5Sriastradh 	struct amdgpu_irq_src	irq;
1710*d350ecf5Sriastradh 	unsigned		harvest_config;
1711*d350ecf5Sriastradh };
1712*d350ecf5Sriastradh 
1713*d350ecf5Sriastradh /*
1714*d350ecf5Sriastradh  * SDMA
1715*d350ecf5Sriastradh  */
1716*d350ecf5Sriastradh struct amdgpu_sdma_instance {
1717*d350ecf5Sriastradh 	/* SDMA firmware */
1718*d350ecf5Sriastradh 	const struct firmware	*fw;
1719*d350ecf5Sriastradh 	uint32_t		fw_version;
1720*d350ecf5Sriastradh 	uint32_t		feature_version;
1721*d350ecf5Sriastradh 
1722*d350ecf5Sriastradh 	struct amdgpu_ring	ring;
1723*d350ecf5Sriastradh 	bool			burst_nop;
1724*d350ecf5Sriastradh };
1725*d350ecf5Sriastradh 
1726*d350ecf5Sriastradh struct amdgpu_sdma {
1727*d350ecf5Sriastradh 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
1728*d350ecf5Sriastradh 	struct amdgpu_irq_src	trap_irq;
1729*d350ecf5Sriastradh 	struct amdgpu_irq_src	illegal_inst_irq;
1730*d350ecf5Sriastradh 	int			num_instances;
1731*d350ecf5Sriastradh };
1732*d350ecf5Sriastradh 
1733*d350ecf5Sriastradh /*
1734*d350ecf5Sriastradh  * Firmware
1735*d350ecf5Sriastradh  */
1736*d350ecf5Sriastradh struct amdgpu_firmware {
1737*d350ecf5Sriastradh 	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
1738*d350ecf5Sriastradh 	bool smu_load;
1739*d350ecf5Sriastradh 	struct amdgpu_bo *fw_buf;
1740*d350ecf5Sriastradh 	unsigned int fw_size;
1741*d350ecf5Sriastradh };
1742*d350ecf5Sriastradh 
1743*d350ecf5Sriastradh /*
1744*d350ecf5Sriastradh  * Benchmarking
1745*d350ecf5Sriastradh  */
1746*d350ecf5Sriastradh void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
1747*d350ecf5Sriastradh 
1748*d350ecf5Sriastradh 
1749*d350ecf5Sriastradh /*
1750*d350ecf5Sriastradh  * Testing
1751*d350ecf5Sriastradh  */
1752*d350ecf5Sriastradh void amdgpu_test_moves(struct amdgpu_device *adev);
1753*d350ecf5Sriastradh void amdgpu_test_ring_sync(struct amdgpu_device *adev,
1754*d350ecf5Sriastradh 			   struct amdgpu_ring *cpA,
1755*d350ecf5Sriastradh 			   struct amdgpu_ring *cpB);
1756*d350ecf5Sriastradh void amdgpu_test_syncing(struct amdgpu_device *adev);
1757*d350ecf5Sriastradh 
1758*d350ecf5Sriastradh /*
1759*d350ecf5Sriastradh  * MMU Notifier
1760*d350ecf5Sriastradh  */
1761*d350ecf5Sriastradh #if defined(CONFIG_MMU_NOTIFIER)
1762*d350ecf5Sriastradh int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
1763*d350ecf5Sriastradh void amdgpu_mn_unregister(struct amdgpu_bo *bo);
1764*d350ecf5Sriastradh #else
1765*d350ecf5Sriastradh static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
1766*d350ecf5Sriastradh {
1767*d350ecf5Sriastradh 	return -ENODEV;
1768*d350ecf5Sriastradh }
1769*d350ecf5Sriastradh static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
1770*d350ecf5Sriastradh #endif
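
/*
 * Usage sketch (editorial illustration): userptr BOs are tied to the
 * process address space through the notifier; without
 * CONFIG_MMU_NOTIFIER the stub returns -ENODEV and registration fails.
 *
 *	r = amdgpu_mn_register(bo, userptr_addr);
 *	if (r)
 *		goto error_free;
 *	...
 *	amdgpu_mn_unregister(bo);
 */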
1771*d350ecf5Sriastradh 
1772*d350ecf5Sriastradh /*
1773*d350ecf5Sriastradh  * Debugfs
1774*d350ecf5Sriastradh  */
1775*d350ecf5Sriastradh struct amdgpu_debugfs {
1776*d350ecf5Sriastradh 	struct drm_info_list	*files;
1777*d350ecf5Sriastradh 	unsigned		num_files;
1778*d350ecf5Sriastradh };
1779*d350ecf5Sriastradh 
1780*d350ecf5Sriastradh int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
1781*d350ecf5Sriastradh 			     struct drm_info_list *files,
1782*d350ecf5Sriastradh 			     unsigned nfiles);
1783*d350ecf5Sriastradh int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
1784*d350ecf5Sriastradh 
1785*d350ecf5Sriastradh #if defined(CONFIG_DEBUG_FS)
1786*d350ecf5Sriastradh int amdgpu_debugfs_init(struct drm_minor *minor);
1787*d350ecf5Sriastradh void amdgpu_debugfs_cleanup(struct drm_minor *minor);
1788*d350ecf5Sriastradh #endif
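
/*
 * Usage sketch (editorial illustration): a component publishes its
 * debugfs entries through a drm_info_list table; "my_debugfs_list" and
 * "my_debugfs_show" are hypothetical names.
 *
 *	static struct drm_info_list my_debugfs_list[] = {
 *		{ "my_info", my_debugfs_show, 0, NULL },
 *	};
 *
 *	r = amdgpu_debugfs_add_files(adev, my_debugfs_list,
 *				     ARRAY_SIZE(my_debugfs_list));
 */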
1789*d350ecf5Sriastradh 
1790*d350ecf5Sriastradh /*
1791*d350ecf5Sriastradh  * amdgpu smumgr functions
1792*d350ecf5Sriastradh  */
1793*d350ecf5Sriastradh struct amdgpu_smumgr_funcs {
1794*d350ecf5Sriastradh 	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
1795*d350ecf5Sriastradh 	int (*request_smu_load_fw)(struct amdgpu_device *adev);
1796*d350ecf5Sriastradh 	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
1797*d350ecf5Sriastradh };
1798*d350ecf5Sriastradh 
1799*d350ecf5Sriastradh /*
1800*d350ecf5Sriastradh  * amdgpu smumgr
1801*d350ecf5Sriastradh  */
1802*d350ecf5Sriastradh struct amdgpu_smumgr {
1803*d350ecf5Sriastradh 	struct amdgpu_bo *toc_buf;
1804*d350ecf5Sriastradh 	struct amdgpu_bo *smu_buf;
1805*d350ecf5Sriastradh 	/* asic priv smu data */
1806*d350ecf5Sriastradh 	void *priv;
1807*d350ecf5Sriastradh 	spinlock_t smu_lock;
1808*d350ecf5Sriastradh 	/* smumgr functions */
1809*d350ecf5Sriastradh 	const struct amdgpu_smumgr_funcs *smumgr_funcs;
1810*d350ecf5Sriastradh 	/* ucode loading complete flag */
1811*d350ecf5Sriastradh 	uint32_t fw_flags;
1812*d350ecf5Sriastradh };
1813*d350ecf5Sriastradh 
1814*d350ecf5Sriastradh /*
1815*d350ecf5Sriastradh  * ASIC specific register table accessible by UMD
1816*d350ecf5Sriastradh  */
1817*d350ecf5Sriastradh struct amdgpu_allowed_register_entry {
1818*d350ecf5Sriastradh 	uint32_t reg_offset;
1819*d350ecf5Sriastradh 	bool untouched;
1820*d350ecf5Sriastradh 	bool grbm_indexed;
1821*d350ecf5Sriastradh };
1822*d350ecf5Sriastradh 
1823*d350ecf5Sriastradh struct amdgpu_cu_info {
1824*d350ecf5Sriastradh 	uint32_t number; /* total active CU number */
1825*d350ecf5Sriastradh 	uint32_t ao_cu_mask;
1826*d350ecf5Sriastradh 	uint32_t bitmap[4][4];
1827*d350ecf5Sriastradh };
1828*d350ecf5Sriastradh 
1829*d350ecf5Sriastradh 
1830*d350ecf5Sriastradh /*
1831*d350ecf5Sriastradh  * ASIC specific functions.
1832*d350ecf5Sriastradh  */
1833*d350ecf5Sriastradh struct amdgpu_asic_funcs {
1834*d350ecf5Sriastradh 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
1835*d350ecf5Sriastradh 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
1836*d350ecf5Sriastradh 			     u32 sh_num, u32 reg_offset, u32 *value);
1837*d350ecf5Sriastradh 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
1838*d350ecf5Sriastradh 	int (*reset)(struct amdgpu_device *adev);
1839*d350ecf5Sriastradh 	/* wait for mc_idle */
1840*d350ecf5Sriastradh 	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
1841*d350ecf5Sriastradh 	/* get the reference clock */
1842*d350ecf5Sriastradh 	u32 (*get_xclk)(struct amdgpu_device *adev);
1843*d350ecf5Sriastradh 	/* get the gpu clock counter */
1844*d350ecf5Sriastradh 	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
1845*d350ecf5Sriastradh 	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
1846*d350ecf5Sriastradh 	/* MM block clocks */
1847*d350ecf5Sriastradh 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1848*d350ecf5Sriastradh 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
1849*d350ecf5Sriastradh };
1850*d350ecf5Sriastradh 
1851*d350ecf5Sriastradh /*
1852*d350ecf5Sriastradh  * IOCTL.
1853*d350ecf5Sriastradh  */
1854*d350ecf5Sriastradh int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
1855*d350ecf5Sriastradh 			    struct drm_file *filp);
1856*d350ecf5Sriastradh int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
1857*d350ecf5Sriastradh 				struct drm_file *filp);
1858*d350ecf5Sriastradh 
1859*d350ecf5Sriastradh int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
1860*d350ecf5Sriastradh 			  struct drm_file *filp);
1861*d350ecf5Sriastradh int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
1862*d350ecf5Sriastradh 			struct drm_file *filp);
1863*d350ecf5Sriastradh int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
1864*d350ecf5Sriastradh 			  struct drm_file *filp);
1865*d350ecf5Sriastradh int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1866*d350ecf5Sriastradh 			      struct drm_file *filp);
1867*d350ecf5Sriastradh int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
1868*d350ecf5Sriastradh 			  struct drm_file *filp);
1869*d350ecf5Sriastradh int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
1870*d350ecf5Sriastradh 			struct drm_file *filp);
1871*d350ecf5Sriastradh int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1872*d350ecf5Sriastradh int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1873*d350ecf5Sriastradh 
1874*d350ecf5Sriastradh int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
1875*d350ecf5Sriastradh 				struct drm_file *filp);
1876*d350ecf5Sriastradh 
1877*d350ecf5Sriastradh /* VRAM scratch page used to work around the HDP bug; defaults to a VRAM page */
1878*d350ecf5Sriastradh struct amdgpu_vram_scratch {
1879*d350ecf5Sriastradh 	struct amdgpu_bo		*robj;
1880*d350ecf5Sriastradh 	volatile uint32_t		*ptr;
1881*d350ecf5Sriastradh 	u64				gpu_addr;
1882*d350ecf5Sriastradh };
1883*d350ecf5Sriastradh 
1884*d350ecf5Sriastradh /*
1885*d350ecf5Sriastradh  * ACPI
1886*d350ecf5Sriastradh  */
1887*d350ecf5Sriastradh struct amdgpu_atif_notification_cfg {
1888*d350ecf5Sriastradh 	bool enabled;
1889*d350ecf5Sriastradh 	int command_code;
1890*d350ecf5Sriastradh };
1891*d350ecf5Sriastradh 
1892*d350ecf5Sriastradh struct amdgpu_atif_notifications {
1893*d350ecf5Sriastradh 	bool display_switch;
1894*d350ecf5Sriastradh 	bool expansion_mode_change;
1895*d350ecf5Sriastradh 	bool thermal_state;
1896*d350ecf5Sriastradh 	bool forced_power_state;
1897*d350ecf5Sriastradh 	bool system_power_state;
1898*d350ecf5Sriastradh 	bool display_conf_change;
1899*d350ecf5Sriastradh 	bool px_gfx_switch;
1900*d350ecf5Sriastradh 	bool brightness_change;
1901*d350ecf5Sriastradh 	bool dgpu_display_event;
1902*d350ecf5Sriastradh };
1903*d350ecf5Sriastradh 
1904*d350ecf5Sriastradh struct amdgpu_atif_functions {
1905*d350ecf5Sriastradh 	bool system_params;
1906*d350ecf5Sriastradh 	bool sbios_requests;
1907*d350ecf5Sriastradh 	bool select_active_disp;
1908*d350ecf5Sriastradh 	bool lid_state;
1909*d350ecf5Sriastradh 	bool get_tv_standard;
1910*d350ecf5Sriastradh 	bool set_tv_standard;
1911*d350ecf5Sriastradh 	bool get_panel_expansion_mode;
1912*d350ecf5Sriastradh 	bool set_panel_expansion_mode;
1913*d350ecf5Sriastradh 	bool temperature_change;
1914*d350ecf5Sriastradh 	bool graphics_device_types;
1915*d350ecf5Sriastradh };
1916*d350ecf5Sriastradh 
1917*d350ecf5Sriastradh struct amdgpu_atif {
1918*d350ecf5Sriastradh 	struct amdgpu_atif_notifications notifications;
1919*d350ecf5Sriastradh 	struct amdgpu_atif_functions functions;
1920*d350ecf5Sriastradh 	struct amdgpu_atif_notification_cfg notification_cfg;
1921*d350ecf5Sriastradh 	struct amdgpu_encoder *encoder_for_bl;
1922*d350ecf5Sriastradh };
1923*d350ecf5Sriastradh 
1924*d350ecf5Sriastradh struct amdgpu_atcs_functions {
1925*d350ecf5Sriastradh 	bool get_ext_state;
1926*d350ecf5Sriastradh 	bool pcie_perf_req;
1927*d350ecf5Sriastradh 	bool pcie_dev_rdy;
1928*d350ecf5Sriastradh 	bool pcie_bus_width;
1929*d350ecf5Sriastradh };
1930*d350ecf5Sriastradh 
1931*d350ecf5Sriastradh struct amdgpu_atcs {
1932*d350ecf5Sriastradh 	struct amdgpu_atcs_functions functions;
1933*d350ecf5Sriastradh };
1934*d350ecf5Sriastradh 
1935*d350ecf5Sriastradh /*
1936*d350ecf5Sriastradh  * CGS
1937*d350ecf5Sriastradh  */
1938*d350ecf5Sriastradh void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
1939*d350ecf5Sriastradh void amdgpu_cgs_destroy_device(void *cgs_device);
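
/*
 * Usage sketch (editorial illustration): the CGS (common graphics
 * services) device wraps an amdgpu_device for IP-independent code.
 *
 *	void *cgs_device = amdgpu_cgs_create_device(adev);
 *	if (cgs_device) {
 *		...
 *		amdgpu_cgs_destroy_device(cgs_device);
 *	}
 */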
1940*d350ecf5Sriastradh 
1941*d350ecf5Sriastradh 
1942*d350ecf5Sriastradh /*
1943*d350ecf5Sriastradh  * Core structure, functions and helpers.
1944*d350ecf5Sriastradh  */
1945*d350ecf5Sriastradh typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
1946*d350ecf5Sriastradh typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1947*d350ecf5Sriastradh 
1948*d350ecf5Sriastradh typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1949*d350ecf5Sriastradh typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1950*d350ecf5Sriastradh 
1951*d350ecf5Sriastradh struct amdgpu_ip_block_status {
1952*d350ecf5Sriastradh 	bool valid;
1953*d350ecf5Sriastradh 	bool sw;
1954*d350ecf5Sriastradh 	bool hw;
1955*d350ecf5Sriastradh };
1956*d350ecf5Sriastradh 
1957*d350ecf5Sriastradh struct amdgpu_device {
1958*d350ecf5Sriastradh 	struct device			*dev;
1959*d350ecf5Sriastradh 	struct drm_device		*ddev;
1960*d350ecf5Sriastradh 	struct pci_dev			*pdev;
1961*d350ecf5Sriastradh 
1962*d350ecf5Sriastradh 	/* ASIC */
1963*d350ecf5Sriastradh 	enum amd_asic_type		asic_type;
1964*d350ecf5Sriastradh 	uint32_t			family;
1965*d350ecf5Sriastradh 	uint32_t			rev_id;
1966*d350ecf5Sriastradh 	uint32_t			external_rev_id;
1967*d350ecf5Sriastradh 	unsigned long			flags;
1968*d350ecf5Sriastradh 	int				usec_timeout;
1969*d350ecf5Sriastradh 	const struct amdgpu_asic_funcs	*asic_funcs;
1970*d350ecf5Sriastradh 	bool				shutdown;
1971*d350ecf5Sriastradh 	bool				suspend;
1972*d350ecf5Sriastradh 	bool				need_dma32;
1973*d350ecf5Sriastradh 	bool				accel_working;
1974*d350ecf5Sriastradh 	struct work_struct 		reset_work;
1975*d350ecf5Sriastradh 	struct notifier_block		acpi_nb;
1976*d350ecf5Sriastradh 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
1977*d350ecf5Sriastradh 	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
1978*d350ecf5Sriastradh 	unsigned 			debugfs_count;
1979*d350ecf5Sriastradh #if defined(CONFIG_DEBUG_FS)
1980*d350ecf5Sriastradh 	struct dentry			*debugfs_regs;
1981*d350ecf5Sriastradh #endif
1982*d350ecf5Sriastradh 	struct amdgpu_atif		atif;
1983*d350ecf5Sriastradh 	struct amdgpu_atcs		atcs;
1984*d350ecf5Sriastradh 	struct mutex			srbm_mutex;
1985*d350ecf5Sriastradh 	/* GRBM index mutex. Protects concurrent access to GRBM index */
1986*d350ecf5Sriastradh 	struct mutex                    grbm_idx_mutex;
1987*d350ecf5Sriastradh 	struct dev_pm_domain		vga_pm_domain;
1988*d350ecf5Sriastradh 	bool				have_disp_power_ref;
1989*d350ecf5Sriastradh 
1990*d350ecf5Sriastradh 	/* BIOS */
1991*d350ecf5Sriastradh 	uint8_t				*bios;
1992*d350ecf5Sriastradh 	bool				is_atom_bios;
1993*d350ecf5Sriastradh 	uint16_t			bios_header_start;
1994*d350ecf5Sriastradh 	struct amdgpu_bo		*stollen_vga_memory;
1995*d350ecf5Sriastradh 	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
1996*d350ecf5Sriastradh 
1997*d350ecf5Sriastradh 	/* Register/doorbell mmio */
1998*d350ecf5Sriastradh 	resource_size_t			rmmio_base;
1999*d350ecf5Sriastradh 	resource_size_t			rmmio_size;
2000*d350ecf5Sriastradh 	void __iomem			*rmmio;
2001*d350ecf5Sriastradh 	/* protects concurrent MM_INDEX/DATA based register access */
2002*d350ecf5Sriastradh 	spinlock_t mmio_idx_lock;
2003*d350ecf5Sriastradh 	/* protects concurrent SMC based register access */
2004*d350ecf5Sriastradh 	spinlock_t smc_idx_lock;
2005*d350ecf5Sriastradh 	amdgpu_rreg_t			smc_rreg;
2006*d350ecf5Sriastradh 	amdgpu_wreg_t			smc_wreg;
2007*d350ecf5Sriastradh 	/* protects concurrent PCIE register access */
2008*d350ecf5Sriastradh 	spinlock_t pcie_idx_lock;
2009*d350ecf5Sriastradh 	amdgpu_rreg_t			pcie_rreg;
2010*d350ecf5Sriastradh 	amdgpu_wreg_t			pcie_wreg;
2011*d350ecf5Sriastradh 	/* protects concurrent UVD register access */
2012*d350ecf5Sriastradh 	spinlock_t uvd_ctx_idx_lock;
2013*d350ecf5Sriastradh 	amdgpu_rreg_t			uvd_ctx_rreg;
2014*d350ecf5Sriastradh 	amdgpu_wreg_t			uvd_ctx_wreg;
2015*d350ecf5Sriastradh 	/* protects concurrent DIDT register access */
2016*d350ecf5Sriastradh 	spinlock_t didt_idx_lock;
2017*d350ecf5Sriastradh 	amdgpu_rreg_t			didt_rreg;
2018*d350ecf5Sriastradh 	amdgpu_wreg_t			didt_wreg;
2019*d350ecf5Sriastradh 	/* protects concurrent ENDPOINT (audio) register access */
2020*d350ecf5Sriastradh 	spinlock_t audio_endpt_idx_lock;
2021*d350ecf5Sriastradh 	amdgpu_block_rreg_t		audio_endpt_rreg;
2022*d350ecf5Sriastradh 	amdgpu_block_wreg_t		audio_endpt_wreg;
2023*d350ecf5Sriastradh 	void __iomem                    *rio_mem;
2024*d350ecf5Sriastradh 	resource_size_t			rio_mem_size;
2025*d350ecf5Sriastradh 	struct amdgpu_doorbell		doorbell;
2026*d350ecf5Sriastradh 
2027*d350ecf5Sriastradh 	/* clock/pll info */
2028*d350ecf5Sriastradh 	struct amdgpu_clock            clock;
2029*d350ecf5Sriastradh 
2030*d350ecf5Sriastradh 	/* MC */
2031*d350ecf5Sriastradh 	struct amdgpu_mc		mc;
2032*d350ecf5Sriastradh 	struct amdgpu_gart		gart;
2033*d350ecf5Sriastradh 	struct amdgpu_dummy_page	dummy_page;
2034*d350ecf5Sriastradh 	struct amdgpu_vm_manager	vm_manager;
2035*d350ecf5Sriastradh 
2036*d350ecf5Sriastradh 	/* memory management */
2037*d350ecf5Sriastradh 	struct amdgpu_mman		mman;
2038*d350ecf5Sriastradh 	struct amdgpu_gem		gem;
2039*d350ecf5Sriastradh 	struct amdgpu_vram_scratch	vram_scratch;
2040*d350ecf5Sriastradh 	struct amdgpu_wb		wb;
2041*d350ecf5Sriastradh 	atomic64_t			vram_usage;
2042*d350ecf5Sriastradh 	atomic64_t			vram_vis_usage;
2043*d350ecf5Sriastradh 	atomic64_t			gtt_usage;
2044*d350ecf5Sriastradh 	atomic64_t			num_bytes_moved;
2045*d350ecf5Sriastradh 	atomic_t			gpu_reset_counter;
2046*d350ecf5Sriastradh 
2047*d350ecf5Sriastradh 	/* display */
2048*d350ecf5Sriastradh 	struct amdgpu_mode_info		mode_info;
2049*d350ecf5Sriastradh 	struct work_struct		hotplug_work;
2050*d350ecf5Sriastradh 	struct amdgpu_irq_src		crtc_irq;
2051*d350ecf5Sriastradh 	struct amdgpu_irq_src		pageflip_irq;
2052*d350ecf5Sriastradh 	struct amdgpu_irq_src		hpd_irq;
2053*d350ecf5Sriastradh 
2054*d350ecf5Sriastradh 	/* rings */
2055*d350ecf5Sriastradh 	unsigned			fence_context;
2056*d350ecf5Sriastradh 	struct mutex			ring_lock;
2057*d350ecf5Sriastradh 	unsigned			num_rings;
2058*d350ecf5Sriastradh 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
2059*d350ecf5Sriastradh 	bool				ib_pool_ready;
2060*d350ecf5Sriastradh 	struct amdgpu_sa_manager	ring_tmp_bo;
2061*d350ecf5Sriastradh 
2062*d350ecf5Sriastradh 	/* interrupts */
2063*d350ecf5Sriastradh 	struct amdgpu_irq		irq;
2064*d350ecf5Sriastradh 
2065*d350ecf5Sriastradh 	/* dpm */
2066*d350ecf5Sriastradh 	struct amdgpu_pm		pm;
2067*d350ecf5Sriastradh 	u32				cg_flags;
2068*d350ecf5Sriastradh 	u32				pg_flags;
2069*d350ecf5Sriastradh 
2070*d350ecf5Sriastradh 	/* amdgpu smumgr */
2071*d350ecf5Sriastradh 	struct amdgpu_smumgr smu;
2072*d350ecf5Sriastradh 
2073*d350ecf5Sriastradh 	/* gfx */
2074*d350ecf5Sriastradh 	struct amdgpu_gfx		gfx;
2075*d350ecf5Sriastradh 
2076*d350ecf5Sriastradh 	/* sdma */
2077*d350ecf5Sriastradh 	struct amdgpu_sdma		sdma;
2078*d350ecf5Sriastradh 
2079*d350ecf5Sriastradh 	/* uvd */
2080*d350ecf5Sriastradh 	bool				has_uvd;
2081*d350ecf5Sriastradh 	struct amdgpu_uvd		uvd;
2082*d350ecf5Sriastradh 
2083*d350ecf5Sriastradh 	/* vce */
2084*d350ecf5Sriastradh 	struct amdgpu_vce		vce;
2085*d350ecf5Sriastradh 
2086*d350ecf5Sriastradh 	/* firmwares */
2087*d350ecf5Sriastradh 	struct amdgpu_firmware		firmware;
2088*d350ecf5Sriastradh 
2089*d350ecf5Sriastradh 	/* GDS */
2090*d350ecf5Sriastradh 	struct amdgpu_gds		gds;
2091*d350ecf5Sriastradh 
2092*d350ecf5Sriastradh 	const struct amdgpu_ip_block_version *ip_blocks;
2093*d350ecf5Sriastradh 	int				num_ip_blocks;
2094*d350ecf5Sriastradh 	struct amdgpu_ip_block_status	*ip_block_status;
2095*d350ecf5Sriastradh 	struct mutex	mn_lock;
2096*d350ecf5Sriastradh 	DECLARE_HASHTABLE(mn_hash, 7);
2097*d350ecf5Sriastradh 
2098*d350ecf5Sriastradh 	/* tracking pinned memory */
2099*d350ecf5Sriastradh 	u64 vram_pin_size;
2100*d350ecf5Sriastradh 	u64 gart_pin_size;
2101*d350ecf5Sriastradh 
2102*d350ecf5Sriastradh 	/* amdkfd interface */
2103*d350ecf5Sriastradh 	struct kfd_dev          *kfd;
2104*d350ecf5Sriastradh 
2105*d350ecf5Sriastradh 	/* kernel context for IB submission */
2106*d350ecf5Sriastradh 	struct amdgpu_ctx	kernel_ctx;
2107*d350ecf5Sriastradh };
2108*d350ecf5Sriastradh 
2109*d350ecf5Sriastradh bool amdgpu_device_is_px(struct drm_device *dev);
2110*d350ecf5Sriastradh int amdgpu_device_init(struct amdgpu_device *adev,
2111*d350ecf5Sriastradh 		       struct drm_device *ddev,
2112*d350ecf5Sriastradh 		       struct pci_dev *pdev,
2113*d350ecf5Sriastradh 		       uint32_t flags);
2114*d350ecf5Sriastradh void amdgpu_device_fini(struct amdgpu_device *adev);
2115*d350ecf5Sriastradh int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
2116*d350ecf5Sriastradh 
2117*d350ecf5Sriastradh uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
2118*d350ecf5Sriastradh 			bool always_indirect);
2119*d350ecf5Sriastradh void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
2120*d350ecf5Sriastradh 		    bool always_indirect);
2121*d350ecf5Sriastradh u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
2122*d350ecf5Sriastradh void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
2123*d350ecf5Sriastradh 
2124*d350ecf5Sriastradh u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
2125*d350ecf5Sriastradh void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
2126*d350ecf5Sriastradh 
2127*d350ecf5Sriastradh /*
2128*d350ecf5Sriastradh  * Cast helper
2129*d350ecf5Sriastradh  */
2130*d350ecf5Sriastradh extern const struct fence_ops amdgpu_fence_ops;
2131*d350ecf5Sriastradh static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
2132*d350ecf5Sriastradh {
2133*d350ecf5Sriastradh 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
2134*d350ecf5Sriastradh 
2135*d350ecf5Sriastradh 	if (__f->base.ops == &amdgpu_fence_ops)
2136*d350ecf5Sriastradh 		return __f;
2137*d350ecf5Sriastradh 
2138*d350ecf5Sriastradh 	return NULL;
2139*d350ecf5Sriastradh }
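
/*
 * Usage sketch (editorial illustration): the cast helper returns NULL
 * for fences that were not emitted by an amdgpu ring, so callers can
 * branch on the fence's origin.
 *
 *	struct amdgpu_fence *af = to_amdgpu_fence(f);
 *
 *	if (af)
 *		... fence came from one of our rings ...
 *	else
 *		... foreign fence, wait on it generically ...
 */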
2140*d350ecf5Sriastradh 
2141*d350ecf5Sriastradh /*
2142*d350ecf5Sriastradh  * Registers read & write functions.
2143*d350ecf5Sriastradh  */
2144*d350ecf5Sriastradh #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
2145*d350ecf5Sriastradh #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
2146*d350ecf5Sriastradh #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
2147*d350ecf5Sriastradh #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
2148*d350ecf5Sriastradh #define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
2149*d350ecf5Sriastradh #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2150*d350ecf5Sriastradh #define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
2151*d350ecf5Sriastradh #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
2152*d350ecf5Sriastradh #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
2153*d350ecf5Sriastradh #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
2154*d350ecf5Sriastradh #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
2155*d350ecf5Sriastradh #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
2156*d350ecf5Sriastradh #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
2157*d350ecf5Sriastradh #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
2158*d350ecf5Sriastradh #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
2159*d350ecf5Sriastradh #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
2160*d350ecf5Sriastradh #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
2161*d350ecf5Sriastradh #define WREG32_P(reg, val, mask)				\
2162*d350ecf5Sriastradh 	do {							\
2163*d350ecf5Sriastradh 		uint32_t tmp_ = RREG32(reg);			\
2164*d350ecf5Sriastradh 		tmp_ &= (mask);					\
2165*d350ecf5Sriastradh 		tmp_ |= ((val) & ~(mask));			\
2166*d350ecf5Sriastradh 		WREG32(reg, tmp_);				\
2167*d350ecf5Sriastradh 	} while (0)
2168*d350ecf5Sriastradh #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2169*d350ecf5Sriastradh #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2170*d350ecf5Sriastradh #define WREG32_PLL_P(reg, val, mask)				\
2171*d350ecf5Sriastradh 	do {							\
2172*d350ecf5Sriastradh 		uint32_t tmp_ = RREG32_PLL(reg);		\
2173*d350ecf5Sriastradh 		tmp_ &= (mask);					\
2174*d350ecf5Sriastradh 		tmp_ |= ((val) & ~(mask));			\
2175*d350ecf5Sriastradh 		WREG32_PLL(reg, tmp_);				\
2176*d350ecf5Sriastradh 	} while (0)
2177*d350ecf5Sriastradh #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
2178*d350ecf5Sriastradh #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
2179*d350ecf5Sriastradh #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
2180*d350ecf5Sriastradh 
2181*d350ecf5Sriastradh #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
2182*d350ecf5Sriastradh #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
2183*d350ecf5Sriastradh 
2184*d350ecf5Sriastradh #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
2185*d350ecf5Sriastradh #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
2186*d350ecf5Sriastradh 
2187*d350ecf5Sriastradh #define REG_SET_FIELD(orig_val, reg, field, field_val)			\
2188*d350ecf5Sriastradh 	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
2189*d350ecf5Sriastradh 	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
2190*d350ecf5Sriastradh 
2191*d350ecf5Sriastradh #define REG_GET_FIELD(value, reg, field)				\
2192*d350ecf5Sriastradh 	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
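/*
 * Illustrative sketch (hypothetical names): the generated *_sh_mask.h
 * headers define REG__FIELD__SHIFT and REG__FIELD_MASK pairs (note the
 * double underscore before SHIFT, single before MASK), which the token
 * pasting above relies on.  For a made-up register FOO with field BAR:
 *
 *	uint32_t tmp = RREG32(mmFOO);
 *	tmp = REG_SET_FIELD(tmp, FOO, BAR, 3);		// BAR := 3, other fields kept
 *	WREG32(mmFOO, tmp);
 *	uint32_t bar = REG_GET_FIELD(tmp, FOO, BAR);	// bar == 3
 */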
2193*d350ecf5Sriastradh 
2194*d350ecf5Sriastradh /*
2195*d350ecf5Sriastradh  * BIOS helpers.
2196*d350ecf5Sriastradh  */
2197*d350ecf5Sriastradh #define RBIOS8(i) (adev->bios[i])
2198*d350ecf5Sriastradh #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
2199*d350ecf5Sriastradh #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
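/*
 * Illustrative sketch: the VBIOS image is byte-addressed and little-endian,
 * so wider reads are composed from RBIOS8() byte reads.  With 0x48 as an
 * arbitrary example offset:
 *
 *	uint32_t dw = RBIOS32(0x48);
 *	// == bios[0x48] | bios[0x49]<<8 | bios[0x4a]<<16 | bios[0x4b]<<24
 */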
2200*d350ecf5Sriastradh 
2201*d350ecf5Sriastradh /*
2202*d350ecf5Sriastradh  * RING helpers.
2203*d350ecf5Sriastradh  */
2204*d350ecf5Sriastradh static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
2205*d350ecf5Sriastradh {
2206*d350ecf5Sriastradh 	if (ring->count_dw <= 0)
2207*d350ecf5Sriastradh 		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
2208*d350ecf5Sriastradh 	ring->ring[ring->wptr++] = v;
2209*d350ecf5Sriastradh 	ring->wptr &= ring->ptr_mask;	/* rings are power-of-two sized; wrap */
2210*d350ecf5Sriastradh 	ring->count_dw--;
2211*d350ecf5Sriastradh 	ring->ring_free_dw--;
2212*d350ecf5Sriastradh }
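/*
 * Illustrative sketch (assuming the usual alloc/commit pairing provided by
 * amdgpu_ring.c and an ASIC-specific PACKET3 NOP encoding): callers reserve
 * ring space before writing, so the overflow warning above should never
 * fire in practice:
 *
 *	if (amdgpu_ring_alloc(ring, 2) == 0) {
 *		amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 0));
 *		amdgpu_ring_write(ring, 0);
 *		amdgpu_ring_commit(ring);
 *	}
 */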
2213*d350ecf5Sriastradh 
2214*d350ecf5Sriastradh static inline struct amdgpu_sdma_instance *
2215*d350ecf5Sriastradh amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2216*d350ecf5Sriastradh {
2217*d350ecf5Sriastradh 	struct amdgpu_device *adev = ring->adev;
2218*d350ecf5Sriastradh 	int i;
2219*d350ecf5Sriastradh 
2220*d350ecf5Sriastradh 	for (i = 0; i < adev->sdma.num_instances; i++)
2221*d350ecf5Sriastradh 		if (&adev->sdma.instance[i].ring == ring)
2222*d350ecf5Sriastradh 			break;
2223*d350ecf5Sriastradh 
2224*d350ecf5Sriastradh 	if (i < adev->sdma.num_instances)
2225*d350ecf5Sriastradh 		return &adev->sdma.instance[i];
2226*d350ecf5Sriastradh 	else
2227*d350ecf5Sriastradh 		return NULL;
2228*d350ecf5Sriastradh }
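/*
 * Illustrative sketch: SDMA code uses this to get back at per-instance
 * state (firmware handle, version, ...) from a ring pointer; the NULL
 * check matters, since a non-SDMA ring matches no instance:
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma != NULL && sdma->burst_nop)
 *		...;	// e.g. emit burst NOPs (made-up use of the flag)
 */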
2229*d350ecf5Sriastradh 
2230*d350ecf5Sriastradh /*
2231*d350ecf5Sriastradh  * ASIC macros.
2232*d350ecf5Sriastradh  */
2233*d350ecf5Sriastradh #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
2234*d350ecf5Sriastradh #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
2235*d350ecf5Sriastradh #define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
2236*d350ecf5Sriastradh #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
2237*d350ecf5Sriastradh #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
2238*d350ecf5Sriastradh #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2239*d350ecf5Sriastradh #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2240*d350ecf5Sriastradh #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2241*d350ecf5Sriastradh #define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
2242*d350ecf5Sriastradh #define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
2243*d350ecf5Sriastradh #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
2244*d350ecf5Sriastradh #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
2245*d350ecf5Sriastradh #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
2246*d350ecf5Sriastradh #define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
2247*d350ecf5Sriastradh #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
2248*d350ecf5Sriastradh #define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
2249*d350ecf5Sriastradh #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
2250*d350ecf5Sriastradh #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
2251*d350ecf5Sriastradh #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
2252*d350ecf5Sriastradh #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
2253*d350ecf5Sriastradh #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
2254*d350ecf5Sriastradh #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
2255*d350ecf5Sriastradh #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
2256*d350ecf5Sriastradh #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
2257*d350ecf5Sriastradh #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
2258*d350ecf5Sriastradh #define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
2259*d350ecf5Sriastradh #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
2260*d350ecf5Sriastradh #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
2261*d350ecf5Sriastradh #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
2262*d350ecf5Sriastradh #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
2263*d350ecf5Sriastradh #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
2264*d350ecf5Sriastradh #define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
2265*d350ecf5Sriastradh #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
2266*d350ecf5Sriastradh #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
2267*d350ecf5Sriastradh #define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
2268*d350ecf5Sriastradh #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
2269*d350ecf5Sriastradh #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
2270*d350ecf5Sriastradh #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
2271*d350ecf5Sriastradh #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
2272*d350ecf5Sriastradh #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
2273*d350ecf5Sriastradh #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
2274*d350ecf5Sriastradh #define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
2275*d350ecf5Sriastradh #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
2276*d350ecf5Sriastradh #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
2277*d350ecf5Sriastradh #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
2278*d350ecf5Sriastradh #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
2279*d350ecf5Sriastradh #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
2280*d350ecf5Sriastradh #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
2281*d350ecf5Sriastradh #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
2282*d350ecf5Sriastradh #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
2283*d350ecf5Sriastradh #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
2284*d350ecf5Sriastradh #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
2285*d350ecf5Sriastradh #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
2286*d350ecf5Sriastradh #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
2287*d350ecf5Sriastradh #define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l))
2288*d350ecf5Sriastradh #define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l))
2289*d350ecf5Sriastradh #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
2290*d350ecf5Sriastradh #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
2291*d350ecf5Sriastradh #define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
2292*d350ecf5Sriastradh #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
2293*d350ecf5Sriastradh #define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
2294*d350ecf5Sriastradh #define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
2295*d350ecf5Sriastradh #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
2296*d350ecf5Sriastradh #define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
2297*d350ecf5Sriastradh #define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
2298*d350ecf5Sriastradh #define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
2299*d350ecf5Sriastradh #define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
2300*d350ecf5Sriastradh 
2301*d350ecf5Sriastradh #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
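/*
 * Illustrative sketch: every macro above is a single indirect call through
 * a function table installed by the probed IP blocks, so IP-independent
 * code can stay generic:
 *
 *	uint64_t clock = amdgpu_asic_get_gpu_clock_counter(adev);
 *	int r = amdgpu_ring_test_ring(ring);
 *
 * On a CIK part these dispatch into the cik_* backends, on a VI part into
 * the vi_* backends, and so on.
 */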
2302*d350ecf5Sriastradh 
2303*d350ecf5Sriastradh /* Common functions */
2304*d350ecf5Sriastradh int amdgpu_gpu_reset(struct amdgpu_device *adev);
2305*d350ecf5Sriastradh void amdgpu_pci_config_reset(struct amdgpu_device *adev);
2306*d350ecf5Sriastradh bool amdgpu_card_posted(struct amdgpu_device *adev);
2307*d350ecf5Sriastradh void amdgpu_update_display_priority(struct amdgpu_device *adev);
2308*d350ecf5Sriastradh bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
2309*d350ecf5Sriastradh 
2310*d350ecf5Sriastradh int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
2311*d350ecf5Sriastradh int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
2312*d350ecf5Sriastradh 		       u32 ip_instance, u32 ring,
2313*d350ecf5Sriastradh 		       struct amdgpu_ring **out_ring);
2314*d350ecf5Sriastradh void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
2315*d350ecf5Sriastradh bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2316*d350ecf5Sriastradh int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2317*d350ecf5Sriastradh 				     uint32_t flags);
2318*d350ecf5Sriastradh bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2319*d350ecf5Sriastradh bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2320*d350ecf5Sriastradh 				  unsigned long end);
2321*d350ecf5Sriastradh bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2322*d350ecf5Sriastradh uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2323*d350ecf5Sriastradh 				 struct ttm_mem_reg *mem);
2324*d350ecf5Sriastradh void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
2325*d350ecf5Sriastradh void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
2326*d350ecf5Sriastradh void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
2327*d350ecf5Sriastradh void amdgpu_program_register_sequence(struct amdgpu_device *adev,
2328*d350ecf5Sriastradh 					     const u32 *registers,
2329*d350ecf5Sriastradh 					     const u32 array_size);
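/*
 * Illustrative sketch (placeholder register and masks): "golden" register
 * sequences are flat u32 arrays of (offset, clear_mask, or_value) triples;
 * bits set in clear_mask are cleared before or_value is merged in, and a
 * clear_mask of 0xffffffff writes or_value verbatim:
 *
 *	static const u32 golden[] = {
 *		mmFOO_CNTL, 0xffffff00, 0x00000004,
 *	};
 *	amdgpu_program_register_sequence(adev, golden, ARRAY_SIZE(golden));
 */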
2330*d350ecf5Sriastradh 
2331*d350ecf5Sriastradh bool amdgpu_device_is_px(struct drm_device *dev);
2332*d350ecf5Sriastradh /* atpx handler */
2333*d350ecf5Sriastradh #if defined(CONFIG_VGA_SWITCHEROO)
2334*d350ecf5Sriastradh void amdgpu_register_atpx_handler(void);
2335*d350ecf5Sriastradh void amdgpu_unregister_atpx_handler(void);
2336*d350ecf5Sriastradh #else
2337*d350ecf5Sriastradh static inline void amdgpu_register_atpx_handler(void) {}
2338*d350ecf5Sriastradh static inline void amdgpu_unregister_atpx_handler(void) {}
2339*d350ecf5Sriastradh #endif
2340*d350ecf5Sriastradh 
2341*d350ecf5Sriastradh /*
2342*d350ecf5Sriastradh  * KMS
2343*d350ecf5Sriastradh  */
2344*d350ecf5Sriastradh extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
2345*d350ecf5Sriastradh extern int amdgpu_max_kms_ioctl;
2346*d350ecf5Sriastradh 
2347*d350ecf5Sriastradh int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
2348*d350ecf5Sriastradh int amdgpu_driver_unload_kms(struct drm_device *dev);
2349*d350ecf5Sriastradh void amdgpu_driver_lastclose_kms(struct drm_device *dev);
2350*d350ecf5Sriastradh int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
2351*d350ecf5Sriastradh void amdgpu_driver_postclose_kms(struct drm_device *dev,
2352*d350ecf5Sriastradh 				 struct drm_file *file_priv);
2353*d350ecf5Sriastradh void amdgpu_driver_preclose_kms(struct drm_device *dev,
2354*d350ecf5Sriastradh 				struct drm_file *file_priv);
2355*d350ecf5Sriastradh int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
2356*d350ecf5Sriastradh int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2357*d350ecf5Sriastradh u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
2358*d350ecf5Sriastradh int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
2359*d350ecf5Sriastradh void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
2360*d350ecf5Sriastradh int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
2361*d350ecf5Sriastradh 				    int *max_error,
2362*d350ecf5Sriastradh 				    struct timeval *vblank_time,
2363*d350ecf5Sriastradh 				    unsigned flags);
2364*d350ecf5Sriastradh long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
2365*d350ecf5Sriastradh 			     unsigned long arg);
2366*d350ecf5Sriastradh 
2367*d350ecf5Sriastradh /*
2368*d350ecf5Sriastradh  * functions used by amdgpu_encoder.c
2369*d350ecf5Sriastradh  */
2370*d350ecf5Sriastradh struct amdgpu_afmt_acr {
2371*d350ecf5Sriastradh 	u32 clock;
2372*d350ecf5Sriastradh 
2373*d350ecf5Sriastradh 	int n_32khz;
2374*d350ecf5Sriastradh 	int cts_32khz;
2375*d350ecf5Sriastradh 
2376*d350ecf5Sriastradh 	int n_44_1khz;
2377*d350ecf5Sriastradh 	int cts_44_1khz;
2378*d350ecf5Sriastradh 
2379*d350ecf5Sriastradh 	int n_48khz;
2380*d350ecf5Sriastradh 	int cts_48khz;
2382*d350ecf5Sriastradh };
2383*d350ecf5Sriastradh 
2384*d350ecf5Sriastradh struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
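/*
 * Illustrative sketch: HDMI audio code looks up the Audio Clock
 * Regeneration N/CTS pairs for a pixel clock in kHz (as passed around in
 * struct drm_display_mode), e.g. for a 74.25 MHz mode:
 *
 *	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(74250);
 *
 * and then programs acr.n_48khz/acr.cts_48khz (etc.) into the AFMT block.
 */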
2385*d350ecf5Sriastradh 
2386*d350ecf5Sriastradh /* amdgpu_acpi.c */
2387*d350ecf5Sriastradh #if defined(CONFIG_ACPI)
2388*d350ecf5Sriastradh int amdgpu_acpi_init(struct amdgpu_device *adev);
2389*d350ecf5Sriastradh void amdgpu_acpi_fini(struct amdgpu_device *adev);
2390*d350ecf5Sriastradh bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
2391*d350ecf5Sriastradh int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
2392*d350ecf5Sriastradh 						u8 perf_req, bool advertise);
2393*d350ecf5Sriastradh int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
2394*d350ecf5Sriastradh #else
2395*d350ecf5Sriastradh static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
2396*d350ecf5Sriastradh static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
2397*d350ecf5Sriastradh #endif
2398*d350ecf5Sriastradh 
2399*d350ecf5Sriastradh struct amdgpu_bo_va_mapping *
2400*d350ecf5Sriastradh amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
2401*d350ecf5Sriastradh 		       uint64_t addr, struct amdgpu_bo **bo);
2402*d350ecf5Sriastradh 
2403*d350ecf5Sriastradh #include "amdgpu_object.h"
2404*d350ecf5Sriastradh 
2405*d350ecf5Sriastradh #endif /* __AMDGPU_H__ */
2406