1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #ifndef __AMDGPU_H__
29 #define __AMDGPU_H__
30 
31 #ifdef pr_fmt
32 #undef pr_fmt
33 #endif
34 
35 #define pr_fmt(fmt) "amdgpu: " fmt
36 
37 #ifdef dev_fmt
38 #undef dev_fmt
39 #endif
40 
41 #define dev_fmt(fmt) "amdgpu: " fmt
42 
43 #include "amdgpu_ctx.h"
44 
45 #include <linux/atomic.h>
46 #include <linux/wait.h>
47 #include <linux/list.h>
48 #include <linux/kref.h>
49 #include <linux/rbtree.h>
50 #include <linux/hashtable.h>
51 #include <linux/dma-fence.h>
52 #include <linux/pci.h>
53 
54 #include <drm/ttm/ttm_bo.h>
55 #include <drm/ttm/ttm_placement.h>
56 
57 #include <drm/amdgpu_drm.h>
58 #include <drm/drm_gem.h>
59 #include <drm/drm_ioctl.h>
60 
61 #include <dev/wscons/wsconsio.h>
62 #include <dev/wscons/wsdisplayvar.h>
63 #include <dev/rasops/rasops.h>
64 
65 #include <kgd_kfd_interface.h>
66 #include "dm_pp_interface.h"
67 #include "kgd_pp_interface.h"
68 
69 #include "amd_shared.h"
70 #include "amdgpu_mode.h"
71 #include "amdgpu_ih.h"
72 #include "amdgpu_irq.h"
73 #include "amdgpu_ucode.h"
74 #include "amdgpu_ttm.h"
75 #include "amdgpu_psp.h"
76 #include "amdgpu_gds.h"
77 #include "amdgpu_sync.h"
78 #include "amdgpu_ring.h"
79 #include "amdgpu_vm.h"
80 #include "amdgpu_dpm.h"
81 #include "amdgpu_acp.h"
82 #include "amdgpu_uvd.h"
83 #include "amdgpu_vce.h"
84 #include "amdgpu_vcn.h"
85 #include "amdgpu_jpeg.h"
86 #include "amdgpu_gmc.h"
87 #include "amdgpu_gfx.h"
88 #include "amdgpu_sdma.h"
89 #include "amdgpu_lsdma.h"
90 #include "amdgpu_nbio.h"
91 #include "amdgpu_hdp.h"
92 #include "amdgpu_dm.h"
93 #include "amdgpu_virt.h"
94 #include "amdgpu_csa.h"
95 #include "amdgpu_mes_ctx.h"
96 #include "amdgpu_gart.h"
97 #include "amdgpu_debugfs.h"
98 #include "amdgpu_job.h"
99 #include "amdgpu_bo_list.h"
100 #include "amdgpu_gem.h"
101 #include "amdgpu_doorbell.h"
102 #include "amdgpu_amdkfd.h"
103 #include "amdgpu_discovery.h"
104 #include "amdgpu_mes.h"
105 #include "amdgpu_umc.h"
106 #include "amdgpu_mmhub.h"
107 #include "amdgpu_gfxhub.h"
108 #include "amdgpu_df.h"
109 #include "amdgpu_smuio.h"
110 #include "amdgpu_fdinfo.h"
111 #include "amdgpu_mca.h"
112 #include "amdgpu_ras.h"
113 #include "amdgpu_xcp.h"
114 
115 #define MAX_GPU_INSTANCE		64
116 
117 struct amdgpu_gpu_instance
118 {
119 	struct amdgpu_device		*adev;
120 	int				mgpu_fan_enabled;
121 };
122 
123 struct amdgpu_mgpu_info
124 {
125 	struct amdgpu_gpu_instance	gpu_ins[MAX_GPU_INSTANCE];
126 	struct rwlock			mutex;
127 	uint32_t			num_gpu;
128 	uint32_t			num_dgpu;
129 	uint32_t			num_apu;
130 
131 	/* delayed reset_func for XGMI configuration if necessary */
132 	struct delayed_work		delayed_reset_work;
133 	bool				pending_reset;
134 };
135 
136 enum amdgpu_ss {
137 	AMDGPU_SS_DRV_LOAD,
138 	AMDGPU_SS_DEV_D0,
139 	AMDGPU_SS_DEV_D3,
140 	AMDGPU_SS_DRV_UNLOAD
141 };
142 
143 struct amdgpu_watchdog_timer
144 {
145 	bool timeout_fatal_disable;
146 	uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
147 };
148 
149 #define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256
150 
151 /*
152  * Module parameters.
153  */
154 extern int amdgpu_modeset;
155 extern unsigned int amdgpu_vram_limit;
156 extern int amdgpu_vis_vram_limit;
157 extern int amdgpu_gart_size;
158 extern int amdgpu_gtt_size;
159 extern int amdgpu_moverate;
160 extern int amdgpu_audio;
161 extern int amdgpu_disp_priority;
162 extern int amdgpu_hw_i2c;
163 extern int amdgpu_pcie_gen2;
164 extern int amdgpu_msi;
165 extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
166 extern int amdgpu_dpm;
167 extern int amdgpu_fw_load_type;
168 extern int amdgpu_aspm;
169 extern int amdgpu_runtime_pm;
170 extern uint amdgpu_ip_block_mask;
171 extern int amdgpu_bapm;
172 extern int amdgpu_deep_color;
173 extern int amdgpu_vm_size;
174 extern int amdgpu_vm_block_size;
175 extern int amdgpu_vm_fragment_size;
176 extern int amdgpu_vm_fault_stop;
177 extern int amdgpu_vm_debug;
178 extern int amdgpu_vm_update_mode;
179 extern int amdgpu_exp_hw_support;
180 extern int amdgpu_dc;
181 extern int amdgpu_sched_jobs;
182 extern int amdgpu_sched_hw_submission;
183 extern uint amdgpu_pcie_gen_cap;
184 extern uint amdgpu_pcie_lane_cap;
185 extern u64 amdgpu_cg_mask;
186 extern uint amdgpu_pg_mask;
187 extern uint amdgpu_sdma_phase_quantum;
188 extern char *amdgpu_disable_cu;
189 extern char *amdgpu_virtual_display;
190 extern uint amdgpu_pp_feature_mask;
191 extern uint amdgpu_force_long_training;
192 extern int amdgpu_lbpw;
193 extern int amdgpu_compute_multipipe;
194 extern int amdgpu_gpu_recovery;
195 extern int amdgpu_emu_mode;
196 extern uint amdgpu_smu_memory_pool_size;
197 extern int amdgpu_smu_pptable_id;
198 extern uint amdgpu_dc_feature_mask;
199 extern uint amdgpu_dc_debug_mask;
200 extern uint amdgpu_dc_visual_confirm;
201 extern uint amdgpu_dm_abm_level;
202 extern int amdgpu_backlight;
203 extern struct amdgpu_mgpu_info mgpu_info;
204 extern int amdgpu_ras_enable;
205 extern uint amdgpu_ras_mask;
206 extern int amdgpu_bad_page_threshold;
207 extern bool amdgpu_ignore_bad_page_threshold;
208 extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
209 extern int amdgpu_async_gfx_ring;
210 extern int amdgpu_mcbp;
211 extern int amdgpu_discovery;
212 extern int amdgpu_mes;
213 extern int amdgpu_mes_kiq;
214 extern int amdgpu_noretry;
215 extern int amdgpu_force_asic_type;
216 extern int amdgpu_smartshift_bias;
217 extern int amdgpu_use_xgmi_p2p;
218 extern int amdgpu_mtype_local;
219 extern bool enforce_isolation;
220 #ifdef CONFIG_HSA_AMD
221 extern int sched_policy;
222 extern bool debug_evictions;
223 extern bool no_system_mem_limit;
224 extern int halt_if_hws_hang;
225 #else
226 static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
227 static const bool __maybe_unused debug_evictions; /* = false */
228 static const bool __maybe_unused no_system_mem_limit;
229 static const int __maybe_unused halt_if_hws_hang;
230 #endif
231 #ifdef CONFIG_HSA_AMD_P2P
232 extern bool pcie_p2p;
233 #endif
234 
235 extern int amdgpu_tmz;
236 extern int amdgpu_reset_method;
237 
238 #ifdef CONFIG_DRM_AMDGPU_SI
239 extern int amdgpu_si_support;
240 #endif
241 #ifdef CONFIG_DRM_AMDGPU_CIK
242 extern int amdgpu_cik_support;
243 #endif
244 extern int amdgpu_num_kcq;
245 
246 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
247 extern int amdgpu_vcnfw_log;
248 extern int amdgpu_sg_display;
249 
250 extern int amdgpu_user_partt_mode;
251 
252 #define AMDGPU_VM_MAX_NUM_CTX			4096
253 #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
254 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
255 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
256 #define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
257 #define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
258 #define AMDGPUFB_CONN_LIMIT			4
259 #define AMDGPU_BIOS_NUM_SCRATCH			16
260 
261 #define AMDGPU_VBIOS_VGA_ALLOCATION		(9 * 1024 * 1024) /* reserve 8 MB for the VGA emulator and 1 MB for the FB */
262 
263 /* hard reset data */
264 #define AMDGPU_ASIC_RESET_DATA                  0x39d5e86b
265 
266 /* reset flags */
267 #define AMDGPU_RESET_GFX			(1 << 0)
268 #define AMDGPU_RESET_COMPUTE			(1 << 1)
269 #define AMDGPU_RESET_DMA			(1 << 2)
270 #define AMDGPU_RESET_CP				(1 << 3)
271 #define AMDGPU_RESET_GRBM			(1 << 4)
272 #define AMDGPU_RESET_DMA1			(1 << 5)
273 #define AMDGPU_RESET_RLC			(1 << 6)
274 #define AMDGPU_RESET_SEM			(1 << 7)
275 #define AMDGPU_RESET_IH				(1 << 8)
276 #define AMDGPU_RESET_VMC			(1 << 9)
277 #define AMDGPU_RESET_MC				(1 << 10)
278 #define AMDGPU_RESET_DISPLAY			(1 << 11)
279 #define AMDGPU_RESET_UVD			(1 << 12)
280 #define AMDGPU_RESET_VCE			(1 << 13)
281 #define AMDGPU_RESET_VCE1			(1 << 14)
282 
283 /* max cursor sizes (in pixels) */
284 #define CIK_CURSOR_WIDTH 128
285 #define CIK_CURSOR_HEIGHT 128
286 
287 /* smart shift bias level limits */
288 #define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
289 #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
290 
291 /* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuations */
292 #define AMDGPU_SWCTF_EXTRA_DELAY		50
293 
294 struct amdgpu_xcp_mgr;
295 struct amdgpu_device;
296 struct amdgpu_irq_src;
297 struct amdgpu_fpriv;
298 struct amdgpu_bo_va_mapping;
299 struct kfd_vm_fault_info;
300 struct amdgpu_hive_info;
301 struct amdgpu_reset_context;
302 struct amdgpu_reset_control;
303 
304 enum amdgpu_cp_irq {
305 	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
306 	AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
307 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
308 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
309 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
310 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
311 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
312 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
313 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
314 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
315 
316 	AMDGPU_CP_IRQ_LAST
317 };
318 
319 enum amdgpu_thermal_irq {
320 	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
321 	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
322 
323 	AMDGPU_THERMAL_IRQ_LAST
324 };
325 
326 enum amdgpu_kiq_irq {
327 	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
328 	AMDGPU_CP_KIQ_IRQ_LAST
329 };
330 #define SRIOV_USEC_TIMEOUT  1200000 /* wait 12 * 100ms for SRIOV */
331 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
332 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
333 #define MAX_KIQ_REG_TRY 1000
334 
335 int amdgpu_device_ip_set_clockgating_state(void *dev,
336 					   enum amd_ip_block_type block_type,
337 					   enum amd_clockgating_state state);
338 int amdgpu_device_ip_set_powergating_state(void *dev,
339 					   enum amd_ip_block_type block_type,
340 					   enum amd_powergating_state state);
341 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
342 					    u64 *flags);
343 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
344 				   enum amd_ip_block_type block_type);
345 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
346 			      enum amd_ip_block_type block_type);
347 
348 #define AMDGPU_MAX_IP_NUM 16
349 
350 struct amdgpu_ip_block_status {
351 	bool valid;
352 	bool sw;
353 	bool hw;
354 	bool late_initialized;
355 	bool hang;
356 };
357 
358 struct amdgpu_ip_block_version {
359 	const enum amd_ip_block_type type;
360 	const u32 major;
361 	const u32 minor;
362 	const u32 rev;
363 	const struct amd_ip_funcs *funcs;
364 };
365 
366 #define HW_REV(_Major, _Minor, _Rev) \
367 	((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
368 
369 struct amdgpu_ip_block {
370 	struct amdgpu_ip_block_status status;
371 	const struct amdgpu_ip_block_version *version;
372 };
373 
374 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
375 				       enum amd_ip_block_type type,
376 				       u32 major, u32 minor);
377 
378 struct amdgpu_ip_block *
379 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
380 			      enum amd_ip_block_type type);
381 
382 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
383 			       const struct amdgpu_ip_block_version *ip_block_version);
384 
385 /*
386  * BIOS.
387  */
388 bool amdgpu_get_bios(struct amdgpu_device *adev);
389 bool amdgpu_read_bios(struct amdgpu_device *adev);
390 bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
391 				     u8 *bios, u32 length_bytes);
392 /*
393  * Clocks
394  */
395 
396 #define AMDGPU_MAX_PPLL 3
397 
398 struct amdgpu_clock {
399 	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
400 	struct amdgpu_pll spll;
401 	struct amdgpu_pll mpll;
402 	/* 10 kHz units */
403 	uint32_t default_mclk;
404 	uint32_t default_sclk;
405 	uint32_t default_dispclk;
406 	uint32_t current_dispclk;
407 	uint32_t dp_extclk;
408 	uint32_t max_pixel_clock;
409 };
410 
411 /* Sub-allocation manager; it has to be protected by another lock.
412  * By design this is a helper for other parts of the driver,
413  * like the indirect buffer or semaphore, which both have their
414  * own locking.
415  *
416  * The principle is simple: we keep a list of sub-allocations in
417  * offset order (the first entry has offset == 0, the last entry
418  * has the highest offset).
419  *
420  * When allocating a new object we first check if there is room at
421  * the end: total_size - (last_object_offset + last_object_size) >=
422  * alloc_size. If so we allocate the new object there.
423  *
424  * When there is not enough room at the end, we start waiting on
425  * each sub object until we reach object_offset + object_size >=
426  * alloc_size; that object then becomes the sub object we return.
427  *
428  * Alignment can't be bigger than the page size.
429  *
430  * Holes are not considered for allocation, to keep things simple.
431  * The assumption is that there won't be holes (all objects use the
432  * same alignment).
433  */
434 
435 struct amdgpu_sa_manager {
436 	struct drm_suballoc_manager	base;
437 	struct amdgpu_bo		*bo;
438 	uint64_t			gpu_addr;
439 	void				*cpu_ptr;
440 };
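
/*
 * Illustrative sketch only (not part of the driver): the "room at the
 * end" test described in the comment above, written out as a helper.
 * The function name and parameters are hypothetical.
 */
static inline bool amdgpu_sa_example_fits_at_end(uint64_t total_size,
						 uint64_t last_offset,
						 uint64_t last_size,
						 uint64_t alloc_size)
{
	/* e.g. with total_size = 64 KB and the last object ending at 60 KB,
	 * a 4 KB request fits at the end while an 8 KB request does not and
	 * has to wait on existing sub objects instead. */
	return total_size - (last_offset + last_size) >= alloc_size;
}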
441 
442 int amdgpu_fence_slab_init(void);
443 void amdgpu_fence_slab_fini(void);
444 
445 /*
446  * IRQS.
447  */
448 
449 struct amdgpu_flip_work {
450 	struct delayed_work		flip_work;
451 	struct work_struct		unpin_work;
452 	struct amdgpu_device		*adev;
453 	int				crtc_id;
454 	u32				target_vblank;
455 	uint64_t			base;
456 	struct drm_pending_vblank_event *event;
457 	struct amdgpu_bo		*old_abo;
458 	unsigned			shared_count;
459 	struct dma_fence		**shared;
460 	struct dma_fence_cb		cb;
461 	bool				async;
462 };
463 
464 
465 /*
466  * file private structure
467  */
468 
469 struct amdgpu_fpriv {
470 	struct amdgpu_vm	vm;
471 	struct amdgpu_bo_va	*prt_va;
472 	struct amdgpu_bo_va	*csa_va;
473 	struct rwlock		bo_list_lock;
474 	struct idr		bo_list_handles;
475 	struct amdgpu_ctx_mgr	ctx_mgr;
476 	/** GPU partition selection */
477 	uint32_t		xcp_id;
478 };
479 
480 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
481 
482 /*
483  * Writeback
484  */
485 #define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */
486 
487 struct amdgpu_wb {
488 	struct amdgpu_bo	*wb_obj;
489 	volatile uint32_t	*wb;
490 	uint64_t		gpu_addr;
491 	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
492 	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
493 };
494 
495 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
496 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
497 
498 /*
499  * Benchmarking
500  */
501 int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
502 
503 /*
504  * ASIC specific register table accessible by UMD
505  */
506 struct amdgpu_allowed_register_entry {
507 	uint32_t reg_offset;
508 	bool grbm_indexed;
509 };
510 
511 enum amd_reset_method {
512 	AMD_RESET_METHOD_NONE = -1,
513 	AMD_RESET_METHOD_LEGACY = 0,
514 	AMD_RESET_METHOD_MODE0,
515 	AMD_RESET_METHOD_MODE1,
516 	AMD_RESET_METHOD_MODE2,
517 	AMD_RESET_METHOD_BACO,
518 	AMD_RESET_METHOD_PCI,
519 };
520 
521 struct amdgpu_video_codec_info {
522 	u32 codec_type;
523 	u32 max_width;
524 	u32 max_height;
525 	u32 max_pixels_per_frame;
526 	u32 max_level;
527 };
528 
529 #define codec_info_build(type, width, height, level) \
530 			 .codec_type = type,\
531 			 .max_width = width,\
532 			 .max_height = height,\
533 			 .max_pixels_per_frame = height * width,\
534 			 .max_level = level,
535 
536 struct amdgpu_video_codecs {
537 	const u32 codec_count;
538 	const struct amdgpu_video_codec_info *codec_array;
539 };
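
/*
 * Hedged usage sketch (disabled, illustrative values only):
 * codec_info_build() expands to designated-initializer fields, so each
 * use is wrapped in braces inside an amdgpu_video_codec_info array.
 */
#if 0
static const struct amdgpu_video_codec_info example_codec_array[] = {
	{codec_info_build(0 /* hypothetical codec type */, 4096, 2160, 52)},
};

static const struct amdgpu_video_codecs example_codecs = {
	.codec_count = ARRAY_SIZE(example_codec_array),
	.codec_array = example_codec_array,
};
#endif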
540 
541 /*
542  * ASIC specific functions.
543  */
544 struct amdgpu_asic_funcs {
545 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
546 	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
547 				   u8 *bios, u32 length_bytes);
548 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
549 			     u32 sh_num, u32 reg_offset, u32 *value);
550 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
551 	int (*reset)(struct amdgpu_device *adev);
552 	enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
553 	/* get the reference clock */
554 	u32 (*get_xclk)(struct amdgpu_device *adev);
555 	/* MM block clocks */
556 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
557 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
558 	/* static power management */
559 	int (*get_pcie_lanes)(struct amdgpu_device *adev);
560 	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
561 	/* get config memsize register */
562 	u32 (*get_config_memsize)(struct amdgpu_device *adev);
563 	/* flush hdp write queue */
564 	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
565 	/* invalidate hdp read cache */
566 	void (*invalidate_hdp)(struct amdgpu_device *adev,
567 			       struct amdgpu_ring *ring);
568 	/* check if the asic needs a full reset or if soft reset will work */
569 	bool (*need_full_reset)(struct amdgpu_device *adev);
570 	/* initialize doorbell layout for specific asic */
571 	void (*init_doorbell_index)(struct amdgpu_device *adev);
572 	/* PCIe bandwidth usage */
573 	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
574 			       uint64_t *count1);
575 	/* do we need to reset the asic at init time (e.g., kexec) */
576 	bool (*need_reset_on_init)(struct amdgpu_device *adev);
577 	/* PCIe replay counter */
578 	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
579 	/* device supports BACO */
580 	bool (*supports_baco)(struct amdgpu_device *adev);
581 	/* pre asic_init quirks */
582 	void (*pre_asic_init)(struct amdgpu_device *adev);
583 	/* enter/exit umd stable pstate */
584 	int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
585 	/* query video codecs */
586 	int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
587 				  const struct amdgpu_video_codecs **codecs);
588 	/* encode "> 32bits" smn addressing */
589 	u64 (*encode_ext_smn_addressing)(int ext_id);
590 };
591 
592 /*
593  * IOCTL.
594  */
595 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
596 				struct drm_file *filp);
597 
598 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
599 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
600 				    struct drm_file *filp);
601 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
602 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
603 				struct drm_file *filp);
604 
605 /* VRAM scratch page for HDP bug, default vram page */
606 struct amdgpu_mem_scratch {
607 	struct amdgpu_bo		*robj;
608 	volatile uint32_t		*ptr;
609 	u64				gpu_addr;
610 };
611 
612 /*
613  * CGS
614  */
615 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
616 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
617 
618 /*
619  * Core structure, functions and helpers.
620  */
621 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
622 typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
623 
624 typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
625 typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);
626 
627 typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
628 typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
629 
630 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
631 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
632 
633 struct amdgpu_mmio_remap {
634 	u32 reg_offset;
635 	resource_size_t bus_addr;
636 };
637 
638 /* Define the HW IP blocks that will be used in the driver; add more if necessary */
639 enum amd_hw_ip_block_type {
640 	GC_HWIP = 1,
641 	HDP_HWIP,
642 	SDMA0_HWIP,
643 	SDMA1_HWIP,
644 	SDMA2_HWIP,
645 	SDMA3_HWIP,
646 	SDMA4_HWIP,
647 	SDMA5_HWIP,
648 	SDMA6_HWIP,
649 	SDMA7_HWIP,
650 	LSDMA_HWIP,
651 	MMHUB_HWIP,
652 	ATHUB_HWIP,
653 	NBIO_HWIP,
654 	MP0_HWIP,
655 	MP1_HWIP,
656 	UVD_HWIP,
657 	VCN_HWIP = UVD_HWIP,
658 	JPEG_HWIP = VCN_HWIP,
659 	VCN1_HWIP,
660 	VCE_HWIP,
661 	DF_HWIP,
662 	DCE_HWIP,
663 	OSSSYS_HWIP,
664 	SMUIO_HWIP,
665 	PWR_HWIP,
666 	NBIF_HWIP,
667 	THM_HWIP,
668 	CLK_HWIP,
669 	UMC_HWIP,
670 	RSMU_HWIP,
671 	XGMI_HWIP,
672 	DCI_HWIP,
673 	PCIE_HWIP,
674 	MAX_HWIP
675 };
676 
677 #define HWIP_MAX_INSTANCE	44
678 
679 #define HW_ID_MAX		300
680 #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
681 #define IP_VERSION_MAJ(ver) ((ver) >> 16)
682 #define IP_VERSION_MIN(ver) (((ver) >> 8) & 0xFF)
683 #define IP_VERSION_REV(ver) ((ver) & 0xFF)
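
/*
 * Worked example (value illustrative): IP_VERSION(9, 4, 3) packs to
 * 0x090403, and the accessors reverse it:
 *   IP_VERSION_MAJ(0x090403) == 9
 *   IP_VERSION_MIN(0x090403) == 4
 *   IP_VERSION_REV(0x090403) == 3
 */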
684 
685 struct amdgpu_ip_map_info {
686 	/* Map of logical to actual dev instances/mask */
687 	uint32_t 		dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
688 	int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
689 				      enum amd_hw_ip_block_type block,
690 				      int8_t inst);
691 	uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
692 					enum amd_hw_ip_block_type block,
693 					uint32_t mask);
694 };
695 
696 struct amd_powerplay {
697 	void *pp_handle;
698 	const struct amd_pm_funcs *pp_funcs;
699 };
700 
701 struct ip_discovery_top;
702 
703 /* polaris10 kickers */
704 #define ASICID_IS_P20(did, rid)		(((did == 0x67DF) && \
705 					 ((rid == 0xE3) || \
706 					  (rid == 0xE4) || \
707 					  (rid == 0xE5) || \
708 					  (rid == 0xE7) || \
709 					  (rid == 0xEF))) || \
710 					 ((did == 0x6FDF) && \
711 					 ((rid == 0xE7) || \
712 					  (rid == 0xEF) || \
713 					  (rid == 0xFF))))
714 
715 #define ASICID_IS_P30(did, rid)		((did == 0x67DF) && \
716 					((rid == 0xE1) || \
717 					 (rid == 0xF7)))
718 
719 /* polaris11 kickers */
720 #define ASICID_IS_P21(did, rid)		(((did == 0x67EF) && \
721 					 ((rid == 0xE0) || \
722 					  (rid == 0xE5))) || \
723 					 ((did == 0x67FF) && \
724 					 ((rid == 0xCF) || \
725 					  (rid == 0xEF) || \
726 					  (rid == 0xFF))))
727 
728 #define ASICID_IS_P31(did, rid)		((did == 0x67EF) && \
729 					((rid == 0xE2)))
730 
731 /* polaris12 kickers */
732 #define ASICID_IS_P23(did, rid)		(((did == 0x6987) && \
733 					 ((rid == 0xC0) || \
734 					  (rid == 0xC1) || \
735 					  (rid == 0xC3) || \
736 					  (rid == 0xC7))) || \
737 					 ((did == 0x6981) && \
738 					 ((rid == 0x00) || \
739 					  (rid == 0x01) || \
740 					  (rid == 0x10))))
741 
742 struct amdgpu_mqd_prop {
743 	uint64_t mqd_gpu_addr;
744 	uint64_t hqd_base_gpu_addr;
745 	uint64_t rptr_gpu_addr;
746 	uint64_t wptr_gpu_addr;
747 	uint32_t queue_size;
748 	bool use_doorbell;
749 	uint32_t doorbell_index;
750 	uint64_t eop_gpu_addr;
751 	uint32_t hqd_pipe_priority;
752 	uint32_t hqd_queue_priority;
753 	bool hqd_active;
754 };
755 
756 struct amdgpu_mqd {
757 	unsigned mqd_size;
758 	int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
759 			struct amdgpu_mqd_prop *p);
760 };
761 
762 #define AMDGPU_RESET_MAGIC_NUM 64
763 #define AMDGPU_MAX_DF_PERFMONS 4
764 #define AMDGPU_PRODUCT_NAME_LEN 64
765 struct amdgpu_reset_domain;
766 
767 /*
768  * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
769  */
770 #define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
771 
772 struct amdgpu_device {
773 	struct device			self;
774 	struct device			*dev;
775 	struct pci_dev			*pdev;
776 	struct drm_device		ddev;
777 
778 	pci_chipset_tag_t		pc;
779 	pcitag_t			pa_tag;
780 	pci_intr_handle_t		intrh;
781 	bus_space_tag_t			iot;
782 	bus_space_tag_t			memt;
783 	bus_dma_tag_t			dmat;
784 	void				*irqh;
785 
786 	void				(*switchcb)(void *, int, int);
787 	void				*switchcbarg;
788 	void				*switchcookie;
789 	struct task			switchtask;
790 	struct rasops_info		ro;
791 	int				console;
792 	int				primary;
793 
794 	struct task			burner_task;
795 	int				burner_fblank;
796 
797 	unsigned long			fb_aper_offset;
798 	unsigned long			fb_aper_size;
799 
800 #ifdef CONFIG_DRM_AMD_ACP
801 	struct amdgpu_acp		acp;
802 #endif
803 	struct amdgpu_hive_info *hive;
804 	struct amdgpu_xcp_mgr *xcp_mgr;
805 	/* ASIC */
806 	enum amd_asic_type		asic_type;
807 	uint32_t			family;
808 	uint32_t			rev_id;
809 	uint32_t			external_rev_id;
810 	unsigned long			flags;
811 	unsigned long			apu_flags;
812 	int				usec_timeout;
813 	const struct amdgpu_asic_funcs	*asic_funcs;
814 	bool				shutdown;
815 	bool				need_swiotlb;
816 	bool				accel_working;
817 	struct notifier_block		acpi_nb;
818 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
819 #ifdef notyet
820 	struct debugfs_blob_wrapper     debugfs_vbios_blob;
821 	struct debugfs_blob_wrapper     debugfs_discovery_blob;
822 #endif
823 	struct rwlock			srbm_mutex;
824 	/* GRBM index mutex. Protects concurrent access to GRBM index */
825 	struct rwlock			grbm_idx_mutex;
826 	struct dev_pm_domain		vga_pm_domain;
827 	bool				have_disp_power_ref;
828 	bool                            have_atomics_support;
829 
830 	/* BIOS */
831 	bool				is_atom_fw;
832 	uint8_t				*bios;
833 	uint32_t			bios_size;
834 	uint32_t			bios_scratch_reg_offset;
835 	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
836 
837 	/* Register/doorbell mmio */
838 	resource_size_t			rmmio_base;
839 	resource_size_t			rmmio_size;
840 	void __iomem			*rmmio;
841 	bus_space_tag_t			rmmio_bst;
842 	bus_space_handle_t		rmmio_bsh;
843 	/* protects concurrent MM_INDEX/DATA based register access */
844 	spinlock_t mmio_idx_lock;
845 	struct amdgpu_mmio_remap        rmmio_remap;
846 	/* protects concurrent SMC based register access */
847 	spinlock_t smc_idx_lock;
848 	amdgpu_rreg_t			smc_rreg;
849 	amdgpu_wreg_t			smc_wreg;
850 	/* protects concurrent PCIE register access */
851 	spinlock_t pcie_idx_lock;
852 	amdgpu_rreg_t			pcie_rreg;
853 	amdgpu_wreg_t			pcie_wreg;
854 	amdgpu_rreg_t			pciep_rreg;
855 	amdgpu_wreg_t			pciep_wreg;
856 	amdgpu_rreg_ext_t		pcie_rreg_ext;
857 	amdgpu_wreg_ext_t		pcie_wreg_ext;
858 	amdgpu_rreg64_t			pcie_rreg64;
859 	amdgpu_wreg64_t			pcie_wreg64;
860 	/* protects concurrent UVD register access */
861 	spinlock_t uvd_ctx_idx_lock;
862 	amdgpu_rreg_t			uvd_ctx_rreg;
863 	amdgpu_wreg_t			uvd_ctx_wreg;
864 	/* protects concurrent DIDT register access */
865 	spinlock_t didt_idx_lock;
866 	amdgpu_rreg_t			didt_rreg;
867 	amdgpu_wreg_t			didt_wreg;
868 	/* protects concurrent gc_cac register access */
869 	spinlock_t gc_cac_idx_lock;
870 	amdgpu_rreg_t			gc_cac_rreg;
871 	amdgpu_wreg_t			gc_cac_wreg;
872 	/* protects concurrent se_cac register access */
873 	spinlock_t se_cac_idx_lock;
874 	amdgpu_rreg_t			se_cac_rreg;
875 	amdgpu_wreg_t			se_cac_wreg;
876 	/* protects concurrent ENDPOINT (audio) register access */
877 	spinlock_t audio_endpt_idx_lock;
878 	amdgpu_block_rreg_t		audio_endpt_rreg;
879 	amdgpu_block_wreg_t		audio_endpt_wreg;
880 	struct amdgpu_doorbell		doorbell;
881 
882 	/* clock/pll info */
883 	struct amdgpu_clock            clock;
884 
885 	/* MC */
886 	struct amdgpu_gmc		gmc;
887 	struct amdgpu_gart		gart;
888 	dma_addr_t			dummy_page_addr;
889 	struct amdgpu_vm_manager	vm_manager;
890 	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
891 	DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);
892 
893 	/* memory management */
894 	struct amdgpu_mman		mman;
895 	struct amdgpu_mem_scratch	mem_scratch;
896 	struct amdgpu_wb		wb;
897 	atomic64_t			num_bytes_moved;
898 	atomic64_t			num_evictions;
899 	atomic64_t			num_vram_cpu_page_faults;
900 	atomic_t			gpu_reset_counter;
901 	atomic_t			vram_lost_counter;
902 
903 	/* data for buffer migration throttling */
904 	struct {
905 		spinlock_t		lock;
906 		s64			last_update_us;
907 		s64			accum_us; /* accumulated microseconds */
908 		s64			accum_us_vis; /* for visible VRAM */
909 		u32			log2_max_MBps;
910 	} mm_stats;
911 
912 	/* display */
913 	bool				enable_virtual_display;
914 	struct amdgpu_vkms_output       *amdgpu_vkms_output;
915 	struct amdgpu_mode_info		mode_info;
916 	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
917 	struct delayed_work         hotplug_work;
918 	struct amdgpu_irq_src		crtc_irq;
919 	struct amdgpu_irq_src		vline0_irq;
920 	struct amdgpu_irq_src		vupdate_irq;
921 	struct amdgpu_irq_src		pageflip_irq;
922 	struct amdgpu_irq_src		hpd_irq;
923 	struct amdgpu_irq_src		dmub_trace_irq;
924 	struct amdgpu_irq_src		dmub_outbox_irq;
925 
926 	/* rings */
927 	u64				fence_context;
928 	unsigned			num_rings;
929 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
930 	struct dma_fence __rcu		*gang_submit;
931 	bool				ib_pool_ready;
932 	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
933 	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
934 
935 	/* interrupts */
936 	struct amdgpu_irq		irq;
937 
938 	/* powerplay */
939 	struct amd_powerplay		powerplay;
940 	struct amdgpu_pm		pm;
941 	u64				cg_flags;
942 	u32				pg_flags;
943 
944 	/* nbio */
945 	struct amdgpu_nbio		nbio;
946 
947 	/* hdp */
948 	struct amdgpu_hdp		hdp;
949 
950 	/* smuio */
951 	struct amdgpu_smuio		smuio;
952 
953 	/* mmhub */
954 	struct amdgpu_mmhub		mmhub;
955 
956 	/* gfxhub */
957 	struct amdgpu_gfxhub		gfxhub;
958 
959 	/* gfx */
960 	struct amdgpu_gfx		gfx;
961 
962 	/* sdma */
963 	struct amdgpu_sdma		sdma;
964 
965 	/* lsdma */
966 	struct amdgpu_lsdma		lsdma;
967 
968 	/* uvd */
969 	struct amdgpu_uvd		uvd;
970 
971 	/* vce */
972 	struct amdgpu_vce		vce;
973 
974 	/* vcn */
975 	struct amdgpu_vcn		vcn;
976 
977 	/* jpeg */
978 	struct amdgpu_jpeg		jpeg;
979 
980 	/* firmwares */
981 	struct amdgpu_firmware		firmware;
982 
983 	/* PSP */
984 	struct psp_context		psp;
985 
986 	/* GDS */
987 	struct amdgpu_gds		gds;
988 
989 	/* KFD */
990 	struct amdgpu_kfd_dev		kfd;
991 
992 	/* UMC */
993 	struct amdgpu_umc		umc;
994 
995 	/* display related functionality */
996 	struct amdgpu_display_manager dm;
997 
998 	/* mes */
999 	bool                            enable_mes;
1000 	bool                            enable_mes_kiq;
1001 	struct amdgpu_mes               mes;
1002 	struct amdgpu_mqd               mqds[AMDGPU_HW_IP_NUM];
1003 
1004 	/* df */
1005 	struct amdgpu_df                df;
1006 
1007 	/* MCA */
1008 	struct amdgpu_mca               mca;
1009 
1010 	struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
1011 	uint32_t		        harvest_ip_mask;
1012 	int				num_ip_blocks;
1013 	struct rwlock	mn_lock;
1014 	DECLARE_HASHTABLE(mn_hash, 7);
1015 
1016 	/* tracking pinned memory */
1017 	atomic64_t vram_pin_size;
1018 	atomic64_t visible_pin_size;
1019 	atomic64_t gart_pin_size;
1020 
1021 	/* soc15 register offset based on ip, instance and  segment */
1022 	uint32_t		*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
1023 	struct amdgpu_ip_map_info	ip_map;
1024 
1025 	/* delayed work_func for deferring clockgating during resume */
1026 	struct delayed_work     delayed_init_work;
1027 
1028 	struct amdgpu_virt	virt;
1029 
1030 	/* link all shadow bo */
1031 	struct list_head                shadow_list;
1032 	struct rwlock                   shadow_list_lock;
1033 
1034 	/* record hw reset is performed */
1035 	bool has_hw_reset;
1036 	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];
1037 
1038 	/* s3/s4 mask */
1039 	bool                            in_suspend;
1040 	bool				in_s3;
1041 	bool				in_s4;
1042 	bool				in_s0ix;
1043 	/* indicate amdgpu suspension status */
1044 	bool				suspend_complete;
1045 
1046 	enum pp_mp1_state               mp1_state;
1047 	struct amdgpu_doorbell_index doorbell_index;
1048 
1049 	struct rwlock			notifier_lock;
1050 
1051 	int asic_reset_res;
1052 	struct work_struct		xgmi_reset_work;
1053 	struct list_head		reset_list;
1054 
1055 	long				gfx_timeout;
1056 	long				sdma_timeout;
1057 	long				video_timeout;
1058 	long				compute_timeout;
1059 
1060 	uint64_t			unique_id;
1061 	uint64_t	df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
1062 
1063 	/* enable runtime pm on the device */
1064 	bool                            in_runpm;
1065 	bool                            has_pr3;
1066 
1067 	bool                            ucode_sysfs_en;
1068 
1069 	/* Chip product information */
1070 	char				product_number[20];
1071 	char				product_name[AMDGPU_PRODUCT_NAME_LEN];
1072 	char				serial[20];
1073 
1074 	atomic_t			throttling_logging_enabled;
1075 	struct ratelimit_state		throttling_logging_rs;
1076 	uint32_t                        ras_hw_enabled;
1077 	uint32_t                        ras_enabled;
1078 
1079 	bool                            no_hw_access;
1080 	struct pci_saved_state          *pci_state;
1081 	pci_channel_state_t		pci_channel_state;
1082 
1083 	/* Track auto wait count on s_barrier settings */
1084 	bool				barrier_has_auto_waitcnt;
1085 
1086 	struct amdgpu_reset_control     *reset_cntl;
1087 	uint32_t                        ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
1088 
1089 	bool				ram_is_direct_mapped;
1090 
1091 	struct list_head                ras_list;
1092 
1093 	struct ip_discovery_top         *ip_top;
1094 
1095 	struct amdgpu_reset_domain	*reset_domain;
1096 
1097 	struct rwlock			benchmark_mutex;
1098 
1099 	/* reset dump register */
1100 	uint32_t                        *reset_dump_reg_list;
1101 	uint32_t			*reset_dump_reg_value;
1102 	int                             num_regs;
1103 #ifdef CONFIG_DEV_COREDUMP
1104 	struct amdgpu_task_info         reset_task_info;
1105 	bool                            reset_vram_lost;
1106 	struct timespec64               reset_time;
1107 #endif
1108 
1109 	bool                            scpm_enabled;
1110 	uint32_t                        scpm_status;
1111 
1112 	struct work_struct		reset_work;
1113 
1114 	bool                            job_hang;
1115 	bool                            dc_enabled;
1116 	/* Mask of active clusters */
1117 	uint32_t			aid_mask;
1118 };
1119 
1120 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
1121 {
1122 	return container_of(ddev, struct amdgpu_device, ddev);
1123 }
1124 
1125 static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
1126 {
1127 	return &adev->ddev;
1128 }
1129 
1130 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
1131 {
1132 	return container_of(bdev, struct amdgpu_device, mman.bdev);
1133 }
1134 
1135 int amdgpu_device_init(struct amdgpu_device *adev,
1136 		       uint32_t flags);
1137 void amdgpu_device_fini_hw(struct amdgpu_device *adev);
1138 void amdgpu_device_fini_sw(struct amdgpu_device *adev);
1139 
1140 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
1141 
1142 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
1143 			     void *buf, size_t size, bool write);
1144 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
1145 				 void *buf, size_t size, bool write);
1146 
1147 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
1148 			       void *buf, size_t size, bool write);
1149 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
1150 			    uint32_t inst, uint32_t reg_addr, char reg_name[],
1151 			    uint32_t expected_value, uint32_t mask);
1152 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
1153 			    uint32_t reg, uint32_t acc_flags);
1154 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
1155 				    u64 reg_addr);
1156 void amdgpu_device_wreg(struct amdgpu_device *adev,
1157 			uint32_t reg, uint32_t v,
1158 			uint32_t acc_flags);
1159 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
1160 				     u64 reg_addr, u32 reg_data);
1161 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
1162 			     uint32_t reg, uint32_t v, uint32_t xcc_id);
1163 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
1164 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
1165 
1166 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
1167 				u32 reg_addr);
1168 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
1169 				  u32 reg_addr);
1170 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
1171 				 u32 reg_addr, u32 reg_data);
1172 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
1173 				   u32 reg_addr, u64 reg_data);
1174 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
1175 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
1176 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
1177 
1178 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
1179 
1180 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
1181 				 struct amdgpu_reset_context *reset_context);
1182 
1183 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
1184 			 struct amdgpu_reset_context *reset_context);
1185 
1186 int emu_soc_asic_init(struct amdgpu_device *adev);
1187 
1188 /*
1189  * Register read & write functions.
1190  */
1191 #define AMDGPU_REGS_NO_KIQ    (1<<1)
1192 #define AMDGPU_REGS_RLC	(1<<2)
1193 
1194 #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
1195 #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
1196 
1197 #define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
1198 #define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
1199 
1200 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
1201 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
1202 
1203 #define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
1204 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
1205 #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
1206 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1207 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1208 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
1209 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
1210 #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
1211 #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
1212 #define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
1213 #define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
1214 #define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
1215 #define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
1216 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
1217 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
1218 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
1219 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
1220 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
1221 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
1222 #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
1223 #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
1224 #define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
1225 #define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
1226 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
1227 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
1228 #define WREG32_P(reg, val, mask)				\
1229 	do {							\
1230 		uint32_t tmp_ = RREG32(reg);			\
1231 		tmp_ &= (mask);					\
1232 		tmp_ |= ((val) & ~(mask));			\
1233 		WREG32(reg, tmp_);				\
1234 	} while (0)
1235 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1236 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
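/*
 * Usage note (register/field names hypothetical): in WREG32_P() the bits
 * set in 'mask' are kept from the current register value and 'val'
 * supplies the bits outside 'mask', so a field update typically passes
 * the inverted field mask, e.g.:
 *   WREG32_P(mmFOO, 2 << FOO__BAR__SHIFT, ~FOO__BAR_MASK);
 * WREG32_AND(reg, and) keeps only the bits in 'and', and
 * WREG32_OR(reg, or) sets the bits in 'or' while preserving the rest.
 */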
1237 #define WREG32_PLL_P(reg, val, mask)				\
1238 	do {							\
1239 		uint32_t tmp_ = RREG32_PLL(reg);		\
1240 		tmp_ &= (mask);					\
1241 		tmp_ |= ((val) & ~(mask));			\
1242 		WREG32_PLL(reg, tmp_);				\
1243 	} while (0)
1244 
1245 #define WREG32_SMC_P(_Reg, _Val, _Mask)                         \
1246 	do {                                                    \
1247 		u32 tmp = RREG32_SMC(_Reg);                     \
1248 		tmp &= (_Mask);                                 \
1249 		tmp |= ((_Val) & ~(_Mask));                     \
1250 		WREG32_SMC(_Reg, tmp);                          \
1251 	} while (0)
1252 
1253 #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
1254 
1255 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
1256 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
1257 
1258 #define REG_SET_FIELD(orig_val, reg, field, field_val)			\
1259 	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
1260 	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
1261 
1262 #define REG_GET_FIELD(value, reg, field)				\
1263 	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
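
/*
 * Illustrative read-modify-write of a single field with the helpers
 * above (register and field names hypothetical):
 *   u32 tmp = RREG32(mmFOO);
 *   tmp = REG_SET_FIELD(tmp, FOO, BAR, 1);
 *   WREG32(mmFOO, tmp);
 *   ... REG_GET_FIELD(tmp, FOO, BAR) now reads back 1 ...
 */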
1264 
1265 #define WREG32_FIELD(reg, field, val)	\
1266 	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1267 
1268 #define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
1269 	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1270 
1271 /*
1272  * BIOS helpers.
1273  */
1274 #define RBIOS8(i) (adev->bios[i])
1275 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1276 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
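
/*
 * Worked example (bytes illustrative): with adev->bios[] starting
 * { 0x34, 0x12, 0x78, 0x56, ... }, RBIOS16(0) yields 0x1234 and
 * RBIOS32(0) yields 0x56781234, i.e. the BIOS image is read
 * little-endian.
 */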
1277 
1278 /*
1279  * ASIC macros.
1280  */
1281 #define amdgpu_asic_set_vga_state(adev, state) \
1282     ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
1283 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
1284 #define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
1285 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
1286 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
1287 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
1288 #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
1289 #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
1290 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
1291 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
1292 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
1293 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
1294 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
1295 #define amdgpu_asic_flush_hdp(adev, r) \
1296 	((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
1297 #define amdgpu_asic_invalidate_hdp(adev, r) \
1298 	((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
1299 	 ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
1300 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
1301 #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
1302 #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
1303 #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
1304 #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
1305 #define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
1306 #define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
1307 #define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
1308 	((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
1309 #define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
1310 
1311 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
1312 
1313 #define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
1314 #define for_each_inst(i, inst_mask)        \
1315 	for (i = ffs(inst_mask); i-- != 0; \
1316 	     i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
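
/*
 * Worked example: for_each_inst() visits the set bits of inst_mask from
 * lowest to highest, with 'i' taking the 0-based bit index.  With
 * inst_mask = 0x5 (bits 0 and 2 set) the body runs for i == 0 and then
 * i == 2; an empty mask runs the body zero times.
 */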
1317 
1318 /* Common functions */
1319 bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
1320 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
1321 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
1322 			      struct amdgpu_job *job,
1323 			      struct amdgpu_reset_context *reset_context);
1324 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
1325 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
1326 bool amdgpu_device_need_post(struct amdgpu_device *adev);
1327 bool amdgpu_device_pcie_dynamic_switching_supported(void);
1328 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
1329 bool amdgpu_device_aspm_support_quirk(void);
1330 
1331 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
1332 				  u64 num_vis_bytes);
1333 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
1334 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1335 					     const u32 *registers,
1336 					     const u32 array_size);
1337 
1338 int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
1339 bool amdgpu_device_supports_atpx(struct drm_device *dev);
1340 bool amdgpu_device_supports_px(struct drm_device *dev);
1341 bool amdgpu_device_supports_boco(struct drm_device *dev);
1342 bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
1343 bool amdgpu_device_supports_baco(struct drm_device *dev);
1344 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
1345 				      struct amdgpu_device *peer_adev);
1346 int amdgpu_device_baco_enter(struct drm_device *dev);
1347 int amdgpu_device_baco_exit(struct drm_device *dev);
1348 
1349 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
1350 		struct amdgpu_ring *ring);
1351 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
1352 		struct amdgpu_ring *ring);
1353 
1354 void amdgpu_device_halt(struct amdgpu_device *adev);
1355 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
1356 				u32 reg);
1357 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
1358 				u32 reg, u32 v);
1359 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
1360 					    struct dma_fence *gang);
1361 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
1362 
1363 /* atpx handler */
1364 #if defined(CONFIG_VGA_SWITCHEROO)
1365 void amdgpu_register_atpx_handler(void);
1366 void amdgpu_unregister_atpx_handler(void);
1367 bool amdgpu_has_atpx_dgpu_power_cntl(void);
1368 bool amdgpu_is_atpx_hybrid(void);
1369 bool amdgpu_atpx_dgpu_req_power_for_displays(void);
1370 bool amdgpu_has_atpx(void);
1371 #else
1372 static inline void amdgpu_register_atpx_handler(void) {}
1373 static inline void amdgpu_unregister_atpx_handler(void) {}
1374 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1375 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
1376 static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
1377 static inline bool amdgpu_has_atpx(void) { return false; }
1378 #endif
1379 
1380 #if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
1381 void *amdgpu_atpx_get_dhandle(void);
1382 #else
1383 static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
1384 #endif
1385 
1386 /*
1387  * KMS
1388  */
1389 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
1390 extern const int amdgpu_max_kms_ioctl;
1391 
1392 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
1393 void amdgpu_driver_unload_kms(struct drm_device *dev);
1394 void amdgpu_driver_lastclose_kms(struct drm_device *dev);
1395 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
1396 void amdgpu_driver_postclose_kms(struct drm_device *dev,
1397 				 struct drm_file *file_priv);
1398 void amdgpu_driver_release_kms(struct drm_device *dev);
1399 
1400 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
1401 int amdgpu_device_prepare(struct drm_device *dev);
1402 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
1403 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
1404 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
1405 int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
1406 void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
1407 int amdgpu_info_ioctl(struct drm_device *dev, void *data,
1408 		      struct drm_file *filp);
1409 
1410 /*
1411  * functions used by amdgpu_encoder.c
1412  */
1413 struct amdgpu_afmt_acr {
1414 	u32 clock;
1415 
1416 	int n_32khz;
1417 	int cts_32khz;
1418 
1419 	int n_44_1khz;
1420 	int cts_44_1khz;
1421 
1422 	int n_48khz;
1423 	int cts_48khz;
1424 
1425 };
1426 
1427 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
1428 
1429 /* amdgpu_acpi.c */
1430 
1431 struct amdgpu_numa_info {
1432 	uint64_t size;
1433 	int pxm;
1434 	int nid;
1435 };
1436 
1437 /* ATCS Device/Driver State */
1438 #define AMDGPU_ATCS_PSC_DEV_STATE_D0		0
1439 #define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT	3
1440 #define AMDGPU_ATCS_PSC_DRV_STATE_OPR		0
1441 #define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR	1
1442 
1443 #if defined(CONFIG_ACPI)
1444 int amdgpu_acpi_init(struct amdgpu_device *adev);
1445 void amdgpu_acpi_fini(struct amdgpu_device *adev);
1446 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
1447 bool amdgpu_acpi_is_power_shift_control_supported(void);
1448 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
1449 						u8 perf_req, bool advertise);
1450 int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1451 				    u8 dev_state, bool drv_state);
1452 int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
1453 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
1454 int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
1455 			     u64 *tmr_size);
1456 int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
1457 			     struct amdgpu_numa_info *numa_info);
1458 
1459 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
1460 bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
1461 void amdgpu_acpi_detect(void);
1462 void amdgpu_acpi_release(void);
1463 #else
1464 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
1465 static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
1466 					   u64 *tmr_offset, u64 *tmr_size)
1467 {
1468 	return -EINVAL;
1469 }
1470 static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
1471 					   int xcc_id,
1472 					   struct amdgpu_numa_info *numa_info)
1473 {
1474 	return -EINVAL;
1475 }
1476 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
1477 static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
1478 static inline void amdgpu_acpi_detect(void) { }
1479 static inline void amdgpu_acpi_release(void) { }
1480 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
1481 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1482 						  u8 dev_state, bool drv_state) { return 0; }
1483 static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
1484 						 enum amdgpu_ss ss_state) { return 0; }
1485 #endif
1486 
1487 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
1488 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
1489 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
1490 void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
1491 #else
1492 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
1493 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
1494 static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
1495 #endif
1496 
1497 #if defined(CONFIG_DRM_AMD_DC)
1498 int amdgpu_dm_display_resume(struct amdgpu_device *adev);
1499 #else
1500 static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
1501 #endif
1502 
1503 
1504 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
1505 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
1506 
1507 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
1508 					   pci_channel_state_t state);
1509 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
1510 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
1511 void amdgpu_pci_resume(struct pci_dev *pdev);
1512 
1513 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
1514 bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
1515 
1516 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
1517 
1518 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1519 			       enum amd_clockgating_state state);
1520 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
1521 			       enum amd_powergating_state state);
1522 
1523 static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
1524 {
1525 	return amdgpu_gpu_recovery != 0 &&
1526 		adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
1527 		adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
1528 		adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
1529 		adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
1530 }
1531 
1532 #include "amdgpu_object.h"
1533 
1534 static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
1535 {
1536        return adev->gmc.tmz_enabled;
1537 }
1538 
1539 int amdgpu_in_reset(struct amdgpu_device *adev);
1540 
1541 extern const struct attribute_group amdgpu_vram_mgr_attr_group;
1542 extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
1543 extern const struct attribute_group amdgpu_flash_attr_group;
1544 
1545 #endif
1546