/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_DEBUG_EVENTS_H_INCLUDED
#define KFD_DEBUG_EVENTS_H_INCLUDED

#include "kfd_priv.h"

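/*
 * Debugger control entry points.  These are implemented in kfd_debug.c and
 * are driven by the KFD debug trap ioctl and by trap/exception interrupts.
 */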
void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count);
int kfd_dbg_trap_activate(struct kfd_process *target);
int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
			unsigned int *queue_id,
			unsigned int *gpu_id,
			uint64_t exception_clear_mask,
			uint64_t *event_status);
bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
				   unsigned int pasid,
				   uint32_t doorbell_id,
				   uint64_t trap_mask,
				   void *exception_data,
				   size_t exception_data_size);
bool kfd_dbg_ev_raise(uint64_t event_mask,
			struct kfd_process *process, struct kfd_node *dev,
			unsigned int source_id, bool use_worker,
			void *exception_data,
			size_t exception_data_size);
int kfd_dbg_trap_disable(struct kfd_process *target);
int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
			void __user *runtime_info,
			uint32_t *runtime_info_size);
int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
					uint32_t trap_override,
					uint32_t trap_mask_bits,
					uint32_t trap_mask_request,
					uint32_t *trap_mask_prev,
					uint32_t *trap_mask_supported);
int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
					uint8_t wave_launch_mode);
int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
					uint32_t watch_id);
int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
					uint64_t watch_address,
					uint32_t watch_address_mask,
					uint32_t *watch_id,
					uint32_t watch_mode);
int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags);
int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
		uint32_t source_id,
		uint32_t exception_code,
		bool clear_exception,
		void __user *info,
		uint32_t *info_size);
int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
					unsigned int dev_id,
					unsigned int queue_id,
					uint64_t error_reason);

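/*
 * Debug trap settings can be applied per VMID (rather than device wide) on
 * GC 9.4.2, 9.4.3, 9.4.4 and all GFX11+ parts.
 */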
static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
{
	return (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
		KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
		KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
		KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0));
}

void debug_event_write_work_handler(struct work_struct *work);
int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
		uint64_t exception_clear_mask,
		void __user *user_info,
		uint32_t *number_of_device_infos,
		uint32_t *entry_size);

void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
					uint64_t exception_set_mask);
/*
 * If GFX off is enabled, chips that do not support RLC restore for the debug
 * registers will disable GFX off temporarily for the entire debug session.
 * See disable_on_trap_action_entry and enable_on_trap_action_exit for details.
 */
static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev)
{
	return !(KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 10) ||
		 KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1));
}

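/*
 * GC 11.0.0 through 11.0.3 need a workaround when compute wave save/restore
 * (CWSR) is used together with debugging.
 */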
static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev)
{
	return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
	       KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 3);
}

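/*
 * Debugging while GWS (global wave sync) is in use requires a minimum MEC2
 * firmware version on GFX9 parts and is not supported at all on GFX11.
 */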
static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
{
	if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1)
			&& dev->kfd->mec2_fw_version < 0x81b6) ||
		(KFD_GC_VERSION(dev) >= IP_VERSION(9, 1, 0)
			&& KFD_GC_VERSION(dev) <= IP_VERSION(9, 2, 2)
			&& dev->kfd->mec2_fw_version < 0x1b6) ||
		(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0)
			&& dev->kfd->mec2_fw_version < 0x1b6) ||
		(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1)
			&& dev->kfd->mec2_fw_version < 0x30) ||
		(KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
			KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0)))
		return false;

	/* Assume debugging and cooperative launch supported otherwise. */
	return true;
}

int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en);

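/*
 * Trap temporaries (TTMPs) are set up unconditionally on pre-GFX11 parts
 * other than GC 9.4.2, on GFX11 with a recent enough MES scheduler firmware,
 * and on all GFX12+ parts.
 */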
static inline bool kfd_dbg_has_ttmps_always_setup(struct kfd_node *dev)
{
	return (KFD_GC_VERSION(dev) < IP_VERSION(11, 0, 0) &&
			KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 2)) ||
	       (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
			KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0) &&
			(dev->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 70) ||
	       (KFD_GC_VERSION(dev) >= IP_VERSION(12, 0, 0));
}
#endif