/*	$NetBSD: kgd_kfd_interface.h,v 1.4 2021/12/19 10:59:02 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>

struct pci_dev;

#define KGD_MAX_QUEUES 128

struct kfd_dev;
struct kgd_dev;

struct kgd_mem;

enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};

struct kfd_vm_fault_info {
	uint64_t	page_addr;
	uint32_t	vmid;
	uint32_t	mc_id;
	uint32_t	status;
	bool		prot_valid;
	bool		prot_read;
	bool		prot_write;
	bool		prot_exec;
};
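
/*
 * Illustrative sketch, not part of this interface: how a consumer of
 * struct kfd_vm_fault_info might report a fault.  The helper name and the
 * log format below are hypothetical.
 *
 *	static void example_report_vm_fault(const struct kfd_vm_fault_info *info)
 *	{
 *		pr_err("VM fault at page 0x%llx: vmid %u mc_id %u status 0x%x %c%c%c%c\n",
 *		       (unsigned long long)info->page_addr, info->vmid,
 *		       info->mc_id, info->status,
 *		       info->prot_valid ? 'V' : '-',
 *		       info->prot_read  ? 'R' : '-',
 *		       info->prot_write ? 'W' : '-',
 *		       info->prot_exec  ? 'X' : '-');
 *	}
 */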

struct kfd_cu_info {
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t num_cu_per_sh;
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;
	uint32_t cu_bitmap[4][4];
};
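
/*
 * Illustrative sketch, not part of this interface: cu_bitmap carries one bit
 * per CU, indexed by shader engine and shader array, so summing its set bits
 * is expected to match cu_active_number.  The helper below is hypothetical.
 *
 *	static uint32_t example_count_active_cus(const struct kfd_cu_info *info)
 *	{
 *		uint32_t se, sh, count = 0;
 *
 *		for (se = 0; se < 4; se++)
 *			for (sh = 0; sh < 4; sh++)
 *				count += hweight32(info->cu_bitmap[se][sh]);
 *		return count;
 *	}
 */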

/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;
	uint64_t local_mem_size_public;
	uint32_t vram_width;
	uint32_t mem_clk_max;
};

enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor
 * (cp) scheduling. In this mode the firmware schedules the user mode queues
 * and the kernel queues, such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches the configuration and the list
 * of currently running user mode queues to the cp.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode the user mode queue oversubscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
 * oversubscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver sets
 * the command processor registers and configures the queues "manually".
 * This mode is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};

struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);

	/* SDMA doorbell assignments (SOC15 and later chips only). Only
	 * specific doorbells are routed to each SDMA engine. Others
	 * are routed to IH and VCN. They are not usable by the CP.
	 */
	uint32_t *sdma_doorbell_idx;

	/* From SOC15 onward, the doorbell index range that is not usable
	 * for CP queues.
	 */
	uint32_t non_cp_doorbells_start;
	uint32_t non_cp_doorbells_end;

	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;
};
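
/*
 * Illustrative sketch, not part of this interface: bit n of
 * compute_vmid_bitmap marks VMID n as usable by KFD, and queue_bitmap works
 * the same way for the MEC compute queues.  The hypothetical helper below
 * just counts what KGD handed over.
 *
 *	static void example_count_shared(const struct kgd2kfd_shared_resources *res,
 *					 unsigned int *n_vmids, unsigned int *n_queues)
 *	{
 *		*n_vmids = hweight32(res->compute_vmid_bitmap);
 *		*n_queues = bitmap_weight(res->queue_bitmap, KGD_MAX_QUEUES);
 *	}
 */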

struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};
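
/*
 * Illustrative sketch, not part of this interface: tile_config_ptr and
 * macro_tile_config_ptr are expected to point at register tables owned by
 * KGD, so a consumer only reads the first num_* entries through them.  The
 * helper below is hypothetical.
 *
 *	static uint32_t example_first_tile_mode(const struct tile_config *cfg)
 *	{
 *		return cfg->num_tile_configs ? cfg->tile_config_ptr[0] : 0;
 *	}
 */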

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096

/*
 * Allocation flag domains
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
#define ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)

/*
 * Allocation flags attributes/access options.
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28) /* TODO */
#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define ALLOC_MEM_FLAGS_COHERENT	(1 << 26) /* For GFXv9 or later */
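
/*
 * Illustrative sketch, not part of this interface: the low bits select the
 * allocation domain and the high bits add access attributes, so a request
 * ORs one domain flag with the desired attributes.  For example, a writable,
 * CPU-visible VRAM allocation could be described as:
 *
 *	uint32_t example_flags = ALLOC_MEM_FLAGS_VRAM |
 *				 ALLOC_MEM_FLAGS_WRITABLE |
 *				 ALLOC_MEM_FLAGS_PUBLIC;
 */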

/**
 * struct kfd2kgd_calls
 *
 * @program_sh_mem_settings: A function that initializes memory properties
 * such as the main aperture memory type (cached / non-cached) and the
 * secondary aperture base address, size and memory type.
 * This function is used only for no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W. Only used
 * for no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * Used only for no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * The array is allocated with kmalloc and must be freed with kfree by the
 * caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value
 * pairs. The array is allocated with kmalloc and must be freed with kfree
 * by the caller.
 *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @set_scratch_backing_va: Sets the VA for scratch backing memory of a VMID.
 * Only used for no cp scheduling mode.
 *
 * @get_tile_config: Returns GPU-specific tiling mode information.
 *
 * @set_vm_context_page_table_base: Programs the page table base for a VMID.
 *
 * @invalidate_tlbs: Invalidates TLBs for a specific PASID.
 *
 * @invalidate_tlbs_vmid: Invalidates TLBs for a specific VMID.
 *
 * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
 * IH ring entry. This function allows the KFD ISR to get the VMID
 * from the fault status register as early as possible.
 *
 * @get_hive_id: Returns the hive id of the current device, or 0 if XGMI is
 * not enabled.
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to the amdkfd driver.
 */
struct kfd2kgd_calls {
	/* Register access functions */
	void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

	int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid);

	int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);

#ifndef __NetBSD__
	int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
#endif

	int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off);

#ifndef __NetBSD__
	int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);
#endif

	int (*hqd_dump)(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);

	int (*hqd_sdma_dump)(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

	int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);

	bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);

	int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout);

	int (*address_watch_disable)(struct kgd_dev *kgd);
	int (*address_watch_execute)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
	int (*wave_control_execute)(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
	uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);
	bool (*get_atc_vmid_pasid_mapping_info)(
					struct kgd_dev *kgd,
					uint8_t vmid,
					uint16_t *p_pasid);

	/* No longer needed from GFXv9 onward. The scratch base address is
	 * passed to the shader by the CP. It's the user mode driver's
	 * responsibility.
	 */
	void (*set_scratch_backing_va)(struct kgd_dev *kgd,
				uint64_t va, uint32_t vmid);

	int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);

	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
			uint32_t vmid, uint64_t page_table_base);
	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
	uint64_t (*get_hive_id)(struct kgd_dev *kgd);
};
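
/*
 * Illustrative sketch, not part of this interface: amdkfd reaches these
 * services through the function table, for example tearing down an HQD only
 * if the slot is occupied.  The helper name, preempt type and timeout value
 * below are hypothetical.
 *
 *	static int example_drain_hqd(const struct kfd2kgd_calls *f,
 *				     struct kgd_dev *kgd, void *mqd,
 *				     uint64_t queue_address,
 *				     uint32_t pipe_id, uint32_t queue_id)
 *	{
 *		if (!f->hqd_is_occupied(kgd, queue_address, pipe_id, queue_id))
 *			return 0;
 *		return f->hqd_destroy(kgd, mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
 *				      1000, pipe_id, queue_id);
 *	}
 */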

#endif	/* KGD_KFD_INTERFACE_H_INCLUDED */