/*	$NetBSD: amdgpu_rlc.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_rlc.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"

/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Put the RLC into safe mode if it is enabled and has not already entered
 * safe mode.
 */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = true;
	}
}

/**
 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Take the RLC out of safe mode if it is enabled and has previously entered
 * safe mode.
 */
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
{
	if (!(adev->gfx.rlc.in_safe_mode))
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->unset_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = false;
	}
}
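
/*
 * Illustrative only (not part of the original source): chip-specific code
 * typically brackets clock-/power-gating register updates with the two
 * helpers above so the RLC does not run concurrently, roughly:
 *
 *	amdgpu_gfx_rlc_enter_safe_mode(adev);
 *	... program CGCG/MGCG related registers ...
 *	amdgpu_gfx_rlc_exit_safe_mode(adev);
 */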

/**
 * amdgpu_gfx_rlc_init_sr - Init save restore block
 *
 * @adev: amdgpu_device pointer
 * @dws: the size of the save restore block, in dwords
 *
 * Allocate the RLC save restore block and fill it with the register list.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 i;
	int r;

	/* allocate save restore block */
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)__UNVOLATILE(&adev->gfx.rlc.sr_ptr));
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* write the sr buffer */
	src_ptr = adev->gfx.rlc.reg_list;
	dst_ptr = adev->gfx.rlc.sr_ptr;
	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

	return 0;
}
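
/*
 * Illustrative only (hypothetical caller): a chip-specific rlc_init
 * implementation with a register save restore list might do something like:
 *
 *	if (adev->gfx.rlc.reg_list) {
 *		r = amdgpu_gfx_rlc_init_sr(adev, adev->gfx.rlc.reg_list_size);
 *		if (r)
 *			return r;
 *	}
 */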

/**
 * amdgpu_gfx_rlc_init_csb - Init clear state block
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the RLC clear state block and record its size, GPU address and
 * kernel mapping.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	u32 dws;
	int r;

	/* allocate clear state block */
	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.clear_state_obj,
				      &adev->gfx.rlc.clear_state_gpu_addr,
				      (void **)__UNVOLATILE(&adev->gfx.rlc.cs_ptr));
	if (r) {
		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	return 0;
}

/**
 * amdgpu_gfx_rlc_init_cpt - Init cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the RLC cp table and fill it with the cp firmware jump tables.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)__UNVOLATILE(&adev->gfx.rlc.cp_table_ptr));
	if (r) {
		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cp table */
	amdgpu_gfx_rlc_setup_cp_table(adev);
	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	return 0;
}
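
/*
 * Illustrative only (hypothetical sequence): a chip-specific rlc_init hook
 * might allocate the clear state block and, when a cp table size has been
 * set, the cp table, roughly:
 *
 *	r = amdgpu_gfx_rlc_init_csb(adev);
 *	if (r)
 *		return r;
 *
 *	if (adev->gfx.rlc.cp_table_size) {
 *		r = amdgpu_gfx_rlc_init_cpt(adev);
 *		if (r)
 *			return r;
 *	}
 *
 * On failure the helpers above already call amdgpu_gfx_rlc_fini() to release
 * any buffers allocated so far.
 */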

/**
 * amdgpu_gfx_rlc_setup_cp_table - setup the buffer of cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Write the cp firmware jump tables into the cp table buffer.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
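	/*
	 * me 0..4 select the CE, PFP, ME, MEC and MEC2 microcode jump
	 * tables, in that order.
	 */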
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

/**
 * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
 *
 * @adev: amdgpu_device pointer
 *
 * Free the three BOs used for the RLC save restore block, the clear state
 * block and the jump table block.
 */
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)__UNVOLATILE(&adev->gfx.rlc.sr_ptr));
	}

	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)__UNVOLATILE(&adev->gfx.rlc.cs_ptr));

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)__UNVOLATILE(&adev->gfx.rlc.cp_table_ptr));
}