/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"

#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA resides in the 3rd page of CSA */
#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)

/*
 * GPU SDMA IP block helper functions.
 */

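/**
 * amdgpu_sdma_get_instance_from_ring - map a ring to its SDMA instance
 * @ring: ring to look up
 *
 * Walk the SDMA instances and return the one that owns @ring, matching
 * either the main ring or the page queue ring. Returns NULL if @ring
 * does not belong to any SDMA instance.
 */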
struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page)
			return &adev->sdma.instance[i];

	return NULL;
}

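/**
 * amdgpu_sdma_get_index_from_ring - look up the instance index for a ring
 * @ring: ring to look up
 * @index: where to store the instance index on success
 *
 * Like amdgpu_sdma_get_instance_from_ring(), but returns the instance
 * index through @index. Returns 0 on success or -EINVAL if @ring is not
 * an SDMA ring.
 */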
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page) {
			*index = i;
			return 0;
		}
	}

	return -EINVAL;
}

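/**
 * amdgpu_sdma_get_csa_mc_addr - get the CSA MC address for an SDMA ring
 * @ring: SDMA ring to get the context save area address for
 * @vmid: VMID the address will be used with
 *
 * Returns the MC address of the per-instance SDMA context save area used
 * for preemption, or 0 when preemption does not apply (SRIOV, VMID 0, or
 * MCBP disabled). For MES queues the address comes from the MES context
 * metadata; otherwise it is computed as
 * csa_vaddr + AMDGPU_CSA_SDMA_OFFSET + index * AMDGPU_CSA_SDMA_SIZE.
 */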
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
				     unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t csa_mc_addr;
	uint32_t index = 0;
	int r;

	/* don't enable OS preemption on SDMA under SRIOV */
	if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
		return 0;

	if (ring->is_mes_queue) {
		uint32_t offset = 0;

		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  sdma[ring->idx].sdma_meta_data);
		csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	} else {
		r = amdgpu_sdma_get_index_from_ring(ring, &index);

		if (r || index > 31)
			csa_mc_addr = 0;
		else
			csa_mc_addr = amdgpu_csa_vaddr(adev) +
				AMDGPU_CSA_SDMA_OFFSET +
				index * AMDGPU_CSA_SDMA_SIZE;
	}

	return csa_mc_addr;
}

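/**
 * amdgpu_sdma_ras_late_init - common late RAS init for the SDMA block
 * @adev: amdgpu device pointer
 * @ras_block: RAS block to initialize
 *
 * Runs the common RAS block late init and, when RAS is supported on the
 * SDMA block, enables the ECC interrupt on every SDMA instance. On any
 * failure the RAS block is torn down again before returning the error.
 */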
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
			      struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
				AMDGPU_SDMA_IRQ_INSTANCE0 + i);
			if (r)
				goto late_fini;
		}
	}

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

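/**
 * amdgpu_sdma_process_ras_data_cb - default RAS error callback for SDMA
 *
 * Flags an SRAM ECC error to KFD. Under SRIOV the host owns recovery, so
 * the function returns without further action; on bare metal it triggers
 * a GPU reset, since SDMA RAS errors are treated as uncorrectable.
 */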
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

	if (amdgpu_sriov_vf(adev))
		return AMDGPU_RAS_SUCCESS;

	amdgpu_ras_reset_gpu(adev);

	return AMDGPU_RAS_SUCCESS;
}

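/**
 * amdgpu_sdma_process_ecc_irq - forward an SDMA ECC interrupt to RAS
 *
 * Interrupt handler for SDMA ECC errors: wraps the IV entry in a
 * ras_dispatch_if and hands it to the RAS interrupt dispatcher. Does
 * nothing if no RAS interface is registered for SDMA.
 */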
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->sdma.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

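/**
 * amdgpu_sdma_init_inst_ctx - parse firmware headers for an SDMA instance
 *
 * Reads the common firmware header to determine the header version, then
 * extracts fw_version and feature_version from the v1 or v2 SDMA header.
 * Firmware with feature_version >= 20 supports burst NOP packets, so
 * burst_nop is enabled for it. Returns -EINVAL on an unknown header
 * version.
 */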
static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
	uint16_t version_major;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;
	const struct sdma_firmware_header_v2_0 *hdr_v2;

	header = (const struct common_firmware_header *)
		sdma_inst->fw->data;
	version_major = le16_to_cpu(header->header_version_major);

	switch (version_major) {
	case 1:
		hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
		break;
	case 2:
		hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
		break;
	default:
		return -EINVAL;
	}

	if (sdma_inst->feature_version >= 20)
		sdma_inst->burst_nop = true;

	return 0;
}

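/**
 * amdgpu_sdma_destroy_inst_ctx - release SDMA firmware and instance state
 * @adev: amdgpu device pointer
 * @duplicate: true if all instances share the firmware of instance 0
 *
 * Releases the firmware of each SDMA instance (only instance 0 when the
 * firmware was duplicated across instances) and clears the instance array.
 */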
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
				  bool duplicate)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		amdgpu_ucode_release(&adev->sdma.instance[i].fw);
		if (duplicate)
			break;
	}

	memset((void *)adev->sdma.instance, 0,
	       sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
}

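/**
 * amdgpu_sdma_init_microcode - load and register SDMA microcode
 * @adev: amdgpu device pointer
 * @instance: SDMA instance to load firmware for
 * @duplicate: true if all instances reuse the firmware of instance 0
 *
 * Requests the firmware binary for @instance (<ucode_prefix>.bin for
 * instance 0, <ucode_prefix><n>.bin otherwise), parses its version info,
 * optionally copies the instance 0 context to all other instances, and,
 * when PSP loads the firmware, registers the ucode entries and accounts
 * their sizes in adev->firmware.fw_size. On error all instance contexts
 * are torn down again.
 */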
int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
			       u32 instance, bool duplicate)
{
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	int err, i;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;
	uint16_t version_major;
	char ucode_prefix[30];
	char fw_name[40];

	amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
	if (instance == 0)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s%d.bin", ucode_prefix, instance);
	err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, fw_name);
	if (err)
		goto out;

	header = (const struct common_firmware_header *)
		adev->sdma.instance[instance].fw->data;
	version_major = le16_to_cpu(header->header_version_major);

	if ((duplicate && instance) || (!duplicate && version_major > 1)) {
		err = -EINVAL;
		goto out;
	}

	err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
	if (err)
		goto out;

	if (duplicate) {
		for (i = 1; i < adev->sdma.num_instances; i++)
			memcpy((void *)&adev->sdma.instance[i],
			       (void *)&adev->sdma.instance[0],
			       sizeof(struct amdgpu_sdma_instance));
	}

	DRM_DEBUG("psp_load == '%s'\n",
		  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		switch (version_major) {
		case 1:
			for (i = 0; i < adev->sdma.num_instances; i++) {
				if (!duplicate && (instance != i))
					continue;
				else {
					/* Use a single copy per SDMA firmware type. PSP uses the same instance for all
					 * groups of SDMAs */
					if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2) &&
					    adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
					    adev->sdma.num_inst_per_aid == i) {
						break;
					}
					info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
					info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
					info->fw = adev->sdma.instance[i].fw;
					adev->firmware.fw_size +=
						ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
				}
			}
			break;
		case 2:
			sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
				adev->sdma.instance[0].fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
			break;
		default:
			err = -EINVAL;
		}
	}

out:
	if (err)
		amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
	return err;
}

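/**
 * amdgpu_sdma_unset_buffer_funcs_helper - detach TTM buffer funcs from SDMA
 * @adev: amdgpu device pointer
 *
 * If the ring currently driving TTM buffer moves is one of the SDMA rings
 * (page queue or main ring), disable the TTM buffer funcs. Helper for
 * SDMA IP code to call before stopping its rings.
 */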
void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->sdma.has_page_queue) {
			sdma = &adev->sdma.instance[i].page;
			if (adev->mman.buffer_funcs_ring == sdma) {
				amdgpu_ttm_set_buffer_funcs_status(adev, false);
				break;
			}
		}
		sdma = &adev->sdma.instance[i].ring;
		if (adev->mman.buffer_funcs_ring == sdma) {
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
			break;
		}
	}
}

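/**
 * amdgpu_sdma_ras_sw_init - register the SDMA RAS block
 * @adev: amdgpu device pointer
 *
 * Registers the SDMA RAS block with the RAS framework and fills in the
 * default late-init and error callbacks for IP code that did not provide
 * its own. A no-op when the IP code did not set up adev->sdma.ras.
 */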
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
{
	int err = 0;
	struct amdgpu_sdma_ras *ras = NULL;

	/* adev->sdma.ras being NULL means SDMA does not support RAS,
	 * so there is nothing to do here.
	 */
	if (!adev->sdma.ras)
		return 0;

	ras = adev->sdma.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register sdma ras block!\n");
		return err;
	}

	strlcpy(ras->ras_block.ras_comm.name, "sdma",
	    sizeof(ras->ras_block.ras_comm.name));
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->sdma.ras_if = &ras->ras_block.ras_comm;

	/* If no special ras_late_init function is defined, use the default */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;

	/* If no special ras_cb function is defined, use the default ras_cb */
	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;

	return 0;
}