xref: /openbsd/sys/dev/pci/drm/amd/amdgpu/amdgpu_vce.c (revision ba755da8)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "amdgpu_cs.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned int ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		amdgpu_ucode_release(&adev->vce.fw);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
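	/*
	 * Repack (major, minor, binary id) so the major version lands in the
	 * top byte; amdgpu_vce_get_create_msg() later checks fw_version >> 24
	 * to pick the message layout the firmware expects.
	 */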
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	rw_init(&adev->vce.idle_mutex, "vceidle");

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	amdgpu_ucode_release(&adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->vce.ring[0];
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

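	/* check whether any encode session handle is still in use */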
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned int offset;
	int r, idx;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
			    adev->vce.fw->size - offset);
		drm_dev_exit(idx);
	}

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned int i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

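	/* power down only when no fence is outstanding on any VCE ring */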
	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
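	/*
	 * cancel_delayed_work_sync() returns true if the idle work was still
	 * pending; if it was not, VCE may already be powered down, so the
	 * clocks and gates have to be brought back up before use.
	 */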
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);

		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence)
{
	const unsigned int ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ib ib_msg;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	memset(&ib_msg, 0, sizeof(ib_msg));
	/* only one gpu page is needed, alloc +1 page to make addr aligned. */
	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib_msg);
	if (r)
		goto err;

	ib = &job->ibs[0];
	/* let addr point to page boundary */
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

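	/*
	 * Firmware with major version >= 52 expects a longer create command
	 * (0x40 bytes instead of 0x30), with four extra zero dwords appended.
	 */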
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	amdgpu_ib_free(ring->adev, &ib_msg, f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned int ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4,
				     direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		f = amdgpu_job_submit(job);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: cs parser
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
				  struct amdgpu_ib *ib, int lo, int hi,
				  unsigned int size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned int i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
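	/*
	 * Restrict where the BO may be placed so that the referenced slot
	 * (base plus size * index) cannot end up crossing the 4GB boundary
	 * the firmware is limited to.
	 */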
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}


/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
			       int lo, int hi, unsigned int size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010llx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

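	/*
	 * Convert the VM address into an offset inside the BO, rebase it on
	 * the BO's current GPU offset, then strip the slot offset again so
	 * the patched value is the base address of slot 0.
	 */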
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
	amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index, or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned int i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	unsigned int fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t dummy = 0xffffffff;
	uint32_t *size = &dummy;
	unsigned int idx;
	int i, r = 0;

	job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

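	/*
	 * First pass: make sure every BO referenced by the command stream
	 * can be placed without crossing a 4GB boundary.
	 */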
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
						   0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

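	/*
	 * Second pass: track session create/destroy state and patch the
	 * buffer references with the real GPU addresses.
	 */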
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_ib_get_value(ib, idx + 8) *
				amdgpu_ib_get_value(ib, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib)
{
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

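	/*
	 * In VM mode the addresses in the IB are already virtual, so no
	 * relocation patching is done here; only the session handles are
	 * tracked.
	 */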
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned int flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

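	/* the read pointer advancing shows the engine fetched the END command */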
	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}