/* dragonfly/sys/dev/drm/radeon/radeon_uvd.c (revision a1282e19) */
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_RV710		"radeonkmsfw_RV710_uvd"
#define FIRMWARE_CYPRESS	"radeonkmsfw_CYPRESS_uvd"
#define FIRMWARE_SUMO		"radeonkmsfw_SUMO_uvd"
#define FIRMWARE_TAHITI		"radeonkmsfw_TAHITI_uvd"
#define FIRMWARE_BONAIRE	"radeonkmsfw_BONAIRE_uvd"

MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_uvd_idle_work_handler(struct work_struct *work);

int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
	case CHIP_OLAND:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

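	/*
	 * Size of the single VCPU buffer object: the page-aligned firmware
	 * image (plus 8 bytes), the decoder stack and heap, and one extra
	 * GPU page that is reused as scratch space for the create/destroy
	 * messages (see radeon_uvd_get_create_msg() below).
	 */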
	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->datasize + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
		  RADEON_GPU_PAGE_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
		rdev->uvd.img_size[i] = 0;
	}

	return 0;
}

void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);

	release_firmware(rdev->uvd_fw);
}

int radeon_uvd_suspend(struct radeon_device *rdev)
{
	unsigned size;
	char *ptr;
	int i;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&rdev->uvd.handles[i]))
			break;

	if (i == RADEON_MAX_UVD_HANDLES)
		return 0;

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->datasize;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->datasize;

	rdev->uvd.saved_bo = kmalloc(size, M_DRM, M_WAITOK);
	memcpy(rdev->uvd.saved_bo, ptr, size);

	return 0;
}

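/*
 * Only the stack/heap area past the firmware image needs saving on
 * suspend: radeon_uvd_resume() recopies the firmware itself from
 * rdev->uvd_fw before restoring (or zeroing) the saved region.
 */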
int radeon_uvd_resume(struct radeon_device *rdev)
{
	unsigned size;
	char *ptr;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->datasize);

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->datasize;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->datasize;

	if (rdev->uvd.saved_bo != NULL) {
		memcpy(ptr, rdev->uvd.saved_bo, size);
		kfree(rdev->uvd.saved_bo);
		rdev->uvd.saved_bo = NULL;
	} else
		memset(ptr, 0, size);

	return 0;
}

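/*
 * UVD reaches VRAM through a single 256MB segment, so decode buffers are
 * forced into the first 256MB of VRAM here; radeon_uvd_cs_reloc() below
 * likewise rejects buffers that cross a 256MB boundary.
 */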
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}

void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}

static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;
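		/*
		 * For example, a 1920x1088 H264 stream gives image_size =
		 * ALIGN(1920*1088*3/2, 1024) = 3133440 bytes, width_in_mb =
		 * 120 and height_in_mb = 68, so min_dpb_size = 3133440*17 +
		 * 120*68*17*192 + 120*68*32 = 80163840 bytes (~76MB).
		 */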

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}

static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	uint32_t *msg;
	int32_t msg_type, handle;
	unsigned img_size = 0;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	if (bo->tbo.sync_obj) {
		r = radeon_fence_wait(bo->tbo.sync_obj, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = (uint32_t*)((uint8_t*)ptr + offset);

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		radeon_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		/* calc image size (width * height) */
		img_size = msg[6] * msg[7];
		radeon_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpset(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg, calc image size (width * height) */
		img_size = msg[7] * msg[8];
		radeon_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to allocate a new one */
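	/*
	 * atomic_cmpxchg() returns the previous value of the slot, so a
	 * result of zero means this thread claimed the free slot for the
	 * new handle; anything else means another stream holds it.
	 */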
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
			p->rdev->uvd.filp[i] = p->filp;
			p->rdev->uvd.img_size[i] = img_size;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}

static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = p->relocs_ptr[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if (end <= start) {
			DRM_ERROR("invalid reloc offset %X!\n", offset);
			return -EINVAL;
		}
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != ((end - 1) >> 28)) {
		DRM_ERROR("reloc %lX-%lX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %lX-%lX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}
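
/*
 * UVD command-stream convention, as parsed above and emitted by
 * radeon_uvd_send_msg() below: the low and high halves of a buffer
 * address are written to UVD_GPCOM_VCPU_DATA0/DATA1, then a write to
 * UVD_GPCOM_VCPU_CMD (command in bits 31:1) kicks off the operation.
 */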

static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}
	return 0;
}

int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* does the IB have a msg command? */
	bool has_msg_cmd = false;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	32 * 1024 * 1024,
		[0x00000002]	=	2048 * 1152 * 3,
		[0x00000003]	=	2048,
	};
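	/*
	 * The indices correspond to the UVD buffer commands validated in
	 * radeon_uvd_cs_reloc(): 0x0 message buffer, 0x1 DPB (lower bound
	 * computed by radeon_uvd_cs_msg_decode()), 0x2 decoding target,
	 * 0x3 feedback buffer.
	 */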

	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk!\n");
		return -EINVAL;
	}

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

	if (!has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, uint64_t addr,
			       struct radeon_fence **fence)
{
	struct radeon_ib ib;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
	if (r)
		return r;

	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	return r;
}
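
/*
 * Note the IB is padded with PACKET2 NOPs to a full 16 dwords, matching
 * the multiple-of-16-dwords alignment that radeon_uvd_cs_parse() above
 * enforces on userspace IBs.
 */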

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;

	uint32_t *msg = (uint32_t*)((uint8_t*)rdev->uvd.cpu_addr + offs);
	uint64_t addr = rdev->uvd.gpu_addr + offs;

	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together a UVD create msg */
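	/*
	 * Field layout as read back by radeon_uvd_cs_msg(): msg[1] is the
	 * message type (0 create, 1 decode, 2 destroy), msg[2] the stream
	 * handle, and msg[7]/msg[8] describe a dummy 1920x1088 stream
	 * (0x780 x 0x440).
	 */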
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}

int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;

	uint32_t *msg = (uint32_t*)((uint8_t*)rdev->uvd.cpu_addr + offs);
	uint64_t addr = rdev->uvd.gpu_addr + offs;

	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}

/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management
 */
static void radeon_uvd_count_handles(struct radeon_device *rdev,
				     unsigned *sd, unsigned *hd)
{
	unsigned i;

	*sd = 0;
	*hd = 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_read(&rdev->uvd.handles[i]))
			continue;

		if (rdev->uvd.img_size[i] >= 720*576)
			++(*hd);
		else
			++(*sd);
	}
}

static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
						 &rdev->pm.dpm.hd);
			radeon_dpm_enable_uvd(rdev, false);
		} else {
			radeon_set_uvd_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

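/*
 * Called on every submission: cancelling and re-arming the idle work means
 * the handler above only fires (and powers UVD down) once the ring has been
 * idle for a full UVD_IDLE_TIMEOUT_MS. If the cancel did not catch a pending
 * work item, the clocks are known to be down and get raised here.
 */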
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		unsigned hd = 0, sd = 0;
		radeon_uvd_count_handles(rdev, &sd, &hd);
		if ((rdev->pm.dpm.sd != sd) ||
		    (rdev->pm.dpm.hd != hd)) {
			rdev->pm.dpm.sd = sd;
			rdev->pm.dpm.hd = hd;
			/* disable this for now */
			/*streams_changed = true;*/
		}
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, true);
		} else {
			radeon_set_uvd_clocks(rdev, 53300, 40000);
		}
	}
}

static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;
	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}
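
/*
 * Worked example (illustrative numbers): vco_freq = 160000 and target_freq
 * = 53300, both in 10kHz units (1.6GHz and 533MHz), give post_div = 3;
 * since 160000/3 = 53333 exceeds the target, post_div becomes 4, yielding
 * 40000 (400MHz), the closest frequency not above the target.
 */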

/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}
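
/*
 * Typical use (sketch with made-up constraints; real callers in the
 * R6xx-SI clock code supply the board-specific VCO range, feedback
 * factor/mask and post divider limits):
 *
 *	unsigned fb_div, vclk_div, dclk_div;
 *	r = radeon_uvd_calc_upll_dividers(rdev, 53300, 40000,
 *					  125000, 250000,
 *					  16, 0x03FFFFFF,
 *					  2, 30, 2,
 *					  &fb_div, &vclk_div, &dclk_div);
 *	if (r)
 *		return r;
 */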

int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
915