1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2017 Linaro Ltd.
5  */
6 #include <linux/clk.h>
7 #include <linux/iopoll.h>
8 #include <linux/interconnect.h>
9 #include <linux/list.h>
10 #include <linux/mutex.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/slab.h>
13 #include <media/videobuf2-dma-sg.h>
14 #include <media/v4l2-mem2mem.h>
15 #include <asm/div64.h>
16 
17 #include "core.h"
18 #include "helpers.h"
19 #include "hfi_helper.h"
20 #include "hfi_venus_io.h"
21 
/* Bookkeeping entry for a firmware-side (internal or DPB) buffer allocation. */
struct intbuf {
	struct list_head list;	/* links into inst->internalbufs or inst->dpbbufs */
	u32 type;		/* HFI buffer type this allocation serves */
	size_t size;		/* allocation size in bytes */
	void *va;		/* CPU virtual address (may be unmapped, see attrs) */
	dma_addr_t da;		/* device address handed to the firmware */
	unsigned long attrs;	/* DMA attrs used at alloc time, reused on free */
};
30 
31 bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
32 {
33 	struct venus_core *core = inst->core;
34 	u32 session_type = inst->session_type;
35 	u32 codec;
36 
37 	switch (v4l2_pixfmt) {
38 	case V4L2_PIX_FMT_H264:
39 		codec = HFI_VIDEO_CODEC_H264;
40 		break;
41 	case V4L2_PIX_FMT_H263:
42 		codec = HFI_VIDEO_CODEC_H263;
43 		break;
44 	case V4L2_PIX_FMT_MPEG1:
45 		codec = HFI_VIDEO_CODEC_MPEG1;
46 		break;
47 	case V4L2_PIX_FMT_MPEG2:
48 		codec = HFI_VIDEO_CODEC_MPEG2;
49 		break;
50 	case V4L2_PIX_FMT_MPEG4:
51 		codec = HFI_VIDEO_CODEC_MPEG4;
52 		break;
53 	case V4L2_PIX_FMT_VC1_ANNEX_G:
54 	case V4L2_PIX_FMT_VC1_ANNEX_L:
55 		codec = HFI_VIDEO_CODEC_VC1;
56 		break;
57 	case V4L2_PIX_FMT_VP8:
58 		codec = HFI_VIDEO_CODEC_VP8;
59 		break;
60 	case V4L2_PIX_FMT_VP9:
61 		codec = HFI_VIDEO_CODEC_VP9;
62 		break;
63 	case V4L2_PIX_FMT_XVID:
64 		codec = HFI_VIDEO_CODEC_DIVX;
65 		break;
66 	case V4L2_PIX_FMT_HEVC:
67 		codec = HFI_VIDEO_CODEC_HEVC;
68 		break;
69 	default:
70 		return false;
71 	}
72 
73 	if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec)
74 		return true;
75 
76 	if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec)
77 		return true;
78 
79 	return false;
80 }
81 EXPORT_SYMBOL_GPL(venus_helper_check_codec);
82 
83 int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
84 {
85 	struct intbuf *buf;
86 	int ret = 0;
87 
88 	list_for_each_entry(buf, &inst->dpbbufs, list) {
89 		struct hfi_frame_data fdata;
90 
91 		memset(&fdata, 0, sizeof(fdata));
92 		fdata.alloc_len = buf->size;
93 		fdata.device_addr = buf->da;
94 		fdata.buffer_type = buf->type;
95 
96 		ret = hfi_session_process_buf(inst, &fdata);
97 		if (ret)
98 			goto fail;
99 	}
100 
101 fail:
102 	return ret;
103 }
104 EXPORT_SYMBOL_GPL(venus_helper_queue_dpb_bufs);
105 
106 int venus_helper_free_dpb_bufs(struct venus_inst *inst)
107 {
108 	struct intbuf *buf, *n;
109 
110 	list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
111 		list_del_init(&buf->list);
112 		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
113 			       buf->attrs);
114 		kfree(buf);
115 	}
116 
117 	INIT_LIST_HEAD(&inst->dpbbufs);
118 
119 	return 0;
120 }
121 EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs);
122 
123 int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
124 {
125 	struct venus_core *core = inst->core;
126 	struct device *dev = core->dev;
127 	enum hfi_version ver = core->res->hfi_version;
128 	struct hfi_buffer_requirements bufreq;
129 	u32 buftype = inst->dpb_buftype;
130 	unsigned int dpb_size = 0;
131 	struct intbuf *buf;
132 	unsigned int i;
133 	u32 count;
134 	int ret;
135 
136 	/* no need to allocate dpb buffers */
137 	if (!inst->dpb_fmt)
138 		return 0;
139 
140 	if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
141 		dpb_size = inst->output_buf_size;
142 	else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
143 		dpb_size = inst->output2_buf_size;
144 
145 	if (!dpb_size)
146 		return 0;
147 
148 	ret = venus_helper_get_bufreq(inst, buftype, &bufreq);
149 	if (ret)
150 		return ret;
151 
152 	count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
153 
154 	for (i = 0; i < count; i++) {
155 		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
156 		if (!buf) {
157 			ret = -ENOMEM;
158 			goto fail;
159 		}
160 
161 		buf->type = buftype;
162 		buf->size = dpb_size;
163 		buf->attrs = DMA_ATTR_WRITE_COMBINE |
164 			     DMA_ATTR_NO_KERNEL_MAPPING;
165 		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
166 					  buf->attrs);
167 		if (!buf->va) {
168 			kfree(buf);
169 			ret = -ENOMEM;
170 			goto fail;
171 		}
172 
173 		list_add_tail(&buf->list, &inst->dpbbufs);
174 	}
175 
176 	return 0;
177 
178 fail:
179 	venus_helper_free_dpb_bufs(inst);
180 	return ret;
181 }
182 EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs);
183 
/*
 * Allocate and register one class of internal buffers with the firmware.
 *
 * Queries the firmware for the requirements of @type, allocates
 * bufreq.count_actual buffers of bufreq.size bytes and registers each
 * with the session, linking them into inst->internalbufs.
 *
 * A failed requirements query deliberately returns 0: not every buffer
 * type is needed by every codec/session, so missing types are skipped.
 */
static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	struct hfi_buffer_requirements bufreq;
	struct hfi_buffer_desc bd;
	struct intbuf *buf;
	unsigned int i;
	int ret;

	ret = venus_helper_get_bufreq(inst, type, &bufreq);
	if (ret)
		return 0;	/* buffer type not needed by this session */

	if (!bufreq.size)
		return 0;

	for (i = 0; i < bufreq.count_actual; i++) {
		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail;
		}

		buf->type = bufreq.type;
		buf->size = bufreq.size;
		/* firmware-only memory, no CPU mapping needed */
		buf->attrs = DMA_ATTR_WRITE_COMBINE |
			     DMA_ATTR_NO_KERNEL_MAPPING;
		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
					  buf->attrs);
		if (!buf->va) {
			ret = -ENOMEM;
			goto fail;
		}

		memset(&bd, 0, sizeof(bd));
		bd.buffer_size = buf->size;
		bd.buffer_type = buf->type;
		bd.num_buffers = 1;
		bd.device_addr = buf->da;

		ret = hfi_session_set_buffers(inst, &bd);
		if (ret) {
			dev_err(dev, "set session buffers failed\n");
			goto dma_free;
		}

		list_add_tail(&buf->list, &inst->internalbufs);
	}

	return 0;

dma_free:
	/* registration failed: release the buffer we just allocated */
	dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
fail:
	/*
	 * kfree(NULL) is a no-op when the kzalloc() above failed.  Buffers
	 * from earlier iterations stay on inst->internalbufs; the caller
	 * unwinds them via intbufs_unset_buffers().
	 */
	kfree(buf);
	return ret;
}
242 
243 static int intbufs_unset_buffers(struct venus_inst *inst)
244 {
245 	struct hfi_buffer_desc bd = {0};
246 	struct intbuf *buf, *n;
247 	int ret = 0;
248 
249 	list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
250 		bd.buffer_size = buf->size;
251 		bd.buffer_type = buf->type;
252 		bd.num_buffers = 1;
253 		bd.device_addr = buf->da;
254 		bd.response_required = true;
255 
256 		ret = hfi_session_unset_buffers(inst, &bd);
257 
258 		list_del_init(&buf->list);
259 		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
260 			       buf->attrs);
261 		kfree(buf);
262 	}
263 
264 	return ret;
265 }
266 
/* Internal buffer types to allocate on HFI 1xx firmware. */
static const unsigned int intbuf_types_1xx[] = {
	HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_PERSIST,
	HFI_BUFFER_INTERNAL_PERSIST_1,
};
274 
/* Internal buffer types to allocate on HFI 4xx firmware. */
static const unsigned int intbuf_types_4xx[] = {
	HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_PERSIST,
	HFI_BUFFER_INTERNAL_PERSIST_1,
};
282 
283 int venus_helper_intbufs_alloc(struct venus_inst *inst)
284 {
285 	const unsigned int *intbuf;
286 	size_t arr_sz, i;
287 	int ret;
288 
289 	if (IS_V4(inst->core)) {
290 		arr_sz = ARRAY_SIZE(intbuf_types_4xx);
291 		intbuf = intbuf_types_4xx;
292 	} else {
293 		arr_sz = ARRAY_SIZE(intbuf_types_1xx);
294 		intbuf = intbuf_types_1xx;
295 	}
296 
297 	for (i = 0; i < arr_sz; i++) {
298 		ret = intbufs_set_buffer(inst, intbuf[i]);
299 		if (ret)
300 			goto error;
301 	}
302 
303 	return 0;
304 
305 error:
306 	intbufs_unset_buffers(inst);
307 	return ret;
308 }
309 EXPORT_SYMBOL_GPL(venus_helper_intbufs_alloc);
310 
/* Release every internal buffer set up by venus_helper_intbufs_alloc(). */
int venus_helper_intbufs_free(struct venus_inst *inst)
{
	return intbufs_unset_buffers(inst);
}
EXPORT_SYMBOL_GPL(venus_helper_intbufs_free);
316 
317 int venus_helper_intbufs_realloc(struct venus_inst *inst)
318 {
319 	enum hfi_version ver = inst->core->res->hfi_version;
320 	struct hfi_buffer_desc bd;
321 	struct intbuf *buf, *n;
322 	int ret;
323 
324 	list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
325 		if (buf->type == HFI_BUFFER_INTERNAL_PERSIST ||
326 		    buf->type == HFI_BUFFER_INTERNAL_PERSIST_1)
327 			continue;
328 
329 		memset(&bd, 0, sizeof(bd));
330 		bd.buffer_size = buf->size;
331 		bd.buffer_type = buf->type;
332 		bd.num_buffers = 1;
333 		bd.device_addr = buf->da;
334 		bd.response_required = true;
335 
336 		ret = hfi_session_unset_buffers(inst, &bd);
337 
338 		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
339 			       buf->attrs);
340 
341 		list_del_init(&buf->list);
342 		kfree(buf);
343 	}
344 
345 	ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH(ver));
346 	if (ret)
347 		goto err;
348 
349 	ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_1(ver));
350 	if (ret)
351 		goto err;
352 
353 	ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_2(ver));
354 	if (ret)
355 		goto err;
356 
357 	return 0;
358 err:
359 	return ret;
360 }
361 EXPORT_SYMBOL_GPL(venus_helper_intbufs_realloc);
362 
363 static u32 load_per_instance(struct venus_inst *inst)
364 {
365 	u32 mbs;
366 
367 	if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
368 		return 0;
369 
370 	mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
371 
372 	return mbs * inst->fps;
373 }
374 
375 static u32 load_per_type(struct venus_core *core, u32 session_type)
376 {
377 	struct venus_inst *inst = NULL;
378 	u32 mbs_per_sec = 0;
379 
380 	mutex_lock(&core->lock);
381 	list_for_each_entry(inst, &core->instances, list) {
382 		if (inst->session_type != session_type)
383 			continue;
384 
385 		mbs_per_sec += load_per_instance(inst);
386 	}
387 	mutex_unlock(&core->lock);
388 
389 	return mbs_per_sec;
390 }
391 
/*
 * Translate a macroblocks-per-second load into average/peak interconnect
 * bandwidth using the platform bandwidth table for this session type.
 * Outputs are zeroed for zero load, unknown session types or a missing
 * table.
 *
 * NOTE(review): the loop overwrites *avg/*peak on every row until it
 * meets one with mbs_per_sec smaller than @mbs -- this assumes the table
 * is sorted in descending order of mbs_per_sec; confirm against the
 * platform resource tables.
 */
static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
{
	const struct venus_resources *res = inst->core->res;
	const struct bw_tbl *bw_tbl;
	unsigned int num_rows, i;

	*avg = 0;
	*peak = 0;

	if (mbs == 0)
		return;

	if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
		num_rows = res->bw_tbl_enc_size;
		bw_tbl = res->bw_tbl_enc;
	} else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
		num_rows = res->bw_tbl_dec_size;
		bw_tbl = res->bw_tbl_dec;
	} else {
		return;
	}

	if (!bw_tbl || num_rows == 0)
		return;

	for (i = 0; i < num_rows; i++) {
		if (mbs > bw_tbl[i].mbs_per_sec)
			break;

		/* 10-bit content uses the dedicated bandwidth columns */
		if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
			*avg = bw_tbl[i].avg_10bit;
			*peak = bw_tbl[i].peak_10bit;
		} else {
			*avg = bw_tbl[i].avg;
			*peak = bw_tbl[i].peak;
		}
	}
}
430 
431 static int load_scale_bw(struct venus_core *core)
432 {
433 	struct venus_inst *inst = NULL;
434 	u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
435 
436 	mutex_lock(&core->lock);
437 	list_for_each_entry(inst, &core->instances, list) {
438 		mbs_per_sec = load_per_instance(inst);
439 		mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
440 		total_avg += avg;
441 		total_peak += peak;
442 	}
443 	mutex_unlock(&core->lock);
444 
445 	dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n",
446 		total_avg, total_peak);
447 
448 	return icc_set_bw(core->video_path, total_avg, total_peak);
449 }
450 
451 static int set_clk_freq(struct venus_core *core, unsigned long freq)
452 {
453 	struct clk *clk = core->clks[0];
454 	int ret;
455 
456 	ret = clk_set_rate(clk, freq);
457 	if (ret)
458 		return ret;
459 
460 	ret = clk_set_rate(core->core0_clk, freq);
461 	if (ret)
462 		return ret;
463 
464 	ret = clk_set_rate(core->core1_clk, freq);
465 	if (ret)
466 		return ret;
467 
468 	return 0;
469 }
470 
471 static int scale_clocks(struct venus_inst *inst)
472 {
473 	struct venus_core *core = inst->core;
474 	const struct freq_tbl *table = core->res->freq_tbl;
475 	unsigned int num_rows = core->res->freq_tbl_size;
476 	unsigned long freq = table[0].freq;
477 	struct device *dev = core->dev;
478 	u32 mbs_per_sec;
479 	unsigned int i;
480 	int ret;
481 
482 	mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
483 		      load_per_type(core, VIDC_SESSION_TYPE_DEC);
484 
485 	if (mbs_per_sec > core->res->max_load)
486 		dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
487 			 mbs_per_sec, core->res->max_load);
488 
489 	if (!mbs_per_sec && num_rows > 1) {
490 		freq = table[num_rows - 1].freq;
491 		goto set_freq;
492 	}
493 
494 	for (i = 0; i < num_rows; i++) {
495 		if (mbs_per_sec > table[i].load)
496 			break;
497 		freq = table[i].freq;
498 	}
499 
500 set_freq:
501 
502 	ret = set_clk_freq(core, freq);
503 	if (ret) {
504 		dev_err(dev, "failed to set clock rate %lu (%d)\n",
505 			freq, ret);
506 		return ret;
507 	}
508 
509 	ret = load_scale_bw(core);
510 	if (ret) {
511 		dev_err(dev, "failed to set bandwidth (%d)\n",
512 			ret);
513 		return ret;
514 	}
515 
516 	return 0;
517 }
518 
519 static unsigned long calculate_inst_freq(struct venus_inst *inst,
520 					 unsigned long filled_len)
521 {
522 	unsigned long vpp_freq = 0, vsp_freq = 0;
523 	u32 fps = (u32)inst->fps;
524 	u32 mbs_per_sec;
525 
526 	mbs_per_sec = load_per_instance(inst) / fps;
527 
528 	vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
529 	/* 21 / 20 is overhead factor */
530 	vpp_freq += vpp_freq / 20;
531 	vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq;
532 
533 	/* 10 / 7 is overhead factor */
534 	if (inst->session_type == VIDC_SESSION_TYPE_ENC)
535 		vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
536 	else
537 		vsp_freq += ((fps * filled_len * 8) * 10) / 7;
538 
539 	return max(vpp_freq, vsp_freq);
540 }
541 
542 static int scale_clocks_v4(struct venus_inst *inst)
543 {
544 	struct venus_core *core = inst->core;
545 	const struct freq_tbl *table = core->res->freq_tbl;
546 	unsigned int num_rows = core->res->freq_tbl_size;
547 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
548 	struct device *dev = core->dev;
549 	unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
550 	unsigned long filled_len = 0;
551 	struct venus_buffer *buf, *n;
552 	struct vb2_buffer *vb;
553 	int i, ret;
554 
555 	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
556 		vb = &buf->vb.vb2_buf;
557 		filled_len = max(filled_len, vb2_get_plane_payload(vb, 0));
558 	}
559 
560 	if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
561 		return 0;
562 
563 	freq = calculate_inst_freq(inst, filled_len);
564 	inst->clk_data.freq = freq;
565 
566 	mutex_lock(&core->lock);
567 	list_for_each_entry(inst, &core->instances, list) {
568 		if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
569 			freq_core1 += inst->clk_data.freq;
570 		} else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
571 			freq_core2 += inst->clk_data.freq;
572 		} else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
573 			freq_core1 += inst->clk_data.freq;
574 			freq_core2 += inst->clk_data.freq;
575 		}
576 	}
577 	mutex_unlock(&core->lock);
578 
579 	freq = max(freq_core1, freq_core2);
580 
581 	if (freq >= table[0].freq) {
582 		freq = table[0].freq;
583 		dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
584 			 freq, table[0].freq);
585 		goto set_freq;
586 	}
587 
588 	for (i = num_rows - 1 ; i >= 0; i--) {
589 		if (freq <= table[i].freq) {
590 			freq = table[i].freq;
591 			break;
592 		}
593 	}
594 
595 set_freq:
596 
597 	ret = set_clk_freq(core, freq);
598 	if (ret) {
599 		dev_err(dev, "failed to set clock rate %lu (%d)\n",
600 			freq, ret);
601 		return ret;
602 	}
603 
604 	ret = load_scale_bw(core);
605 	if (ret) {
606 		dev_err(dev, "failed to set bandwidth (%d)\n",
607 			ret);
608 		return ret;
609 	}
610 
611 	return 0;
612 }
613 
614 int venus_helper_load_scale_clocks(struct venus_inst *inst)
615 {
616 	if (IS_V4(inst->core))
617 		return scale_clocks_v4(inst);
618 
619 	return scale_clocks(inst);
620 }
621 EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks);
622 
623 static void fill_buffer_desc(const struct venus_buffer *buf,
624 			     struct hfi_buffer_desc *bd, bool response)
625 {
626 	memset(bd, 0, sizeof(*bd));
627 	bd->buffer_type = HFI_BUFFER_OUTPUT;
628 	bd->buffer_size = buf->size;
629 	bd->num_buffers = 1;
630 	bd->device_addr = buf->dma_addr;
631 	bd->response_required = response;
632 }
633 
634 static void return_buf_error(struct venus_inst *inst,
635 			     struct vb2_v4l2_buffer *vbuf)
636 {
637 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
638 
639 	if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
640 		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
641 	else
642 		v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);
643 
644 	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
645 }
646 
647 static void
648 put_ts_metadata(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
649 {
650 	struct vb2_buffer *vb = &vbuf->vb2_buf;
651 	unsigned int i;
652 	int slot = -1;
653 	u64 ts_us = vb->timestamp;
654 
655 	for (i = 0; i < ARRAY_SIZE(inst->tss); i++) {
656 		if (!inst->tss[i].used) {
657 			slot = i;
658 			break;
659 		}
660 	}
661 
662 	if (slot == -1) {
663 		dev_dbg(inst->core->dev, "%s: no free slot\n", __func__);
664 		return;
665 	}
666 
667 	do_div(ts_us, NSEC_PER_USEC);
668 
669 	inst->tss[slot].used = true;
670 	inst->tss[slot].flags = vbuf->flags;
671 	inst->tss[slot].tc = vbuf->timecode;
672 	inst->tss[slot].ts_us = ts_us;
673 	inst->tss[slot].ts_ns = vb->timestamp;
674 }
675 
676 void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
677 				  struct vb2_v4l2_buffer *vbuf)
678 {
679 	struct vb2_buffer *vb = &vbuf->vb2_buf;
680 	unsigned int i;
681 
682 	for (i = 0; i < ARRAY_SIZE(inst->tss); ++i) {
683 		if (!inst->tss[i].used)
684 			continue;
685 
686 		if (inst->tss[i].ts_us != timestamp_us)
687 			continue;
688 
689 		inst->tss[i].used = false;
690 		vbuf->flags |= inst->tss[i].flags;
691 		vbuf->timecode = inst->tss[i].tc;
692 		vb->timestamp = inst->tss[i].ts_ns;
693 		break;
694 	}
695 }
696 EXPORT_SYMBOL_GPL(venus_helper_get_ts_metadata);
697 
698 static int
699 session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
700 {
701 	struct venus_buffer *buf = to_venus_buffer(vbuf);
702 	struct vb2_buffer *vb = &vbuf->vb2_buf;
703 	unsigned int type = vb->type;
704 	struct hfi_frame_data fdata;
705 	int ret;
706 
707 	memset(&fdata, 0, sizeof(fdata));
708 	fdata.alloc_len = buf->size;
709 	fdata.device_addr = buf->dma_addr;
710 	fdata.timestamp = vb->timestamp;
711 	do_div(fdata.timestamp, NSEC_PER_USEC);
712 	fdata.flags = 0;
713 	fdata.clnt_data = vbuf->vb2_buf.index;
714 
715 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
716 		fdata.buffer_type = HFI_BUFFER_INPUT;
717 		fdata.filled_len = vb2_get_plane_payload(vb, 0);
718 		fdata.offset = vb->planes[0].data_offset;
719 
720 		if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
721 			fdata.flags |= HFI_BUFFERFLAG_EOS;
722 
723 		if (inst->session_type == VIDC_SESSION_TYPE_DEC)
724 			put_ts_metadata(inst, vbuf);
725 
726 		venus_helper_load_scale_clocks(inst);
727 	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
728 		if (inst->session_type == VIDC_SESSION_TYPE_ENC)
729 			fdata.buffer_type = HFI_BUFFER_OUTPUT;
730 		else
731 			fdata.buffer_type = inst->opb_buftype;
732 		fdata.filled_len = 0;
733 		fdata.offset = 0;
734 	}
735 
736 	ret = hfi_session_process_buf(inst, &fdata);
737 	if (ret)
738 		return ret;
739 
740 	return 0;
741 }
742 
743 static bool is_dynamic_bufmode(struct venus_inst *inst)
744 {
745 	struct venus_core *core = inst->core;
746 	struct venus_caps *caps;
747 
748 	/*
749 	 * v4 doesn't send BUFFER_ALLOC_MODE_SUPPORTED property and supports
750 	 * dynamic buffer mode by default for HFI_BUFFER_OUTPUT/OUTPUT2.
751 	 */
752 	if (IS_V4(core))
753 		return true;
754 
755 	caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
756 	if (!caps)
757 		return false;
758 
759 	return caps->cap_bufs_mode_dynamic;
760 }
761 
762 int venus_helper_unregister_bufs(struct venus_inst *inst)
763 {
764 	struct venus_buffer *buf, *n;
765 	struct hfi_buffer_desc bd;
766 	int ret = 0;
767 
768 	if (is_dynamic_bufmode(inst))
769 		return 0;
770 
771 	list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
772 		fill_buffer_desc(buf, &bd, true);
773 		ret = hfi_session_unset_buffers(inst, &bd);
774 		list_del_init(&buf->reg_list);
775 	}
776 
777 	return ret;
778 }
779 EXPORT_SYMBOL_GPL(venus_helper_unregister_bufs);
780 
781 static int session_register_bufs(struct venus_inst *inst)
782 {
783 	struct venus_core *core = inst->core;
784 	struct device *dev = core->dev;
785 	struct hfi_buffer_desc bd;
786 	struct venus_buffer *buf;
787 	int ret = 0;
788 
789 	if (is_dynamic_bufmode(inst))
790 		return 0;
791 
792 	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
793 		fill_buffer_desc(buf, &bd, false);
794 		ret = hfi_session_set_buffers(inst, &bd);
795 		if (ret) {
796 			dev_err(dev, "%s: set buffer failed\n", __func__);
797 			break;
798 		}
799 	}
800 
801 	return ret;
802 }
803 
804 static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
805 {
806 	switch (v4l2_fmt) {
807 	case V4L2_PIX_FMT_NV12:
808 		return HFI_COLOR_FORMAT_NV12;
809 	case V4L2_PIX_FMT_NV21:
810 		return HFI_COLOR_FORMAT_NV21;
811 	default:
812 		break;
813 	}
814 
815 	return 0;
816 }
817 
818 int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
819 			    struct hfi_buffer_requirements *req)
820 {
821 	u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
822 	union hfi_get_property hprop;
823 	unsigned int i;
824 	int ret;
825 
826 	if (req)
827 		memset(req, 0, sizeof(*req));
828 
829 	ret = hfi_session_get_property(inst, ptype, &hprop);
830 	if (ret)
831 		return ret;
832 
833 	ret = -EINVAL;
834 
835 	for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
836 		if (hprop.bufreq[i].type != type)
837 			continue;
838 
839 		if (req)
840 			memcpy(req, &hprop.bufreq[i], sizeof(*req));
841 		ret = 0;
842 		break;
843 	}
844 
845 	return ret;
846 }
847 EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
848 
849 static u32 get_framesize_raw_nv12(u32 width, u32 height)
850 {
851 	u32 y_stride, uv_stride, y_plane;
852 	u32 y_sclines, uv_sclines, uv_plane;
853 	u32 size;
854 
855 	y_stride = ALIGN(width, 128);
856 	uv_stride = ALIGN(width, 128);
857 	y_sclines = ALIGN(height, 32);
858 	uv_sclines = ALIGN(((height + 1) >> 1), 16);
859 
860 	y_plane = y_stride * y_sclines;
861 	uv_plane = uv_stride * uv_sclines + SZ_4K;
862 	size = y_plane + uv_plane + SZ_8K;
863 
864 	return ALIGN(size, SZ_4K);
865 }
866 
/*
 * Frame size for compressed (UBWC) NV12: four planes -- Y metadata,
 * Y data, UV metadata, UV data -- each 4K aligned, plus room for
 * extradata (at least 16K, or 48 lines worth of Y stride).
 */
static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
{
	u32 y_meta_stride, y_meta_plane;
	u32 y_stride, y_plane;
	u32 uv_meta_stride, uv_meta_plane;
	u32 uv_stride, uv_plane;
	u32 extradata = SZ_16K;

	/* divisors suggest one metadata byte per 32x8 luma tile -- per
	 * the UBWC layout; confirm against the hardware programming guide */
	y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
	y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(height, 8), 16);
	y_meta_plane = ALIGN(y_meta_plane, SZ_4K);

	y_stride = ALIGN(width, 128);
	y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K);

	/* chroma is subsampled 2x2, hence the width/2 and height/2 */
	uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
	uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16);
	uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K);

	uv_stride = ALIGN(width, 128);
	uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K);

	return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane +
		     max(extradata, y_stride * 48), SZ_4K);
}
892 
893 u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
894 {
895 	switch (hfi_fmt) {
896 	case HFI_COLOR_FORMAT_NV12:
897 	case HFI_COLOR_FORMAT_NV21:
898 		return get_framesize_raw_nv12(width, height);
899 	case HFI_COLOR_FORMAT_NV12_UBWC:
900 		return get_framesize_raw_nv12_ubwc(width, height);
901 	default:
902 		return 0;
903 	}
904 }
905 EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw);
906 
907 u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
908 {
909 	u32 hfi_fmt, sz;
910 	bool compressed;
911 
912 	switch (v4l2_fmt) {
913 	case V4L2_PIX_FMT_MPEG:
914 	case V4L2_PIX_FMT_H264:
915 	case V4L2_PIX_FMT_H264_NO_SC:
916 	case V4L2_PIX_FMT_H264_MVC:
917 	case V4L2_PIX_FMT_H263:
918 	case V4L2_PIX_FMT_MPEG1:
919 	case V4L2_PIX_FMT_MPEG2:
920 	case V4L2_PIX_FMT_MPEG4:
921 	case V4L2_PIX_FMT_XVID:
922 	case V4L2_PIX_FMT_VC1_ANNEX_G:
923 	case V4L2_PIX_FMT_VC1_ANNEX_L:
924 	case V4L2_PIX_FMT_VP8:
925 	case V4L2_PIX_FMT_VP9:
926 	case V4L2_PIX_FMT_HEVC:
927 		compressed = true;
928 		break;
929 	default:
930 		compressed = false;
931 		break;
932 	}
933 
934 	if (compressed) {
935 		sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
936 		return ALIGN(sz, SZ_4K);
937 	}
938 
939 	hfi_fmt = to_hfi_raw_fmt(v4l2_fmt);
940 	if (!hfi_fmt)
941 		return 0;
942 
943 	return venus_helper_get_framesz_raw(hfi_fmt, width, height);
944 }
945 EXPORT_SYMBOL_GPL(venus_helper_get_framesz);
946 
947 int venus_helper_set_input_resolution(struct venus_inst *inst,
948 				      unsigned int width, unsigned int height)
949 {
950 	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
951 	struct hfi_framesize fs;
952 
953 	fs.buffer_type = HFI_BUFFER_INPUT;
954 	fs.width = width;
955 	fs.height = height;
956 
957 	return hfi_session_set_property(inst, ptype, &fs);
958 }
959 EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);
960 
961 int venus_helper_set_output_resolution(struct venus_inst *inst,
962 				       unsigned int width, unsigned int height,
963 				       u32 buftype)
964 {
965 	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
966 	struct hfi_framesize fs;
967 
968 	fs.buffer_type = buftype;
969 	fs.width = width;
970 	fs.height = height;
971 
972 	return hfi_session_set_property(inst, ptype, &fs);
973 }
974 EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);
975 
976 int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
977 {
978 	const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE;
979 	struct hfi_video_work_mode wm;
980 
981 	if (!IS_V4(inst->core))
982 		return 0;
983 
984 	wm.video_work_mode = mode;
985 
986 	return hfi_session_set_property(inst, ptype, &wm);
987 }
988 EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
989 
990 int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
991 {
992 	const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
993 	struct hfi_videocores_usage_type cu;
994 
995 	inst->clk_data.core_id = usage;
996 	if (!IS_V4(inst->core))
997 		return 0;
998 
999 	cu.video_core_enable_mask = usage;
1000 
1001 	return hfi_session_set_property(inst, ptype, &cu);
1002 }
1003 EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
1004 
1005 int venus_helper_init_codec_freq_data(struct venus_inst *inst)
1006 {
1007 	const struct codec_freq_data *data;
1008 	unsigned int i, data_size;
1009 	u32 pixfmt;
1010 	int ret = 0;
1011 
1012 	if (!IS_V4(inst->core))
1013 		return 0;
1014 
1015 	data = inst->core->res->codec_freq_data;
1016 	data_size = inst->core->res->codec_freq_data_size;
1017 	pixfmt = inst->session_type == VIDC_SESSION_TYPE_DEC ?
1018 			inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt;
1019 
1020 	for (i = 0; i < data_size; i++) {
1021 		if (data[i].pixfmt == pixfmt &&
1022 		    data[i].session_type == inst->session_type) {
1023 			inst->clk_data.codec_freq_data = &data[i];
1024 			break;
1025 		}
1026 	}
1027 
1028 	if (!inst->clk_data.codec_freq_data)
1029 		ret = -EINVAL;
1030 
1031 	return ret;
1032 }
1033 EXPORT_SYMBOL_GPL(venus_helper_init_codec_freq_data);
1034 
1035 int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
1036 			      unsigned int output_bufs,
1037 			      unsigned int output2_bufs)
1038 {
1039 	u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
1040 	struct hfi_buffer_count_actual buf_count;
1041 	int ret;
1042 
1043 	buf_count.type = HFI_BUFFER_INPUT;
1044 	buf_count.count_actual = input_bufs;
1045 
1046 	ret = hfi_session_set_property(inst, ptype, &buf_count);
1047 	if (ret)
1048 		return ret;
1049 
1050 	buf_count.type = HFI_BUFFER_OUTPUT;
1051 	buf_count.count_actual = output_bufs;
1052 
1053 	ret = hfi_session_set_property(inst, ptype, &buf_count);
1054 	if (ret)
1055 		return ret;
1056 
1057 	if (output2_bufs) {
1058 		buf_count.type = HFI_BUFFER_OUTPUT2;
1059 		buf_count.count_actual = output2_bufs;
1060 
1061 		ret = hfi_session_set_property(inst, ptype, &buf_count);
1062 	}
1063 
1064 	return ret;
1065 }
1066 EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);
1067 
1068 int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
1069 				u32 buftype)
1070 {
1071 	const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
1072 	struct hfi_uncompressed_format_select fmt;
1073 
1074 	fmt.buffer_type = buftype;
1075 	fmt.format = hfi_format;
1076 
1077 	return hfi_session_set_property(inst, ptype, &fmt);
1078 }
1079 EXPORT_SYMBOL_GPL(venus_helper_set_raw_format);
1080 
1081 int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
1082 {
1083 	u32 hfi_format, buftype;
1084 
1085 	if (inst->session_type == VIDC_SESSION_TYPE_DEC)
1086 		buftype = HFI_BUFFER_OUTPUT;
1087 	else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
1088 		buftype = HFI_BUFFER_INPUT;
1089 	else
1090 		return -EINVAL;
1091 
1092 	hfi_format = to_hfi_raw_fmt(pixfmt);
1093 	if (!hfi_format)
1094 		return -EINVAL;
1095 
1096 	return venus_helper_set_raw_format(inst, hfi_format, buftype);
1097 }
1098 EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
1099 
1100 int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
1101 				 bool out2_en)
1102 {
1103 	struct hfi_multi_stream multi = {0};
1104 	u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
1105 	int ret;
1106 
1107 	multi.buffer_type = HFI_BUFFER_OUTPUT;
1108 	multi.enable = out_en;
1109 
1110 	ret = hfi_session_set_property(inst, ptype, &multi);
1111 	if (ret)
1112 		return ret;
1113 
1114 	multi.buffer_type = HFI_BUFFER_OUTPUT2;
1115 	multi.enable = out2_en;
1116 
1117 	return hfi_session_set_property(inst, ptype, &multi);
1118 }
1119 EXPORT_SYMBOL_GPL(venus_helper_set_multistream);
1120 
1121 int venus_helper_set_dyn_bufmode(struct venus_inst *inst)
1122 {
1123 	const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
1124 	struct hfi_buffer_alloc_mode mode;
1125 	int ret;
1126 
1127 	if (!is_dynamic_bufmode(inst))
1128 		return 0;
1129 
1130 	mode.type = HFI_BUFFER_OUTPUT;
1131 	mode.mode = HFI_BUFFER_MODE_DYNAMIC;
1132 
1133 	ret = hfi_session_set_property(inst, ptype, &mode);
1134 	if (ret)
1135 		return ret;
1136 
1137 	mode.type = HFI_BUFFER_OUTPUT2;
1138 
1139 	return hfi_session_set_property(inst, ptype, &mode);
1140 }
1141 EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode);
1142 
1143 int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype)
1144 {
1145 	const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
1146 	struct hfi_buffer_size_actual bufsz;
1147 
1148 	bufsz.type = buftype;
1149 	bufsz.size = bufsize;
1150 
1151 	return hfi_session_set_property(inst, ptype, &bufsz);
1152 }
1153 EXPORT_SYMBOL_GPL(venus_helper_set_bufsize);
1154 
1155 unsigned int venus_helper_get_opb_size(struct venus_inst *inst)
1156 {
1157 	/* the encoder has only one output */
1158 	if (inst->session_type == VIDC_SESSION_TYPE_ENC)
1159 		return inst->output_buf_size;
1160 
1161 	if (inst->opb_buftype == HFI_BUFFER_OUTPUT)
1162 		return inst->output_buf_size;
1163 	else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2)
1164 		return inst->output2_buf_size;
1165 
1166 	return 0;
1167 }
1168 EXPORT_SYMBOL_GPL(venus_helper_get_opb_size);
1169 
1170 static void delayed_process_buf_func(struct work_struct *work)
1171 {
1172 	struct venus_buffer *buf, *n;
1173 	struct venus_inst *inst;
1174 	int ret;
1175 
1176 	inst = container_of(work, struct venus_inst, delayed_process_work);
1177 
1178 	mutex_lock(&inst->lock);
1179 
1180 	if (!(inst->streamon_out & inst->streamon_cap))
1181 		goto unlock;
1182 
1183 	list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
1184 		if (buf->flags & HFI_BUFFERFLAG_READONLY)
1185 			continue;
1186 
1187 		ret = session_process_buf(inst, &buf->vb);
1188 		if (ret)
1189 			return_buf_error(inst, &buf->vb);
1190 
1191 		list_del_init(&buf->ref_list);
1192 	}
1193 unlock:
1194 	mutex_unlock(&inst->lock);
1195 }
1196 
1197 void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
1198 {
1199 	struct venus_buffer *buf;
1200 
1201 	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
1202 		if (buf->vb.vb2_buf.index == idx) {
1203 			buf->flags &= ~HFI_BUFFERFLAG_READONLY;
1204 			schedule_work(&inst->delayed_process_work);
1205 			break;
1206 		}
1207 	}
1208 }
1209 EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);
1210 
/**
 * venus_helper_acquire_buf_ref() - mark a buffer as referenced by firmware
 * @vbuf: the vb2 buffer to mark
 *
 * Sets HFI_BUFFERFLAG_READONLY; such buffers are held on the delayed
 * process list (see is_buf_refed()) until the reference is released via
 * venus_helper_release_buf_ref().
 */
void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);

	buf->flags |= HFI_BUFFERFLAG_READONLY;
}
1218 
1219 static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
1220 {
1221 	struct venus_buffer *buf = to_venus_buffer(vbuf);
1222 
1223 	if (buf->flags & HFI_BUFFERFLAG_READONLY) {
1224 		list_add_tail(&buf->ref_list, &inst->delayed_process);
1225 		schedule_work(&inst->delayed_process_work);
1226 		return 1;
1227 	}
1228 
1229 	return 0;
1230 }
1231 
1232 struct vb2_v4l2_buffer *
1233 venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
1234 {
1235 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
1236 
1237 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1238 		return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
1239 	else
1240 		return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
1241 }
1242 EXPORT_SYMBOL_GPL(venus_helper_find_buf);
1243 
1244 int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
1245 {
1246 	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
1247 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1248 	struct venus_buffer *buf = to_venus_buffer(vbuf);
1249 	struct sg_table *sgt;
1250 
1251 	sgt = vb2_dma_sg_plane_desc(vb, 0);
1252 	if (!sgt)
1253 		return -EFAULT;
1254 
1255 	buf->size = vb2_plane_size(vb, 0);
1256 	buf->dma_addr = sg_dma_address(sgt->sgl);
1257 
1258 	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1259 		list_add_tail(&buf->reg_list, &inst->registeredbufs);
1260 
1261 	return 0;
1262 }
1263 EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
1264 
1265 int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
1266 {
1267 	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
1268 	unsigned int out_buf_size = venus_helper_get_opb_size(inst);
1269 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1270 
1271 	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
1272 		if (vbuf->field == V4L2_FIELD_ANY)
1273 			vbuf->field = V4L2_FIELD_NONE;
1274 		if (vbuf->field != V4L2_FIELD_NONE) {
1275 			dev_err(inst->core->dev, "%s field isn't supported\n",
1276 				__func__);
1277 			return -EINVAL;
1278 		}
1279 	}
1280 
1281 	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
1282 	    vb2_plane_size(vb, 0) < out_buf_size)
1283 		return -EINVAL;
1284 	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
1285 	    vb2_plane_size(vb, 0) < inst->input_buf_size)
1286 		return -EINVAL;
1287 
1288 	return 0;
1289 }
1290 EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);
1291 
/**
 * venus_helper_vb2_buf_queue() - vb2 buf_queue op
 * @vb: the vb2 buffer being queued
 *
 * Queues the buffer on the m2m context and, when streaming has started,
 * pushes it to the firmware. Buffers still referenced by the firmware
 * (READONLY) are deferred via the delayed-process list instead.
 */
void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	int ret;

	mutex_lock(&inst->lock);

	v4l2_m2m_buf_queue(m2m_ctx, vbuf);

	/* for encoders, hold buffers until both queues are streaming */
	if (inst->session_type == VIDC_SESSION_TYPE_ENC &&
	    !(inst->streamon_out && inst->streamon_cap))
		goto unlock;

	if (vb2_start_streaming_called(vb->vb2_queue)) {
		/* deferred if firmware still references the buffer */
		ret = is_buf_refed(inst, vbuf);
		if (ret)
			goto unlock;

		ret = session_process_buf(inst, vbuf);
		if (ret)
			return_buf_error(inst, vbuf);
	}

unlock:
	mutex_unlock(&inst->lock);
}
1321 
1322 void venus_helper_buffers_done(struct venus_inst *inst,
1323 			       enum vb2_buffer_state state)
1324 {
1325 	struct vb2_v4l2_buffer *buf;
1326 
1327 	while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
1328 		v4l2_m2m_buf_done(buf, state);
1329 	while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
1330 		v4l2_m2m_buf_done(buf, state);
1331 }
1332 EXPORT_SYMBOL_GPL(venus_helper_buffers_done);
1333 
1334 void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
1335 {
1336 	struct venus_inst *inst = vb2_get_drv_priv(q);
1337 	struct venus_core *core = inst->core;
1338 	int ret;
1339 
1340 	mutex_lock(&inst->lock);
1341 
1342 	if (inst->streamon_out & inst->streamon_cap) {
1343 		ret = hfi_session_stop(inst);
1344 		ret |= hfi_session_unload_res(inst);
1345 		ret |= venus_helper_unregister_bufs(inst);
1346 		ret |= venus_helper_intbufs_free(inst);
1347 		ret |= hfi_session_deinit(inst);
1348 
1349 		if (inst->session_error || core->sys_error)
1350 			ret = -EIO;
1351 
1352 		if (ret)
1353 			hfi_session_abort(inst);
1354 
1355 		venus_helper_free_dpb_bufs(inst);
1356 
1357 		venus_helper_load_scale_clocks(inst);
1358 		INIT_LIST_HEAD(&inst->registeredbufs);
1359 	}
1360 
1361 	venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
1362 
1363 	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1364 		inst->streamon_out = 0;
1365 	else
1366 		inst->streamon_cap = 0;
1367 
1368 	mutex_unlock(&inst->lock);
1369 }
1370 EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
1371 
1372 int venus_helper_process_initial_cap_bufs(struct venus_inst *inst)
1373 {
1374 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
1375 	struct v4l2_m2m_buffer *buf, *n;
1376 	int ret;
1377 
1378 	v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
1379 		ret = session_process_buf(inst, &buf->vb);
1380 		if (ret) {
1381 			return_buf_error(inst, &buf->vb);
1382 			return ret;
1383 		}
1384 	}
1385 
1386 	return 0;
1387 }
1388 EXPORT_SYMBOL_GPL(venus_helper_process_initial_cap_bufs);
1389 
1390 int venus_helper_process_initial_out_bufs(struct venus_inst *inst)
1391 {
1392 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
1393 	struct v4l2_m2m_buffer *buf, *n;
1394 	int ret;
1395 
1396 	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
1397 		ret = session_process_buf(inst, &buf->vb);
1398 		if (ret) {
1399 			return_buf_error(inst, &buf->vb);
1400 			return ret;
1401 		}
1402 	}
1403 
1404 	return 0;
1405 }
1406 EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs);
1407 
/**
 * venus_helper_vb2_start_streaming() - bring an HFI session to streaming
 * @inst: the venus instance
 *
 * Allocates internal buffers, registers buffers with the firmware, scales
 * clocks, loads session resources and starts the session. On failure each
 * completed step is unwound in reverse order via the goto chain below.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
	int ret;

	ret = venus_helper_intbufs_alloc(inst);
	if (ret)
		return ret;

	ret = session_register_bufs(inst);
	if (ret)
		goto err_bufs_free;

	venus_helper_load_scale_clocks(inst);

	ret = hfi_session_load_res(inst);
	if (ret)
		goto err_unreg_bufs;

	ret = hfi_session_start(inst);
	if (ret)
		goto err_unload_res;

	return 0;

	/* unwind in reverse order of setup */
err_unload_res:
	hfi_session_unload_res(inst);
err_unreg_bufs:
	venus_helper_unregister_bufs(inst);
err_bufs_free:
	venus_helper_intbufs_free(inst);
	return ret;
}
1441 
1442 void venus_helper_m2m_device_run(void *priv)
1443 {
1444 	struct venus_inst *inst = priv;
1445 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
1446 	struct v4l2_m2m_buffer *buf, *n;
1447 	int ret;
1448 
1449 	mutex_lock(&inst->lock);
1450 
1451 	v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
1452 		ret = session_process_buf(inst, &buf->vb);
1453 		if (ret)
1454 			return_buf_error(inst, &buf->vb);
1455 	}
1456 
1457 	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
1458 		ret = session_process_buf(inst, &buf->vb);
1459 		if (ret)
1460 			return_buf_error(inst, &buf->vb);
1461 	}
1462 
1463 	mutex_unlock(&inst->lock);
1464 }
1465 EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);
1466 
/**
 * venus_helper_m2m_job_abort() - m2m job_abort op
 * @priv: the venus instance (as opaque m2m private data)
 *
 * Immediately finishes the current m2m job; actual hardware abort is
 * handled elsewhere.
 */
void venus_helper_m2m_job_abort(void *priv)
{
	struct venus_inst *inst = priv;

	v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
}
1474 
1475 void venus_helper_init_instance(struct venus_inst *inst)
1476 {
1477 	if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
1478 		INIT_LIST_HEAD(&inst->delayed_process);
1479 		INIT_WORK(&inst->delayed_process_work,
1480 			  delayed_process_buf_func);
1481 	}
1482 }
1483 EXPORT_SYMBOL_GPL(venus_helper_init_instance);
1484 
1485 static bool find_fmt_from_caps(struct venus_caps *caps, u32 buftype, u32 fmt)
1486 {
1487 	unsigned int i;
1488 
1489 	for (i = 0; i < caps->num_fmts; i++) {
1490 		if (caps->fmts[i].buftype == buftype &&
1491 		    caps->fmts[i].fmt == fmt)
1492 			return true;
1493 	}
1494 
1495 	return false;
1496 }
1497 
/**
 * venus_helper_get_out_fmts() - resolve HFI output formats for a pixel format
 * @inst: the venus instance
 * @v4l2_fmt: v4l2 raw pixel format to resolve
 * @out_fmt: resulting HFI format for HFI_BUFFER_OUTPUT (0 if unused)
 * @out2_fmt: resulting HFI format for HFI_BUFFER_OUTPUT2 (0 if unused)
 * @ubwc: whether to prefer a UBWC primary output with a linear secondary
 *
 * Preference order: (1) when @ubwc and both the UBWC format on OUTPUT and
 * the linear format on OUTPUT2 are supported, use that split; (2) the
 * plain format on OUTPUT; (3) the plain format on OUTPUT2.
 *
 * Return: 0 on success, -EINVAL when the format cannot be mapped or is
 * not supported by the codec's capabilities.
 */
int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
			      u32 *out_fmt, u32 *out2_fmt, bool ubwc)
{
	struct venus_core *core = inst->core;
	struct venus_caps *caps;
	u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
	bool found, found_ubwc;

	*out_fmt = *out2_fmt = 0;

	if (!fmt)
		return -EINVAL;

	caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
	if (!caps)
		return -EINVAL;

	if (ubwc) {
		/* UBWC primary output plus a linear secondary output */
		ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
		found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
						ubwc_fmt);
		found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);

		if (found_ubwc && found) {
			*out_fmt = ubwc_fmt;
			*out2_fmt = fmt;
			return 0;
		}
	}

	/* fall back to a single linear output on either buffer type */
	found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
	if (found) {
		*out_fmt = fmt;
		*out2_fmt = 0;
		return 0;
	}

	found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
	if (found) {
		*out_fmt = 0;
		*out2_fmt = fmt;
		return 0;
	}

	return -EINVAL;
}
1545 
/**
 * venus_helper_power_enable() - toggle codec core power collapse
 * @core: the venus core
 * @session_type: VIDC_SESSION_TYPE_DEC or VIDC_SESSION_TYPE_ENC, selects
 *		  which codec core's power control registers are used
 * @enable: true to power the core up, false to allow power collapse
 *
 * No-op on hardware generations other than V3/V4. On V3 the write is
 * fire-and-forget; on V4 the status register is polled for up to 100 us.
 *
 * Return: 0 on success, -ETIMEDOUT if the V4 status poll times out.
 */
int venus_helper_power_enable(struct venus_core *core, u32 session_type,
			      bool enable)
{
	void __iomem *ctrl, *stat;
	u32 val;
	int ret;

	if (!IS_V3(core) && !IS_V4(core))
		return 0;

	if (IS_V3(core)) {
		if (session_type == VIDC_SESSION_TYPE_DEC)
			ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
		else
			ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
		/* 0 = power on, 1 = allow power collapse; no status to poll */
		if (enable)
			writel(0, ctrl);
		else
			writel(1, ctrl);

		return 0;
	}

	/* V4: per-core MMCC power control with a matching status register */
	if (session_type == VIDC_SESSION_TYPE_DEC) {
		ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
		stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
	} else {
		ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
		stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
	}

	if (enable) {
		writel(0, ctrl);

		/*
		 * Poll status BIT(1) every 1 us, up to 100 us.
		 * NOTE(review): BIT(1) presumably indicates "core powered";
		 * confirm against the wrapper register documentation.
		 */
		ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
		if (ret)
			return ret;
	} else {
		writel(1, ctrl);

		/* wait for the status bit to clear on power collapse */
		ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
		if (ret)
			return ret;
	}

	return 0;
}
1594